Jaasomn commited on
Commit
ceb3821
·
0 Parent(s):

Initial deployment

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +38 -0
  2. Dockerfile +50 -0
  3. README.md +29 -0
  4. configs/antigravity/1769225597642_oauth_creds.json +7 -0
  5. configs/antigravity/1769225702945_oauth_creds.json +7 -0
  6. configs/antigravity/1769225782003_oauth_creds.json +7 -0
  7. configs/antigravity/1769225856991_oauth_creds.json +7 -0
  8. configs/api-potluck-data.json +4 -0
  9. configs/api-potluck-data.json.example +29 -0
  10. configs/api-potluck-keys.json.example +16 -0
  11. configs/config.json +25 -0
  12. configs/config.json.example +49 -0
  13. configs/gemini/1769225674911_oauth_creds.json +7 -0
  14. configs/input_system_prompt.txt +0 -0
  15. configs/plugins.json +12 -0
  16. configs/plugins.json.example +12 -0
  17. configs/provider_pools.json +90 -0
  18. configs/provider_pools.json.example +213 -0
  19. configs/pwd +1 -0
  20. configs/token-store.json +9 -0
  21. configs/usage-cache.json +907 -0
  22. healthcheck.js +46 -0
  23. package-lock.json +0 -0
  24. package.json +42 -0
  25. src/auth/codex-oauth.js +851 -0
  26. src/auth/gemini-oauth.js +289 -0
  27. src/auth/iflow-oauth.js +529 -0
  28. src/auth/index.js +32 -0
  29. src/auth/kiro-oauth.js +1117 -0
  30. src/auth/oauth-handlers.js +24 -0
  31. src/auth/qwen-oauth.js +339 -0
  32. src/convert/convert-old.js +0 -0
  33. src/convert/convert.js +389 -0
  34. src/converters/BaseConverter.js +115 -0
  35. src/converters/ConverterFactory.js +182 -0
  36. src/converters/register-converters.js +29 -0
  37. src/converters/strategies/ClaudeConverter.js +1652 -0
  38. src/converters/strategies/CodexConverter.js +489 -0
  39. src/converters/strategies/GeminiConverter.js +1226 -0
  40. src/converters/strategies/OllamaConverter.js +690 -0
  41. src/converters/strategies/OpenAIConverter.js +1565 -0
  42. src/converters/strategies/OpenAIResponsesConverter.js +582 -0
  43. src/converters/utils.js +433 -0
  44. src/core/config-manager.js +257 -0
  45. src/core/master.js +378 -0
  46. src/core/plugin-manager.js +522 -0
  47. src/example/claude/oldResponse1762501965597.json +1 -0
  48. src/example/claude/oldResponseChunk1762501803864.json +19 -0
  49. src/example/claude/originalRequestBody1762501795790.json +1 -0
  50. src/example/claude/originalRequestBody1762501964743.json +1 -0
.gitattributes ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.png filter=lfs diff=lfs merge=lfs -text
37
+ *.webp filter=lfs diff=lfs merge=lfs -text
38
+ *.ico filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Hugging Face Spaces Docker 部署文件
2
+ # 使用官方Node.js运行时作为基础镜像
3
+ FROM node:20-alpine
4
+
5
+ # 设置标签
6
+ LABEL maintainer="AIClient2API Team"
7
+ LABEL description="Docker image for AIClient2API server on Hugging Face Spaces"
8
+
9
+ # 安装必要的系统工具
10
+ RUN apk add --no-cache tar git
11
+
12
+ # 创建非 root 用户 (HF Spaces 要求)
13
+ RUN adduser -D -u 1000 appuser
14
+
15
+ # 设置工作目录
16
+ WORKDIR /app
17
+
18
+ # 复制package.json和package-lock.json
19
+ COPY package*.json ./
20
+
21
+ # 安装依赖
22
+ RUN npm install --omit=dev
23
+
24
+ # 复制源代码
25
+ COPY . .
26
+
27
+ # 创建配置和日志目录,并设置权限
28
+ RUN mkdir -p /app/configs /app/logs && \
29
+ chown -R appuser:appuser /app
30
+
31
+ # 复制示例配置文件
32
+ RUN cp -n configs/config.json.example configs/config.json 2>/dev/null || true
33
+
34
+ # 切换到非 root 用户
35
+ USER appuser
36
+
37
+ # HF Spaces 默认端口是 7860,但我们的应用使用 3000
38
+ # 通过环境变量设置端口
39
+ ENV SERVER_PORT=7860
40
+ ENV HOST=0.0.0.0
41
+
42
+ # 暴露端口
43
+ EXPOSE 7860
44
+
45
+ # 健康检查
46
+ HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
47
+ CMD node healthcheck.js || exit 1
48
+
49
+ # 启动命令
50
+ CMD ["node", "src/core/master.js"]
README.md ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: AIClient-2-API
3
+ emoji: 🚀
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: docker
7
+ pinned: false
8
+ license: gpl-3.0
9
+ app_port: 7860
10
+ ---
11
+
12
+ # AIClient-2-API
13
+
14
+ 一个能将多种仅客户端内使用的大模型 API(Gemini CLI, Antigravity, Qwen Code, Kiro ...),模拟请求,统一封装为本地 OpenAI 兼容接口的强大代理。
15
+
16
+ ## 使用说明
17
+
18
+ 1. 访问 Web UI 管理界面添加账号配置
19
+ 2. 使用 OpenAI 兼容的 API 格式调用
20
+
21
+ ## API 端点
22
+
23
+ - `POST /v1/chat/completions` - 聊天补全
24
+ - `GET /v1/models` - 获取可用模型列表
25
+ - `GET /health` - 健康检查
26
+
27
+ ## 更多信息
28
+
29
+ 请访问 [GitHub 仓库](https://github.com/justlovemaki/AIClient-2-API) 了解详情。
configs/antigravity/1769225597642_oauth_creds.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "access_token": "ya29.a0AUMWg_JeH7Sv6jyaowhfmL3G840d-BfpsSzcO9d0kQTqOZdqKs8yDWOXaOMD6XuinSd3wzqFyMGWRtC2r1NlbvgwsDQ9sJ2xeKx-35-L8b6EhCx5NNJELmb4MFhGutyzQPFRdH8Icu7cvuxe52n21wKnXyHjWFn2EAYC7wHe1vA6LVY_GVGYA6vK8O4_UuZC1KOwPgs8eHzFaCgYKAQESARISFQHGX2MiUZVn11HuBiwd-y9Dmt85lQ0211",
3
+ "refresh_token": "1//0gj4nApJu9H2rCgYIARAAGBASNwF-L9IrGmrKt0QPaeJhTTnQwB11ay7Rj_IAyxPhqaGnLTI11NUw6UslAjLIBKT5W6BqqeR0a3Y",
4
+ "scope": "https://www.googleapis.com/auth/cloud-platform",
5
+ "token_type": "Bearer",
6
+ "expiry_date": 1769229196641
7
+ }
configs/antigravity/1769225702945_oauth_creds.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "access_token": "ya29.a0AUMWg_LXsfEby-mp2js055q9ETPYyIQGsppOEbUBRRZnQWppHGiCbNDedtkBzyS95N82H-gwxD90kpJ5bIdC7VBCKlEGGDIjIFtKFdgIpmu1MnJoNr5RcpVdGHrSmPqYl5L4pAYYYw00nPvdqsTIJaQK0CKXjygwYcFf7O1AlN4cesr9Ak3WJKwu3ATnXzvPXJJ38vBMOmC5aCgYKAbsSARESFQHGX2MiML_vy6uw9_otVEWp2pr-Ow0211",
3
+ "refresh_token": "1//0ghNIZ9aGEjWwCgYIARAAGBASNwF-L9Irxv1fzCbtzjydvELuh_QDHVueFa6oY4MPucSC7xTtF4LACk1uxCdPe8jwnb7nieyKfe0",
4
+ "scope": "https://www.googleapis.com/auth/cloud-platform",
5
+ "token_type": "Bearer",
6
+ "expiry_date": 1769229301944
7
+ }
configs/antigravity/1769225782003_oauth_creds.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "access_token": "ya29.a0AUMWg_JTJ4UYXpcfxExjKbQiz7bdJ5aMYSfCxA2kl6O6sRr4w0f6xBoJXLLqTTpgP3vexEGHx1T1l_pv4ScXK6le4YWCROPLDm8MFUywrU_jyCesCyOme_zHzDxKEmnM2wisJ7RZshkCB4spBmyXMVSQ1XKQfYa1vRAyeQJeJF8waMmeeWM0q5kUVPR51YQxPXyCZfniNUDyaCgYKAQ4SARISFQHGX2Mi4qcpxyBXBYIt-iKhPzULlQ0211",
3
+ "refresh_token": "1//0g73AXXq_01hqCgYIARAAGBASNwF-L9IrWfZjVzvqMx9Rm9ypo5sgoJo5gLBcMvtNXe5osvrPr7MGwoFxVMxpvWacLgGnxnCb5t8",
4
+ "scope": "https://www.googleapis.com/auth/cloud-platform",
5
+ "token_type": "Bearer",
6
+ "expiry_date": 1769229381002
7
+ }
configs/antigravity/1769225856991_oauth_creds.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "access_token": "ya29.a0AUMWg_J7HQtBbdg7GIbJwmcjmjPe1Z8Y5i_0IrDNCapapEYy077JrTKv86VbkoIyllhvYAKnt1ZeOX0J0xWxNZhhLZjHLd0Cj_rvOFCZS1SX7jsy3NqGIfIVBqcM_j1bjAD0wTDDy5Q9qFWb7Pm4bRW2kuaB7-WdzVfu3BDbkHpHTpi7KSOMP559W51T9n49U2eUMi0AxO30aCgYKAUUSARISFQHGX2MiBiD7jtM3Anr43U6tdOXDxw0211",
3
+ "refresh_token": "1//0g2AXkvBZIr3-CgYIARAAGBASNwF-L9IrJPSLuDBTntjzQr9w86JXInpqbZ9uobXGamOmeZ5sb0IaCpftz3cN03Gn6ZxYLj2hVls",
4
+ "scope": "https://www.googleapis.com/auth/cloud-platform",
5
+ "token_type": "Bearer",
6
+ "expiry_date": 1769229455991
7
+ }
configs/api-potluck-data.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "config": {},
3
+ "users": {}
4
+ }
configs/api-potluck-data.json.example ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": {
3
+ "defaultDailyLimit": 500,
4
+ "bonusPerCredential": 300,
5
+ "bonusValidityDays": 30,
6
+ "persistInterval": 5000
7
+ },
8
+ "users": {
9
+ "maki_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx": {
10
+ "credentials": [
11
+ {
12
+ "id": "cred_0000000000000_xxxxxx",
13
+ "path": "configs/kiro/example_kiro-auth-token/example_kiro-auth-token.json",
14
+ "provider": "claude-kiro-oauth",
15
+ "authMethod": "refresh-token",
16
+ "addedAt": "2026-01-01T00:00:00.000Z"
17
+ }
18
+ ],
19
+ "credentialBonuses": [
20
+ {
21
+ "credentialId": "cred_0000000000000_xxxxxx",
22
+ "grantedAt": "2026-01-01T00:00:00.000Z",
23
+ "usedCount": 0
24
+ }
25
+ ],
26
+ "createdAt": "2026-01-01T00:00:00.000Z"
27
+ }
28
+ }
29
+ }
configs/api-potluck-keys.json.example ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "keys": {
3
+ "maki_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx": {
4
+ "id": "maki_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
5
+ "name": "示例用户",
6
+ "createdAt": "2026-01-01T00:00:00.000Z",
7
+ "dailyLimit": 500,
8
+ "todayUsage": 0,
9
+ "totalUsage": 0,
10
+ "lastResetDate": "2026-01-01",
11
+ "lastUsedAt": null,
12
+ "enabled": true,
13
+ "bonusRemaining": 0
14
+ }
15
+ }
16
+ }
configs/config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "REQUIRED_API_KEY": "123456",
3
+ "SERVER_PORT": 3000,
4
+ "HOST": "0.0.0.0",
5
+ "MODEL_PROVIDER": "gemini-antigravity",
6
+ "SYSTEM_PROMPT_FILE_PATH": "configs/input_system_prompt.txt",
7
+ "SYSTEM_PROMPT_MODE": "append",
8
+ "PROMPT_LOG_BASE_NAME": "prompt_log",
9
+ "PROMPT_LOG_MODE": "none",
10
+ "REQUEST_MAX_RETRIES": 3,
11
+ "REQUEST_BASE_DELAY": 1000,
12
+ "CREDENTIAL_SWITCH_MAX_RETRIES": 5,
13
+ "CRON_NEAR_MINUTES": 15,
14
+ "CRON_REFRESH_TOKEN": false,
15
+ "PROVIDER_POOLS_FILE_PATH": "configs/provider_pools.json",
16
+ "MAX_ERROR_COUNT": 10,
17
+ "WARMUP_TARGET": 0,
18
+ "REFRESH_CONCURRENCY_PER_PROVIDER": 1,
19
+ "providerFallbackChain": {},
20
+ "modelFallbackMapping": {},
21
+ "PROXY_URL": null,
22
+ "PROXY_ENABLED_PROVIDERS": [
23
+ "gemini-antigravity"
24
+ ]
25
+ }
configs/config.json.example ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "REQUIRED_API_KEY": "123456",
3
+ "SERVER_PORT": 3000,
4
+ "HOST": "0.0.0.0",
5
+ "MODEL_PROVIDER": "gemini-cli-oauth",
6
+ "SYSTEM_PROMPT_FILE_PATH": "configs/input_system_prompt.txt",
7
+ "SYSTEM_PROMPT_MODE": "overwrite",
8
+ "PROMPT_LOG_BASE_NAME": "prompt_log",
9
+ "PROMPT_LOG_MODE": "none",
10
+ "REQUEST_MAX_RETRIES": 3,
11
+ "REQUEST_BASE_DELAY": 1000,
12
+ "CRON_NEAR_MINUTES": 1,
13
+ "CRON_REFRESH_TOKEN": false,
14
+ "PROVIDER_POOLS_FILE_PATH": "configs/provider_pools.json",
15
+ "MAX_ERROR_COUNT": 3,
16
+ "providerFallbackChain": {
17
+ "gemini-cli-oauth": ["gemini-antigravity"],
18
+ "gemini-antigravity": ["gemini-cli-oauth"],
19
+ "claude-kiro-oauth": ["claude-custom"],
20
+ "claude-custom": ["claude-kiro-oauth"]
21
+ },
22
+ "modelFallbackMapping": {
23
+ "gemini-claude-opus-4-5-thinking": {
24
+ "targetProviderType": "claude-kiro-oauth",
25
+ "targetModel": "claude-opus-4-5"
26
+ },
27
+ "gemini-claude-sonnet-4-5-thinking": {
28
+ "targetProviderType": "claude-kiro-oauth",
29
+ "targetModel": "claude-sonnet-4-5"
30
+ },
31
+ "gemini-claude-sonnet-4-5": {
32
+ "targetProviderType": "claude-kiro-oauth",
33
+ "targetModel": "claude-sonnet-4-5"
34
+ },
35
+ "claude-opus-4-5": {
36
+ "targetProviderType": "gemini-antigravity",
37
+ "targetModel": "gemini-claude-opus-4-5-thinking"
38
+ },
39
+ "claude-sonnet-4-5": {
40
+ "targetProviderType": "gemini-antigravity",
41
+ "targetModel": "gemini-claude-sonnet-4-5"
42
+ }
43
+ },
44
+ "PROXY_URL": "http://127.0.0.1:1089",
45
+ "PROXY_ENABLED_PROVIDERS": [
46
+ "gemini-cli-oauth",
47
+ "gemini-antigravity"
48
+ ]
49
+ }
configs/gemini/1769225674911_oauth_creds.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "access_token": "ya29.a0AUMWg_KAYGG-Hw_Py9KPKou7c2DOpA8nMONO0A4qnL4L8N9RsPLgAcTbZmzdWuU0Y-WvnSvqeKsn7Sd1n4Fj22-lhGXL87HVCBcheb0nbe8J_dnxIEHAshlh5bg6SJvRC83h6t2RxiPbQrkWMPT98gDTjxs_BjJHTdnU5evV6AbWXjraOWUZeW5k-dhiJOz0UxXfx7UGzeE8aCgYKAYQSARESFQHGX2Mi2hHBN1sDCU3BCQNUUnr47w0211",
3
+ "refresh_token": "1//0gULsM_b1IjcqCgYIARAAGBASNwF-L9IrR_jh9j1sxZ3JJqAq097QqO6ny5EJZ3NxUUh32uznBcaGYIu0Azpm-4vCxWs--fSOtEE",
4
+ "scope": "https://www.googleapis.com/auth/cloud-platform",
5
+ "token_type": "Bearer",
6
+ "expiry_date": 1769229273910
7
+ }
configs/input_system_prompt.txt ADDED
File without changes
configs/plugins.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "plugins": {
3
+ "api-potluck": {
4
+ "enabled": true,
5
+ "description": "API 大锅饭 - Key 管理和用量统计插件<br>管理端:<a href=\"potluck.html\" target=\"_blank\">potluck.html</a><br>用户端:<a href=\"potluck-user.html\" target=\"_blank\">potluck-user.html</a>"
6
+ },
7
+ "default-auth": {
8
+ "enabled": true,
9
+ "description": "默认 API Key 认证插件"
10
+ }
11
+ }
12
+ }
configs/plugins.json.example ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "plugins": {
3
+ "api-potluck": {
4
+ "enabled": true,
5
+ "description": "API 大锅饭 - Key 管理和用量统计插件"
6
+ },
7
+ "default-auth": {
8
+ "enabled": true,
9
+ "description": "默认 API Key 认证插件(内置)"
10
+ }
11
+ }
12
+ }
configs/provider_pools.json ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "claude-kiro-oauth": [],
3
+ "openai-qwen-oauth": [],
4
+ "openai-iflow": [],
5
+ "openai-codex-oauth": [],
6
+ "gemini-cli-oauth": [
7
+ {
8
+ "GEMINI_OAUTH_CREDS_FILE_PATH": "configs/gemini/1769225674911_oauth_creds.json",
9
+ "uuid": "8cb833b9-8533-4ea8-930f-46a272129bcc",
10
+ "checkModelName": "gemini-2.5-flash",
11
+ "checkHealth": false,
12
+ "isHealthy": true,
13
+ "isDisabled": false,
14
+ "lastUsed": null,
15
+ "usageCount": 0,
16
+ "errorCount": 0,
17
+ "lastErrorTime": null,
18
+ "lastHealthCheckTime": null,
19
+ "lastHealthCheckModel": null,
20
+ "lastErrorMessage": null,
21
+ "PROJECT_ID": ""
22
+ }
23
+ ],
24
+ "gemini-antigravity": [
25
+ {
26
+ "ANTIGRAVITY_OAUTH_CREDS_FILE_PATH": "configs/antigravity/1769225597642_oauth_creds.json",
27
+ "uuid": "7caa7971-7fb4-4466-bc64-b1bbaa3dc5d0",
28
+ "checkModelName": "gemini-2.5-computer-use-preview-10-2025",
29
+ "checkHealth": false,
30
+ "isHealthy": true,
31
+ "isDisabled": false,
32
+ "lastUsed": null,
33
+ "usageCount": 0,
34
+ "errorCount": 0,
35
+ "lastErrorTime": null,
36
+ "lastHealthCheckTime": null,
37
+ "lastHealthCheckModel": null,
38
+ "lastErrorMessage": null,
39
+ "PROJECT_ID": ""
40
+ },
41
+ {
42
+ "ANTIGRAVITY_OAUTH_CREDS_FILE_PATH": "configs/antigravity/1769225702945_oauth_creds.json",
43
+ "uuid": "74a34d43-2e2d-47a1-ae04-f7956f4eac32",
44
+ "checkModelName": "gemini-2.5-computer-use-preview-10-2025",
45
+ "checkHealth": false,
46
+ "isHealthy": true,
47
+ "isDisabled": false,
48
+ "lastUsed": null,
49
+ "usageCount": 0,
50
+ "errorCount": 0,
51
+ "lastErrorTime": null,
52
+ "lastHealthCheckTime": null,
53
+ "lastHealthCheckModel": null,
54
+ "lastErrorMessage": null,
55
+ "PROJECT_ID": ""
56
+ },
57
+ {
58
+ "ANTIGRAVITY_OAUTH_CREDS_FILE_PATH": "configs/antigravity/1769225782003_oauth_creds.json",
59
+ "uuid": "2dc29208-0047-442a-a810-fd9af1290875",
60
+ "checkModelName": "gemini-2.5-computer-use-preview-10-2025",
61
+ "checkHealth": false,
62
+ "isHealthy": true,
63
+ "isDisabled": false,
64
+ "lastUsed": null,
65
+ "usageCount": 0,
66
+ "errorCount": 0,
67
+ "lastErrorTime": null,
68
+ "lastHealthCheckTime": null,
69
+ "lastHealthCheckModel": null,
70
+ "lastErrorMessage": null,
71
+ "PROJECT_ID": ""
72
+ },
73
+ {
74
+ "ANTIGRAVITY_OAUTH_CREDS_FILE_PATH": "configs/antigravity/1769225856991_oauth_creds.json",
75
+ "uuid": "6d7301c2-da5c-491c-8fb9-e822ec781bb8",
76
+ "checkModelName": "gemini-2.5-computer-use-preview-10-2025",
77
+ "checkHealth": false,
78
+ "isHealthy": true,
79
+ "isDisabled": false,
80
+ "lastUsed": null,
81
+ "usageCount": 0,
82
+ "errorCount": 0,
83
+ "lastErrorTime": null,
84
+ "lastHealthCheckTime": null,
85
+ "lastHealthCheckModel": null,
86
+ "lastErrorMessage": null,
87
+ "PROJECT_ID": ""
88
+ }
89
+ ]
90
+ }
configs/provider_pools.json.example ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "openai-custom": [
3
+ {
4
+ "customName": "OpenAI节点1",
5
+ "OPENAI_API_KEY": "sk-openai-key1",
6
+ "OPENAI_BASE_URL": "https://api.openai.com/v1",
7
+ "checkModelName": null,
8
+ "checkHealth": false,
9
+ "notSupportedModels": ["gpt-4-turbo"],
10
+ "uuid": "2f579c65-d3c5-41b1-9985-9f6e3d7bf39c",
11
+ "isHealthy": true,
12
+ "isDisabled": false,
13
+ "lastUsed": null,
14
+ "usageCount": 0,
15
+ "errorCount": 0,
16
+ "lastErrorTime": null
17
+ },
18
+ {
19
+ "customName": "OpenAI节点2",
20
+ "OPENAI_API_KEY": "sk-openai-key2",
21
+ "OPENAI_BASE_URL": "https://api.openai.com/v1",
22
+ "checkModelName": null,
23
+ "checkHealth": false,
24
+ "notSupportedModels": ["gpt-4-turbo", "gpt-4"],
25
+ "uuid": "e284628d-302f-456d-91f3-6095386fb3b8",
26
+ "isHealthy": true,
27
+ "isDisabled": true,
28
+ "lastUsed": null,
29
+ "usageCount": 0,
30
+ "errorCount": 0,
31
+ "lastErrorTime": null
32
+ }
33
+ ],
34
+ "openaiResponses-custom": [
35
+ {
36
+ "customName": "OpenAI Responses节点",
37
+ "OPENAI_API_KEY": "sk-openai-key",
38
+ "OPENAI_BASE_URL": "https://api.openai.com/v1",
39
+ "checkModelName": null,
40
+ "checkHealth": false,
41
+ "uuid": "e284628d-302f-456d-91f3-609538678968",
42
+ "isHealthy": true,
43
+ "isDisabled": false,
44
+ "lastUsed": null,
45
+ "usageCount": 0,
46
+ "errorCount": 0,
47
+ "lastErrorTime": null
48
+ }
49
+ ],
50
+ "gemini-cli-oauth": [
51
+ {
52
+ "customName": "Gemini OAuth节点1",
53
+ "GEMINI_OAUTH_CREDS_FILE_PATH": "./credentials1.json",
54
+ "PROJECT_ID": "your-project-id-1",
55
+ "checkModelName": null,
56
+ "checkHealth": false,
57
+ "uuid": "ac200154-26b8-4f5f-8650-e8cc738b06e3",
58
+ "isHealthy": true,
59
+ "isDisabled": false,
60
+ "lastUsed": null,
61
+ "usageCount": 0,
62
+ "errorCount": 0,
63
+ "lastErrorTime": null
64
+ },
65
+ {
66
+ "customName": "Gemini OAuth节点2",
67
+ "GEMINI_OAUTH_CREDS_FILE_PATH": "./credentials2.json",
68
+ "PROJECT_ID": "your-project-id-2",
69
+ "checkModelName": null,
70
+ "checkHealth": false,
71
+ "uuid": "4f8afcc2-a9bb-4b96-bb50-3b9667a71f54",
72
+ "isHealthy": true,
73
+ "isDisabled": false,
74
+ "lastUsed": null,
75
+ "usageCount": 0,
76
+ "errorCount": 0,
77
+ "lastErrorTime": null
78
+ }
79
+ ],
80
+ "claude-custom": [
81
+ {
82
+ "customName": "Claude节点1",
83
+ "CLAUDE_API_KEY": "sk-claude-key1",
84
+ "CLAUDE_BASE_URL": "https://api.anthropic.com",
85
+ "checkModelName": null,
86
+ "checkHealth": false,
87
+ "uuid": "bb87047a-3b1d-4249-adbb-1087ecd58128",
88
+ "isHealthy": true,
89
+ "isDisabled": false,
90
+ "lastUsed": null,
91
+ "usageCount": 0,
92
+ "errorCount": 0,
93
+ "lastErrorTime": null
94
+ },
95
+ {
96
+ "customName": "Claude节点2",
97
+ "CLAUDE_API_KEY": "sk-claude-key2",
98
+ "CLAUDE_BASE_URL": "https://api.anthropic.com",
99
+ "checkModelName": null,
100
+ "checkHealth": false,
101
+ "uuid": "7c2002c6-122a-4db0-af06-8a0ff433801a",
102
+ "isHealthy": true,
103
+ "isDisabled": false,
104
+ "lastUsed": null,
105
+ "usageCount": 0,
106
+ "errorCount": 0,
107
+ "lastErrorTime": null
108
+ }
109
+ ],
110
+ "claude-kiro-oauth": [
111
+ {
112
+ "customName": "Kiro OAuth节点1",
113
+ "KIRO_OAUTH_CREDS_FILE_PATH": "./kiro_creds1.json",
114
+ "uuid": "2c69d0ac-b86f-43d8-9d17-0d300afc5cfd",
115
+ "checkModelName": null,
116
+ "checkHealth": false,
117
+ "isHealthy": true,
118
+ "isDisabled": false,
119
+ "lastUsed": null,
120
+ "usageCount": 0,
121
+ "errorCount": 0,
122
+ "lastErrorTime": null
123
+ },
124
+ {
125
+ "customName": "Kiro OAuth节点2",
126
+ "KIRO_OAUTH_CREDS_FILE_PATH": "./kiro_creds2.json",
127
+ "uuid": "7482abe6-8083-4288-bb7d-d8ecb7c461e2",
128
+ "checkModelName": null,
129
+ "checkHealth": false,
130
+ "isHealthy": true,
131
+ "isDisabled": false,
132
+ "lastUsed": null,
133
+ "usageCount": 0,
134
+ "errorCount": 0,
135
+ "lastErrorTime": null
136
+ }
137
+ ],
138
+ "openai-qwen-oauth": [
139
+ {
140
+ "customName": "Qwen OAuth节点",
141
+ "QWEN_OAUTH_CREDS_FILE_PATH": "./qwen_creds.json",
142
+ "uuid": "658a2114-c4c9-d713-b8d4-ceabf0e0bf18",
143
+ "checkModelName": null,
144
+ "checkHealth": false,
145
+ "isHealthy": true,
146
+ "isDisabled": false,
147
+ "lastUsed": null,
148
+ "usageCount": 0,
149
+ "errorCount": 0,
150
+ "lastErrorTime": null
151
+ }
152
+ ],
153
+ "gemini-antigravity": [
154
+ {
155
+ "customName": "Antigravity节点1",
156
+ "ANTIGRAVITY_OAUTH_CREDS_FILE_PATH": "./antigravity_creds1.json",
157
+ "PROJECT_ID": "antigravity-project-1",
158
+ "uuid": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
159
+ "checkModelName": null,
160
+ "checkHealth": false,
161
+ "isHealthy": true,
162
+ "isDisabled": false,
163
+ "lastUsed": null,
164
+ "usageCount": 0,
165
+ "errorCount": 0,
166
+ "lastErrorTime": null
167
+ },
168
+ {
169
+ "customName": "Antigravity节点2",
170
+ "ANTIGRAVITY_OAUTH_CREDS_FILE_PATH": "./antigravity_creds2.json",
171
+ "PROJECT_ID": "antigravity-project-2",
172
+ "uuid": "f0e9d8c7-b6a5-4321-fedc-ba9876543210",
173
+ "checkModelName": null,
174
+ "checkHealth": false,
175
+ "isHealthy": true,
176
+ "isDisabled": false,
177
+ "lastUsed": null,
178
+ "usageCount": 0,
179
+ "errorCount": 0,
180
+ "lastErrorTime": null
181
+ }
182
+ ],
183
+ "openai-iflow": [
184
+ {
185
+ "customName": "iFlow Token节点1",
186
+ "IFLOW_TOKEN_FILE_PATH": "./configs/iflow/iflow_token.json",
187
+ "IFLOW_BASE_URL": "https://apis.iflow.cn/v1",
188
+ "uuid": "11223344-5566-7788-99aa-bbccddeeff00",
189
+ "checkModelName": "gpt-4o",
190
+ "checkHealth": false,
191
+ "isHealthy": true,
192
+ "isDisabled": false,
193
+ "lastUsed": null,
194
+ "usageCount": 0,
195
+ "errorCount": 0,
196
+ "lastErrorTime": null
197
+ },
198
+ {
199
+ "customName": "iFlow Token节点2",
200
+ "IFLOW_TOKEN_FILE_PATH": "./configs/iflow/iflow_token2.json",
201
+ "IFLOW_BASE_URL": "https://apis.iflow.cn/v1",
202
+ "uuid": "aabbccdd-eeff-0011-2233-445566778899",
203
+ "checkModelName": "gpt-4o",
204
+ "checkHealth": false,
205
+ "isHealthy": true,
206
+ "isDisabled": false,
207
+ "lastUsed": null,
208
+ "usageCount": 0,
209
+ "errorCount": 0,
210
+ "lastErrorTime": null
211
+ }
212
+ ]
213
+ }
configs/pwd ADDED
@@ -0,0 +1 @@
 
 
1
+ admin123
configs/token-store.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "tokens": {
3
+ "59fe6fdb91e45d1710d8cd190246064ed3a8c87e6a4b7fe8b29e8c20b251a29d": {
4
+ "username": "admin",
5
+ "loginTime": 1769225324808,
6
+ "expiryTime": 1769228924808
7
+ }
8
+ }
9
+ }
configs/usage-cache.json ADDED
@@ -0,0 +1,907 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "timestamp": "2026-01-24T03:38:30.804Z",
3
+ "providers": {
4
+ "claude-kiro-oauth": {
5
+ "providerType": "claude-kiro-oauth",
6
+ "instances": [],
7
+ "totalCount": 0,
8
+ "successCount": 0,
9
+ "errorCount": 0
10
+ },
11
+ "gemini-cli-oauth": {
12
+ "providerType": "gemini-cli-oauth",
13
+ "instances": [
14
+ {
15
+ "uuid": "8cb833b9-8533-4ea8-930f-46a272129bcc",
16
+ "name": "8cb833b9-8533-4ea8-930f-46a272129bcc",
17
+ "isHealthy": true,
18
+ "isDisabled": false,
19
+ "success": true,
20
+ "usage": {
21
+ "daysUntilReset": null,
22
+ "nextDateReset": null,
23
+ "subscription": {
24
+ "title": "Gemini CLI OAuth",
25
+ "type": "gemini-cli-oauth",
26
+ "upgradeCapability": null,
27
+ "overageCapability": null
28
+ },
29
+ "user": {
30
+ "email": null,
31
+ "userId": null
32
+ },
33
+ "usageBreakdown": [
34
+ {
35
+ "resourceType": "MODEL_USAGE",
36
+ "displayName": "gemini-2.5-flash",
37
+ "displayNamePlural": "gemini-2.5-flash",
38
+ "unit": "quota",
39
+ "currency": null,
40
+ "currentUsage": 0,
41
+ "usageLimit": 100,
42
+ "currentOverages": 0,
43
+ "overageCap": 0,
44
+ "overageRate": null,
45
+ "overageCharges": 0,
46
+ "nextDateReset": "2026-01-25T03:38:34.000Z",
47
+ "freeTrial": null,
48
+ "bonuses": [],
49
+ "modelName": "gemini-2.5-flash",
50
+ "inputTokenLimit": 0,
51
+ "outputTokenLimit": 0,
52
+ "remaining": 1,
53
+ "remainingPercent": 100,
54
+ "resetTime": "01-25 19:38",
55
+ "resetTimeRaw": "2026-01-25T03:38:34Z"
56
+ },
57
+ {
58
+ "resourceType": "MODEL_USAGE",
59
+ "displayName": "gemini-2.5-flash-lite",
60
+ "displayNamePlural": "gemini-2.5-flash-lite",
61
+ "unit": "quota",
62
+ "currency": null,
63
+ "currentUsage": 0,
64
+ "usageLimit": 100,
65
+ "currentOverages": 0,
66
+ "overageCap": 0,
67
+ "overageRate": null,
68
+ "overageCharges": 0,
69
+ "nextDateReset": "2026-01-25T03:38:34.000Z",
70
+ "freeTrial": null,
71
+ "bonuses": [],
72
+ "modelName": "gemini-2.5-flash-lite",
73
+ "inputTokenLimit": 0,
74
+ "outputTokenLimit": 0,
75
+ "remaining": 1,
76
+ "remainingPercent": 100,
77
+ "resetTime": "01-25 19:38",
78
+ "resetTimeRaw": "2026-01-25T03:38:34Z"
79
+ },
80
+ {
81
+ "resourceType": "MODEL_USAGE",
82
+ "displayName": "gemini-2.5-pro",
83
+ "displayNamePlural": "gemini-2.5-pro",
84
+ "unit": "quota",
85
+ "currency": null,
86
+ "currentUsage": 0,
87
+ "usageLimit": 100,
88
+ "currentOverages": 0,
89
+ "overageCap": 0,
90
+ "overageRate": null,
91
+ "overageCharges": 0,
92
+ "nextDateReset": "2026-01-25T03:38:34.000Z",
93
+ "freeTrial": null,
94
+ "bonuses": [],
95
+ "modelName": "gemini-2.5-pro",
96
+ "inputTokenLimit": 0,
97
+ "outputTokenLimit": 0,
98
+ "remaining": 1,
99
+ "remainingPercent": 100,
100
+ "resetTime": "01-25 19:38",
101
+ "resetTimeRaw": "2026-01-25T03:38:34Z"
102
+ },
103
+ {
104
+ "resourceType": "MODEL_USAGE",
105
+ "displayName": "gemini-3-flash-preview",
106
+ "displayNamePlural": "gemini-3-flash-preview",
107
+ "unit": "quota",
108
+ "currency": null,
109
+ "currentUsage": 0,
110
+ "usageLimit": 100,
111
+ "currentOverages": 0,
112
+ "overageCap": 0,
113
+ "overageRate": null,
114
+ "overageCharges": 0,
115
+ "nextDateReset": "2026-01-25T03:38:34.000Z",
116
+ "freeTrial": null,
117
+ "bonuses": [],
118
+ "modelName": "gemini-3-flash-preview",
119
+ "inputTokenLimit": 0,
120
+ "outputTokenLimit": 0,
121
+ "remaining": 1,
122
+ "remainingPercent": 100,
123
+ "resetTime": "01-25 19:38",
124
+ "resetTimeRaw": "2026-01-25T03:38:34Z"
125
+ },
126
+ {
127
+ "resourceType": "MODEL_USAGE",
128
+ "displayName": "gemini-3-pro-preview",
129
+ "displayNamePlural": "gemini-3-pro-preview",
130
+ "unit": "quota",
131
+ "currency": null,
132
+ "currentUsage": 0,
133
+ "usageLimit": 100,
134
+ "currentOverages": 0,
135
+ "overageCap": 0,
136
+ "overageRate": null,
137
+ "overageCharges": 0,
138
+ "nextDateReset": "2026-01-25T03:38:34.000Z",
139
+ "freeTrial": null,
140
+ "bonuses": [],
141
+ "modelName": "gemini-3-pro-preview",
142
+ "inputTokenLimit": 0,
143
+ "outputTokenLimit": 0,
144
+ "remaining": 1,
145
+ "remainingPercent": 100,
146
+ "resetTime": "01-25 19:38",
147
+ "resetTimeRaw": "2026-01-25T03:38:34Z"
148
+ }
149
+ ]
150
+ },
151
+ "error": null
152
+ }
153
+ ],
154
+ "totalCount": 1,
155
+ "successCount": 1,
156
+ "errorCount": 0
157
+ },
158
+ "gemini-antigravity": {
159
+ "providerType": "gemini-antigravity",
160
+ "instances": [
161
+ {
162
+ "uuid": "7caa7971-7fb4-4466-bc64-b1bbaa3dc5d0",
163
+ "name": "7caa7971-7fb4-4466-bc64-b1bbaa3dc5d0",
164
+ "isHealthy": true,
165
+ "isDisabled": false,
166
+ "success": true,
167
+ "usage": {
168
+ "daysUntilReset": null,
169
+ "nextDateReset": null,
170
+ "subscription": {
171
+ "title": "Gemini Antigravity",
172
+ "type": "gemini-antigravity",
173
+ "upgradeCapability": null,
174
+ "overageCapability": null
175
+ },
176
+ "user": {
177
+ "email": null,
178
+ "userId": null
179
+ },
180
+ "usageBreakdown": [
181
+ {
182
+ "resourceType": "MODEL_USAGE",
183
+ "displayName": "gemini-2.5-flash-preview",
184
+ "displayNamePlural": "gemini-2.5-flash-preview",
185
+ "unit": "quota",
186
+ "currency": null,
187
+ "currentUsage": 0,
188
+ "usageLimit": 100,
189
+ "currentOverages": 0,
190
+ "overageCap": 0,
191
+ "overageRate": null,
192
+ "overageCharges": 0,
193
+ "nextDateReset": "2026-01-24T08:38:42.000Z",
194
+ "freeTrial": null,
195
+ "bonuses": [],
196
+ "modelName": "gemini-2.5-flash-preview",
197
+ "inputTokenLimit": 0,
198
+ "outputTokenLimit": 0,
199
+ "remaining": 1,
200
+ "remainingPercent": 100,
201
+ "resetTime": "01-25 00:38",
202
+ "resetTimeRaw": "2026-01-24T08:38:42Z"
203
+ },
204
+ {
205
+ "resourceType": "MODEL_USAGE",
206
+ "displayName": "gemini-3-flash-preview",
207
+ "displayNamePlural": "gemini-3-flash-preview",
208
+ "unit": "quota",
209
+ "currency": null,
210
+ "currentUsage": 0,
211
+ "usageLimit": 100,
212
+ "currentOverages": 0,
213
+ "overageCap": 0,
214
+ "overageRate": null,
215
+ "overageCharges": 0,
216
+ "nextDateReset": "2026-01-24T08:38:42.000Z",
217
+ "freeTrial": null,
218
+ "bonuses": [],
219
+ "modelName": "gemini-3-flash-preview",
220
+ "inputTokenLimit": 0,
221
+ "outputTokenLimit": 0,
222
+ "remaining": 1,
223
+ "remainingPercent": 100,
224
+ "resetTime": "01-25 00:38",
225
+ "resetTimeRaw": "2026-01-24T08:38:42Z"
226
+ },
227
+ {
228
+ "resourceType": "MODEL_USAGE",
229
+ "displayName": "gemini-3-pro-image-preview",
230
+ "displayNamePlural": "gemini-3-pro-image-preview",
231
+ "unit": "quota",
232
+ "currency": null,
233
+ "currentUsage": 0,
234
+ "usageLimit": 100,
235
+ "currentOverages": 0,
236
+ "overageCap": 0,
237
+ "overageRate": null,
238
+ "overageCharges": 0,
239
+ "nextDateReset": "2026-01-24T08:38:42.000Z",
240
+ "freeTrial": null,
241
+ "bonuses": [],
242
+ "modelName": "gemini-3-pro-image-preview",
243
+ "inputTokenLimit": 0,
244
+ "outputTokenLimit": 0,
245
+ "remaining": 1,
246
+ "remainingPercent": 100,
247
+ "resetTime": "01-25 00:38",
248
+ "resetTimeRaw": "2026-01-24T08:38:42Z"
249
+ },
250
+ {
251
+ "resourceType": "MODEL_USAGE",
252
+ "displayName": "gemini-3-pro-preview",
253
+ "displayNamePlural": "gemini-3-pro-preview",
254
+ "unit": "quota",
255
+ "currency": null,
256
+ "currentUsage": 0,
257
+ "usageLimit": 100,
258
+ "currentOverages": 0,
259
+ "overageCap": 0,
260
+ "overageRate": null,
261
+ "overageCharges": 0,
262
+ "nextDateReset": "2026-01-24T08:38:42.000Z",
263
+ "freeTrial": null,
264
+ "bonuses": [],
265
+ "modelName": "gemini-3-pro-preview",
266
+ "inputTokenLimit": 0,
267
+ "outputTokenLimit": 0,
268
+ "remaining": 1,
269
+ "remainingPercent": 100,
270
+ "resetTime": "01-25 00:38",
271
+ "resetTimeRaw": "2026-01-24T08:38:42Z"
272
+ },
273
+ {
274
+ "resourceType": "MODEL_USAGE",
275
+ "displayName": "gemini-claude-opus-4-5-thinking",
276
+ "displayNamePlural": "gemini-claude-opus-4-5-thinking",
277
+ "unit": "quota",
278
+ "currency": null,
279
+ "currentUsage": 0,
280
+ "usageLimit": 100,
281
+ "currentOverages": 0,
282
+ "overageCap": 0,
283
+ "overageRate": null,
284
+ "overageCharges": 0,
285
+ "nextDateReset": "2026-01-24T08:38:42.000Z",
286
+ "freeTrial": null,
287
+ "bonuses": [],
288
+ "modelName": "gemini-claude-opus-4-5-thinking",
289
+ "inputTokenLimit": 0,
290
+ "outputTokenLimit": 0,
291
+ "remaining": 1,
292
+ "remainingPercent": 100,
293
+ "resetTime": "01-25 00:38",
294
+ "resetTimeRaw": "2026-01-24T08:38:42Z"
295
+ },
296
+ {
297
+ "resourceType": "MODEL_USAGE",
298
+ "displayName": "gemini-claude-sonnet-4-5",
299
+ "displayNamePlural": "gemini-claude-sonnet-4-5",
300
+ "unit": "quota",
301
+ "currency": null,
302
+ "currentUsage": 0,
303
+ "usageLimit": 100,
304
+ "currentOverages": 0,
305
+ "overageCap": 0,
306
+ "overageRate": null,
307
+ "overageCharges": 0,
308
+ "nextDateReset": "2026-01-24T08:38:42.000Z",
309
+ "freeTrial": null,
310
+ "bonuses": [],
311
+ "modelName": "gemini-claude-sonnet-4-5",
312
+ "inputTokenLimit": 0,
313
+ "outputTokenLimit": 0,
314
+ "remaining": 1,
315
+ "remainingPercent": 100,
316
+ "resetTime": "01-25 00:38",
317
+ "resetTimeRaw": "2026-01-24T08:38:42Z"
318
+ },
319
+ {
320
+ "resourceType": "MODEL_USAGE",
321
+ "displayName": "gemini-claude-sonnet-4-5-thinking",
322
+ "displayNamePlural": "gemini-claude-sonnet-4-5-thinking",
323
+ "unit": "quota",
324
+ "currency": null,
325
+ "currentUsage": 0,
326
+ "usageLimit": 100,
327
+ "currentOverages": 0,
328
+ "overageCap": 0,
329
+ "overageRate": null,
330
+ "overageCharges": 0,
331
+ "nextDateReset": "2026-01-24T08:38:42.000Z",
332
+ "freeTrial": null,
333
+ "bonuses": [],
334
+ "modelName": "gemini-claude-sonnet-4-5-thinking",
335
+ "inputTokenLimit": 0,
336
+ "outputTokenLimit": 0,
337
+ "remaining": 1,
338
+ "remainingPercent": 100,
339
+ "resetTime": "01-25 00:38",
340
+ "resetTimeRaw": "2026-01-24T08:38:42Z"
341
+ }
342
+ ]
343
+ },
344
+ "error": null
345
+ },
346
+ {
347
+ "uuid": "74a34d43-2e2d-47a1-ae04-f7956f4eac32",
348
+ "name": "74a34d43-2e2d-47a1-ae04-f7956f4eac32",
349
+ "isHealthy": true,
350
+ "isDisabled": false,
351
+ "success": true,
352
+ "usage": {
353
+ "daysUntilReset": null,
354
+ "nextDateReset": null,
355
+ "subscription": {
356
+ "title": "Gemini Antigravity",
357
+ "type": "gemini-antigravity",
358
+ "upgradeCapability": null,
359
+ "overageCapability": null
360
+ },
361
+ "user": {
362
+ "email": null,
363
+ "userId": null
364
+ },
365
+ "usageBreakdown": [
366
+ {
367
+ "resourceType": "MODEL_USAGE",
368
+ "displayName": "gemini-2.5-flash-preview",
369
+ "displayNamePlural": "gemini-2.5-flash-preview",
370
+ "unit": "quota",
371
+ "currency": null,
372
+ "currentUsage": 0,
373
+ "usageLimit": 100,
374
+ "currentOverages": 0,
375
+ "overageCap": 0,
376
+ "overageRate": null,
377
+ "overageCharges": 0,
378
+ "nextDateReset": "2026-01-24T08:38:45.000Z",
379
+ "freeTrial": null,
380
+ "bonuses": [],
381
+ "modelName": "gemini-2.5-flash-preview",
382
+ "inputTokenLimit": 0,
383
+ "outputTokenLimit": 0,
384
+ "remaining": 1,
385
+ "remainingPercent": 100,
386
+ "resetTime": "01-25 00:38",
387
+ "resetTimeRaw": "2026-01-24T08:38:45Z"
388
+ },
389
+ {
390
+ "resourceType": "MODEL_USAGE",
391
+ "displayName": "gemini-3-flash-preview",
392
+ "displayNamePlural": "gemini-3-flash-preview",
393
+ "unit": "quota",
394
+ "currency": null,
395
+ "currentUsage": 0,
396
+ "usageLimit": 100,
397
+ "currentOverages": 0,
398
+ "overageCap": 0,
399
+ "overageRate": null,
400
+ "overageCharges": 0,
401
+ "nextDateReset": "2026-01-24T08:38:45.000Z",
402
+ "freeTrial": null,
403
+ "bonuses": [],
404
+ "modelName": "gemini-3-flash-preview",
405
+ "inputTokenLimit": 0,
406
+ "outputTokenLimit": 0,
407
+ "remaining": 1,
408
+ "remainingPercent": 100,
409
+ "resetTime": "01-25 00:38",
410
+ "resetTimeRaw": "2026-01-24T08:38:45Z"
411
+ },
412
+ {
413
+ "resourceType": "MODEL_USAGE",
414
+ "displayName": "gemini-3-pro-image-preview",
415
+ "displayNamePlural": "gemini-3-pro-image-preview",
416
+ "unit": "quota",
417
+ "currency": null,
418
+ "currentUsage": 0,
419
+ "usageLimit": 100,
420
+ "currentOverages": 0,
421
+ "overageCap": 0,
422
+ "overageRate": null,
423
+ "overageCharges": 0,
424
+ "nextDateReset": "2026-01-24T08:38:45.000Z",
425
+ "freeTrial": null,
426
+ "bonuses": [],
427
+ "modelName": "gemini-3-pro-image-preview",
428
+ "inputTokenLimit": 0,
429
+ "outputTokenLimit": 0,
430
+ "remaining": 1,
431
+ "remainingPercent": 100,
432
+ "resetTime": "01-25 00:38",
433
+ "resetTimeRaw": "2026-01-24T08:38:45Z"
434
+ },
435
+ {
436
+ "resourceType": "MODEL_USAGE",
437
+ "displayName": "gemini-3-pro-preview",
438
+ "displayNamePlural": "gemini-3-pro-preview",
439
+ "unit": "quota",
440
+ "currency": null,
441
+ "currentUsage": 0,
442
+ "usageLimit": 100,
443
+ "currentOverages": 0,
444
+ "overageCap": 0,
445
+ "overageRate": null,
446
+ "overageCharges": 0,
447
+ "nextDateReset": "2026-01-24T08:38:45.000Z",
448
+ "freeTrial": null,
449
+ "bonuses": [],
450
+ "modelName": "gemini-3-pro-preview",
451
+ "inputTokenLimit": 0,
452
+ "outputTokenLimit": 0,
453
+ "remaining": 1,
454
+ "remainingPercent": 100,
455
+ "resetTime": "01-25 00:38",
456
+ "resetTimeRaw": "2026-01-24T08:38:45Z"
457
+ },
458
+ {
459
+ "resourceType": "MODEL_USAGE",
460
+ "displayName": "gemini-claude-opus-4-5-thinking",
461
+ "displayNamePlural": "gemini-claude-opus-4-5-thinking",
462
+ "unit": "quota",
463
+ "currency": null,
464
+ "currentUsage": 0,
465
+ "usageLimit": 100,
466
+ "currentOverages": 0,
467
+ "overageCap": 0,
468
+ "overageRate": null,
469
+ "overageCharges": 0,
470
+ "nextDateReset": "2026-01-24T08:27:27.000Z",
471
+ "freeTrial": null,
472
+ "bonuses": [],
473
+ "modelName": "gemini-claude-opus-4-5-thinking",
474
+ "inputTokenLimit": 0,
475
+ "outputTokenLimit": 0,
476
+ "remaining": 1,
477
+ "remainingPercent": 100,
478
+ "resetTime": "01-25 00:27",
479
+ "resetTimeRaw": "2026-01-24T08:27:27Z"
480
+ },
481
+ {
482
+ "resourceType": "MODEL_USAGE",
483
+ "displayName": "gemini-claude-sonnet-4-5",
484
+ "displayNamePlural": "gemini-claude-sonnet-4-5",
485
+ "unit": "quota",
486
+ "currency": null,
487
+ "currentUsage": 0,
488
+ "usageLimit": 100,
489
+ "currentOverages": 0,
490
+ "overageCap": 0,
491
+ "overageRate": null,
492
+ "overageCharges": 0,
493
+ "nextDateReset": "2026-01-24T08:27:27.000Z",
494
+ "freeTrial": null,
495
+ "bonuses": [],
496
+ "modelName": "gemini-claude-sonnet-4-5",
497
+ "inputTokenLimit": 0,
498
+ "outputTokenLimit": 0,
499
+ "remaining": 1,
500
+ "remainingPercent": 100,
501
+ "resetTime": "01-25 00:27",
502
+ "resetTimeRaw": "2026-01-24T08:27:27Z"
503
+ },
504
+ {
505
+ "resourceType": "MODEL_USAGE",
506
+ "displayName": "gemini-claude-sonnet-4-5-thinking",
507
+ "displayNamePlural": "gemini-claude-sonnet-4-5-thinking",
508
+ "unit": "quota",
509
+ "currency": null,
510
+ "currentUsage": 0,
511
+ "usageLimit": 100,
512
+ "currentOverages": 0,
513
+ "overageCap": 0,
514
+ "overageRate": null,
515
+ "overageCharges": 0,
516
+ "nextDateReset": "2026-01-24T08:27:27.000Z",
517
+ "freeTrial": null,
518
+ "bonuses": [],
519
+ "modelName": "gemini-claude-sonnet-4-5-thinking",
520
+ "inputTokenLimit": 0,
521
+ "outputTokenLimit": 0,
522
+ "remaining": 1,
523
+ "remainingPercent": 100,
524
+ "resetTime": "01-25 00:27",
525
+ "resetTimeRaw": "2026-01-24T08:27:27Z"
526
+ }
527
+ ]
528
+ },
529
+ "error": null
530
+ },
531
+ {
532
+ "uuid": "2dc29208-0047-442a-a810-fd9af1290875",
533
+ "name": "2dc29208-0047-442a-a810-fd9af1290875",
534
+ "isHealthy": true,
535
+ "isDisabled": false,
536
+ "success": true,
537
+ "usage": {
538
+ "daysUntilReset": null,
539
+ "nextDateReset": null,
540
+ "subscription": {
541
+ "title": "Gemini Antigravity",
542
+ "type": "gemini-antigravity",
543
+ "upgradeCapability": null,
544
+ "overageCapability": null
545
+ },
546
+ "user": {
547
+ "email": null,
548
+ "userId": null
549
+ },
550
+ "usageBreakdown": [
551
+ {
552
+ "resourceType": "MODEL_USAGE",
553
+ "displayName": "gemini-2.5-flash-preview",
554
+ "displayNamePlural": "gemini-2.5-flash-preview",
555
+ "unit": "quota",
556
+ "currency": null,
557
+ "currentUsage": 0,
558
+ "usageLimit": 100,
559
+ "currentOverages": 0,
560
+ "overageCap": 0,
561
+ "overageRate": null,
562
+ "overageCharges": 0,
563
+ "nextDateReset": "2026-01-24T08:38:46.000Z",
564
+ "freeTrial": null,
565
+ "bonuses": [],
566
+ "modelName": "gemini-2.5-flash-preview",
567
+ "inputTokenLimit": 0,
568
+ "outputTokenLimit": 0,
569
+ "remaining": 1,
570
+ "remainingPercent": 100,
571
+ "resetTime": "01-25 00:38",
572
+ "resetTimeRaw": "2026-01-24T08:38:46Z"
573
+ },
574
+ {
575
+ "resourceType": "MODEL_USAGE",
576
+ "displayName": "gemini-3-flash-preview",
577
+ "displayNamePlural": "gemini-3-flash-preview",
578
+ "unit": "quota",
579
+ "currency": null,
580
+ "currentUsage": 0,
581
+ "usageLimit": 100,
582
+ "currentOverages": 0,
583
+ "overageCap": 0,
584
+ "overageRate": null,
585
+ "overageCharges": 0,
586
+ "nextDateReset": "2026-01-24T08:38:46.000Z",
587
+ "freeTrial": null,
588
+ "bonuses": [],
589
+ "modelName": "gemini-3-flash-preview",
590
+ "inputTokenLimit": 0,
591
+ "outputTokenLimit": 0,
592
+ "remaining": 1,
593
+ "remainingPercent": 100,
594
+ "resetTime": "01-25 00:38",
595
+ "resetTimeRaw": "2026-01-24T08:38:46Z"
596
+ },
597
+ {
598
+ "resourceType": "MODEL_USAGE",
599
+ "displayName": "gemini-3-pro-image-preview",
600
+ "displayNamePlural": "gemini-3-pro-image-preview",
601
+ "unit": "quota",
602
+ "currency": null,
603
+ "currentUsage": 0,
604
+ "usageLimit": 100,
605
+ "currentOverages": 0,
606
+ "overageCap": 0,
607
+ "overageRate": null,
608
+ "overageCharges": 0,
609
+ "nextDateReset": "2026-01-24T08:38:46.000Z",
610
+ "freeTrial": null,
611
+ "bonuses": [],
612
+ "modelName": "gemini-3-pro-image-preview",
613
+ "inputTokenLimit": 0,
614
+ "outputTokenLimit": 0,
615
+ "remaining": 1,
616
+ "remainingPercent": 100,
617
+ "resetTime": "01-25 00:38",
618
+ "resetTimeRaw": "2026-01-24T08:38:46Z"
619
+ },
620
+ {
621
+ "resourceType": "MODEL_USAGE",
622
+ "displayName": "gemini-3-pro-preview",
623
+ "displayNamePlural": "gemini-3-pro-preview",
624
+ "unit": "quota",
625
+ "currency": null,
626
+ "currentUsage": 0,
627
+ "usageLimit": 100,
628
+ "currentOverages": 0,
629
+ "overageCap": 0,
630
+ "overageRate": null,
631
+ "overageCharges": 0,
632
+ "nextDateReset": "2026-01-24T08:38:46.000Z",
633
+ "freeTrial": null,
634
+ "bonuses": [],
635
+ "modelName": "gemini-3-pro-preview",
636
+ "inputTokenLimit": 0,
637
+ "outputTokenLimit": 0,
638
+ "remaining": 1,
639
+ "remainingPercent": 100,
640
+ "resetTime": "01-25 00:38",
641
+ "resetTimeRaw": "2026-01-24T08:38:46Z"
642
+ },
643
+ {
644
+ "resourceType": "MODEL_USAGE",
645
+ "displayName": "gemini-claude-opus-4-5-thinking",
646
+ "displayNamePlural": "gemini-claude-opus-4-5-thinking",
647
+ "unit": "quota",
648
+ "currency": null,
649
+ "currentUsage": 0,
650
+ "usageLimit": 100,
651
+ "currentOverages": 0,
652
+ "overageCap": 0,
653
+ "overageRate": null,
654
+ "overageCharges": 0,
655
+ "nextDateReset": "2026-01-24T08:38:46.000Z",
656
+ "freeTrial": null,
657
+ "bonuses": [],
658
+ "modelName": "gemini-claude-opus-4-5-thinking",
659
+ "inputTokenLimit": 0,
660
+ "outputTokenLimit": 0,
661
+ "remaining": 1,
662
+ "remainingPercent": 100,
663
+ "resetTime": "01-25 00:38",
664
+ "resetTimeRaw": "2026-01-24T08:38:46Z"
665
+ },
666
+ {
667
+ "resourceType": "MODEL_USAGE",
668
+ "displayName": "gemini-claude-sonnet-4-5",
669
+ "displayNamePlural": "gemini-claude-sonnet-4-5",
670
+ "unit": "quota",
671
+ "currency": null,
672
+ "currentUsage": 0,
673
+ "usageLimit": 100,
674
+ "currentOverages": 0,
675
+ "overageCap": 0,
676
+ "overageRate": null,
677
+ "overageCharges": 0,
678
+ "nextDateReset": "2026-01-24T08:38:46.000Z",
679
+ "freeTrial": null,
680
+ "bonuses": [],
681
+ "modelName": "gemini-claude-sonnet-4-5",
682
+ "inputTokenLimit": 0,
683
+ "outputTokenLimit": 0,
684
+ "remaining": 1,
685
+ "remainingPercent": 100,
686
+ "resetTime": "01-25 00:38",
687
+ "resetTimeRaw": "2026-01-24T08:38:46Z"
688
+ },
689
+ {
690
+ "resourceType": "MODEL_USAGE",
691
+ "displayName": "gemini-claude-sonnet-4-5-thinking",
692
+ "displayNamePlural": "gemini-claude-sonnet-4-5-thinking",
693
+ "unit": "quota",
694
+ "currency": null,
695
+ "currentUsage": 0,
696
+ "usageLimit": 100,
697
+ "currentOverages": 0,
698
+ "overageCap": 0,
699
+ "overageRate": null,
700
+ "overageCharges": 0,
701
+ "nextDateReset": "2026-01-24T08:38:46.000Z",
702
+ "freeTrial": null,
703
+ "bonuses": [],
704
+ "modelName": "gemini-claude-sonnet-4-5-thinking",
705
+ "inputTokenLimit": 0,
706
+ "outputTokenLimit": 0,
707
+ "remaining": 1,
708
+ "remainingPercent": 100,
709
+ "resetTime": "01-25 00:38",
710
+ "resetTimeRaw": "2026-01-24T08:38:46Z"
711
+ }
712
+ ]
713
+ },
714
+ "error": null
715
+ },
716
+ {
717
+ "uuid": "6d7301c2-da5c-491c-8fb9-e822ec781bb8",
718
+ "name": "6d7301c2-da5c-491c-8fb9-e822ec781bb8",
719
+ "isHealthy": true,
720
+ "isDisabled": false,
721
+ "success": true,
722
+ "usage": {
723
+ "daysUntilReset": null,
724
+ "nextDateReset": null,
725
+ "subscription": {
726
+ "title": "Gemini Antigravity",
727
+ "type": "gemini-antigravity",
728
+ "upgradeCapability": null,
729
+ "overageCapability": null
730
+ },
731
+ "user": {
732
+ "email": null,
733
+ "userId": null
734
+ },
735
+ "usageBreakdown": [
736
+ {
737
+ "resourceType": "MODEL_USAGE",
738
+ "displayName": "gemini-2.5-flash-preview",
739
+ "displayNamePlural": "gemini-2.5-flash-preview",
740
+ "unit": "quota",
741
+ "currency": null,
742
+ "currentUsage": 0,
743
+ "usageLimit": 100,
744
+ "currentOverages": 0,
745
+ "overageCap": 0,
746
+ "overageRate": null,
747
+ "overageCharges": 0,
748
+ "nextDateReset": "2026-01-24T08:38:49.000Z",
749
+ "freeTrial": null,
750
+ "bonuses": [],
751
+ "modelName": "gemini-2.5-flash-preview",
752
+ "inputTokenLimit": 0,
753
+ "outputTokenLimit": 0,
754
+ "remaining": 1,
755
+ "remainingPercent": 100,
756
+ "resetTime": "01-25 00:38",
757
+ "resetTimeRaw": "2026-01-24T08:38:49Z"
758
+ },
759
+ {
760
+ "resourceType": "MODEL_USAGE",
761
+ "displayName": "gemini-3-flash-preview",
762
+ "displayNamePlural": "gemini-3-flash-preview",
763
+ "unit": "quota",
764
+ "currency": null,
765
+ "currentUsage": 0,
766
+ "usageLimit": 100,
767
+ "currentOverages": 0,
768
+ "overageCap": 0,
769
+ "overageRate": null,
770
+ "overageCharges": 0,
771
+ "nextDateReset": "2026-01-24T08:38:49.000Z",
772
+ "freeTrial": null,
773
+ "bonuses": [],
774
+ "modelName": "gemini-3-flash-preview",
775
+ "inputTokenLimit": 0,
776
+ "outputTokenLimit": 0,
777
+ "remaining": 1,
778
+ "remainingPercent": 100,
779
+ "resetTime": "01-25 00:38",
780
+ "resetTimeRaw": "2026-01-24T08:38:49Z"
781
+ },
782
+ {
783
+ "resourceType": "MODEL_USAGE",
784
+ "displayName": "gemini-3-pro-image-preview",
785
+ "displayNamePlural": "gemini-3-pro-image-preview",
786
+ "unit": "quota",
787
+ "currency": null,
788
+ "currentUsage": 0,
789
+ "usageLimit": 100,
790
+ "currentOverages": 0,
791
+ "overageCap": 0,
792
+ "overageRate": null,
793
+ "overageCharges": 0,
794
+ "nextDateReset": "2026-01-24T08:38:49.000Z",
795
+ "freeTrial": null,
796
+ "bonuses": [],
797
+ "modelName": "gemini-3-pro-image-preview",
798
+ "inputTokenLimit": 0,
799
+ "outputTokenLimit": 0,
800
+ "remaining": 1,
801
+ "remainingPercent": 100,
802
+ "resetTime": "01-25 00:38",
803
+ "resetTimeRaw": "2026-01-24T08:38:49Z"
804
+ },
805
+ {
806
+ "resourceType": "MODEL_USAGE",
807
+ "displayName": "gemini-3-pro-preview",
808
+ "displayNamePlural": "gemini-3-pro-preview",
809
+ "unit": "quota",
810
+ "currency": null,
811
+ "currentUsage": 0,
812
+ "usageLimit": 100,
813
+ "currentOverages": 0,
814
+ "overageCap": 0,
815
+ "overageRate": null,
816
+ "overageCharges": 0,
817
+ "nextDateReset": "2026-01-24T08:38:49.000Z",
818
+ "freeTrial": null,
819
+ "bonuses": [],
820
+ "modelName": "gemini-3-pro-preview",
821
+ "inputTokenLimit": 0,
822
+ "outputTokenLimit": 0,
823
+ "remaining": 1,
824
+ "remainingPercent": 100,
825
+ "resetTime": "01-25 00:38",
826
+ "resetTimeRaw": "2026-01-24T08:38:49Z"
827
+ },
828
+ {
829
+ "resourceType": "MODEL_USAGE",
830
+ "displayName": "gemini-claude-opus-4-5-thinking",
831
+ "displayNamePlural": "gemini-claude-opus-4-5-thinking",
832
+ "unit": "quota",
833
+ "currency": null,
834
+ "currentUsage": 0,
835
+ "usageLimit": 100,
836
+ "currentOverages": 0,
837
+ "overageCap": 0,
838
+ "overageRate": null,
839
+ "overageCharges": 0,
840
+ "nextDateReset": "2026-01-24T07:24:04.000Z",
841
+ "freeTrial": null,
842
+ "bonuses": [],
843
+ "modelName": "gemini-claude-opus-4-5-thinking",
844
+ "inputTokenLimit": 0,
845
+ "outputTokenLimit": 0,
846
+ "remaining": 1,
847
+ "remainingPercent": 100,
848
+ "resetTime": "01-24 23:24",
849
+ "resetTimeRaw": "2026-01-24T07:24:04Z"
850
+ },
851
+ {
852
+ "resourceType": "MODEL_USAGE",
853
+ "displayName": "gemini-claude-sonnet-4-5",
854
+ "displayNamePlural": "gemini-claude-sonnet-4-5",
855
+ "unit": "quota",
856
+ "currency": null,
857
+ "currentUsage": 0,
858
+ "usageLimit": 100,
859
+ "currentOverages": 0,
860
+ "overageCap": 0,
861
+ "overageRate": null,
862
+ "overageCharges": 0,
863
+ "nextDateReset": "2026-01-24T07:24:04.000Z",
864
+ "freeTrial": null,
865
+ "bonuses": [],
866
+ "modelName": "gemini-claude-sonnet-4-5",
867
+ "inputTokenLimit": 0,
868
+ "outputTokenLimit": 0,
869
+ "remaining": 1,
870
+ "remainingPercent": 100,
871
+ "resetTime": "01-24 23:24",
872
+ "resetTimeRaw": "2026-01-24T07:24:04Z"
873
+ },
874
+ {
875
+ "resourceType": "MODEL_USAGE",
876
+ "displayName": "gemini-claude-sonnet-4-5-thinking",
877
+ "displayNamePlural": "gemini-claude-sonnet-4-5-thinking",
878
+ "unit": "quota",
879
+ "currency": null,
880
+ "currentUsage": 0,
881
+ "usageLimit": 100,
882
+ "currentOverages": 0,
883
+ "overageCap": 0,
884
+ "overageRate": null,
885
+ "overageCharges": 0,
886
+ "nextDateReset": "2026-01-24T07:24:04.000Z",
887
+ "freeTrial": null,
888
+ "bonuses": [],
889
+ "modelName": "gemini-claude-sonnet-4-5-thinking",
890
+ "inputTokenLimit": 0,
891
+ "outputTokenLimit": 0,
892
+ "remaining": 1,
893
+ "remainingPercent": 100,
894
+ "resetTime": "01-24 23:24",
895
+ "resetTimeRaw": "2026-01-24T07:24:04Z"
896
+ }
897
+ ]
898
+ },
899
+ "error": null
900
+ }
901
+ ],
902
+ "totalCount": 4,
903
+ "successCount": 4,
904
+ "errorCount": 0
905
+ }
906
+ }
907
+ }
healthcheck.js ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * Docker健康检查脚本
3
+ * 用于检查API服务器是否正常运行
4
+ */
5
+
6
+ import http from 'http';
7
+
8
+ // 从环境变量获取主机和端口,如果没有设置则使用默认值
9
+ const HOST = process.env.HOST || 'localhost';
10
+ const PORT = process.env.SERVER_PORT || 3000;
11
+
12
+ // 发送HTTP请求到健康检查端点
13
+ const options = {
14
+ hostname: HOST,
15
+ port: PORT,
16
+ path: '/health',
17
+ method: 'GET',
18
+ timeout: 2000 // 2秒超时
19
+ };
20
+
21
+ const req = http.request(options, (res) => {
22
+ // 如果状态码是200,表示服务健康
23
+ if (res.statusCode === 200) {
24
+ console.log('Health check passed');
25
+ process.exit(0);
26
+ } else {
27
+ console.log(`Health check failed with status code: ${res.statusCode}`);
28
+ process.exit(1);
29
+ }
30
+ });
31
+
32
+ // 处理请求错误
33
+ req.on('error', (e) => {
34
+ console.error(`Health check failed: ${e.message}`);
35
+ process.exit(1);
36
+ });
37
+
38
+ // 设置超时处理
39
+ req.on('timeout', () => {
40
+ console.error('Health check timed out');
41
+ req.destroy();
42
+ process.exit(1);
43
+ });
44
+
45
+ // 结束请求
46
+ req.end();
package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
package.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "type": "module",
3
+ "dependencies": {
4
+ "@anthropic-ai/tokenizer": "^0.0.4",
5
+ "adm-zip": "^0.5.16",
6
+ "axios": "^1.10.0",
7
+ "deepmerge": "^4.3.1",
8
+ "dotenv": "^16.4.5",
9
+ "google-auth-library": "^10.1.0",
10
+ "http-proxy-agent": "^7.0.2",
11
+ "https-proxy-agent": "^7.0.6",
12
+ "lodash": "^4.17.21",
13
+ "multer": "^2.0.2",
14
+ "open": "^10.2.0",
15
+ "socks-proxy-agent": "^8.0.5",
16
+ "undici": "^7.12.0",
17
+ "uuid": "^11.1.0",
18
+ "ws": "^8.19.0"
19
+ },
20
+ "devDependencies": {
21
+ "@babel/preset-env": "^7.28.0",
22
+ "@jest/globals": "^29.7.0",
23
+ "babel-jest": "^30.0.5",
24
+ "babel-plugin-transform-import-meta": "^2.3.3",
25
+ "jest": "^29.7.0",
26
+ "jest-environment-node": "^29.7.0",
27
+ "supertest": "^6.3.3"
28
+ },
29
+ "scripts": {
30
+ "start": "node src/core/master.js",
31
+ "start:standalone": "node src/services/api-server.js",
32
+ "start:dev": "node src/core/master.js --dev",
33
+ "test": "jest",
34
+ "test:watch": "jest --watch",
35
+ "test:coverage": "jest --coverage",
36
+ "test:verbose": "jest --verbose",
37
+ "test:silent": "jest --silent",
38
+ "test:unit": "node run-tests.js --unit",
39
+ "test:integration": "node run-tests.js --integration",
40
+ "test:summary": "node test-summary.js"
41
+ }
42
+ }
src/auth/codex-oauth.js ADDED
@@ -0,0 +1,851 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import http from 'http';
2
+ import fs from 'fs';
3
+ import path from 'path';
4
+ import crypto from 'crypto';
5
+ import open from 'open';
6
+ import axios from 'axios';
7
+ import { broadcastEvent } from '../services/ui-manager.js';
8
+ import { autoLinkProviderConfigs } from '../services/service-manager.js';
9
+ import { CONFIG } from '../core/config-manager.js';
10
+ import { getProxyConfigForProvider } from '../utils/proxy-utils.js';
11
+
12
+ /**
13
+ * Codex OAuth 配置
14
+ */
15
+ const CODEX_OAUTH_CONFIG = {
16
+ clientId: 'app_EMoamEEZ73f0CkXaXp7hrann',
17
+ authUrl: 'https://auth.openai.com/oauth/authorize',
18
+ tokenUrl: 'https://auth.openai.com/oauth/token',
19
+ redirectUri: 'http://localhost:1455/auth/callback',
20
+ port: 1455,
21
+ scopes: 'openid email profile offline_access',
22
+ logPrefix: '[Codex Auth]'
23
+ };
24
+
25
+ /**
26
+ * Codex OAuth 认证类
27
+ * 实现 OAuth2 + PKCE 流程
28
+ */
29
+ class CodexAuth {
30
+ constructor(config) {
31
+ this.config = config;
32
+
33
+ // 配置代理支持
34
+ const axiosConfig = { timeout: 30000 };
35
+ const proxyConfig = getProxyConfigForProvider(config, 'openai-codex-oauth');
36
+ if (proxyConfig) {
37
+ axiosConfig.httpAgent = proxyConfig.httpAgent;
38
+ axiosConfig.httpsAgent = proxyConfig.httpsAgent;
39
+ console.log('[Codex Auth] Proxy enabled for OAuth requests');
40
+ }
41
+
42
+ this.httpClient = axios.create(axiosConfig);
43
+ this.server = null; // 存储服务器实例
44
+ }
45
+
46
+ /**
47
+ * 生成 PKCE 代码
48
+ * @returns {{verifier: string, challenge: string}}
49
+ */
50
+ generatePKCECodes() {
51
+ // 生成 code verifier (96 随机字节 → 128 base64url 字符)
52
+ const verifier = crypto.randomBytes(96)
53
+ .toString('base64url');
54
+
55
+ // 生成 code challenge (SHA256 of verifier)
56
+ const challenge = crypto.createHash('sha256')
57
+ .update(verifier)
58
+ .digest('base64url');
59
+
60
+ return { verifier, challenge };
61
+ }
62
+
63
  /**
   * Generate an authorization URL and start the local callback server,
   * WITHOUT driving the full flow (no browser launch, no token exchange).
   * The caller is expected to direct the user to `authUrl` and later call
   * completeOAuthFlow() with the callback's code/state plus the returned
   * `state` and `pkce`.
   * @returns {Promise<{authUrl: string, state: string, pkce: {verifier: string, challenge: string}, server: Object}>}
   */
  async generateAuthUrl() {
    const pkce = this.generatePKCECodes();
    // Random state for CSRF protection; verified in completeOAuthFlow().
    const state = crypto.randomBytes(16).toString('hex');

    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Generating auth URL...`);

    // If a callback server from a previous attempt is still running, close it
    // first so the port is free for the new one.
    if (this.server) {
      console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Closing existing callback server...`);
      try {
        this.server.close();
        this.server = null;
      } catch (error) {
        console.warn(`${CODEX_OAUTH_CONFIG.logPrefix} Failed to close existing server:`, error.message);
      }
    }

    // Start the local callback server and remember the handle so a later
    // call (or completeOAuthFlow) can shut it down.
    const server = await this.startCallbackServer();
    this.server = server;

    // Build the authorization URL with PKCE (S256) parameters.
    // NOTE(review): 'codex_cli_simplified_flow' and 'id_token_add_organizations'
    // appear to be OpenAI-specific flags — presumably mirroring the Codex CLI;
    // confirm against current OpenAI auth behavior.
    const authUrl = new URL(CODEX_OAUTH_CONFIG.authUrl);
    authUrl.searchParams.set('client_id', CODEX_OAUTH_CONFIG.clientId);
    authUrl.searchParams.set('response_type', 'code');
    authUrl.searchParams.set('redirect_uri', CODEX_OAUTH_CONFIG.redirectUri);
    authUrl.searchParams.set('scope', CODEX_OAUTH_CONFIG.scopes);
    authUrl.searchParams.set('state', state);
    authUrl.searchParams.set('code_challenge', pkce.challenge);
    authUrl.searchParams.set('code_challenge_method', 'S256');
    authUrl.searchParams.set('prompt', 'login');
    authUrl.searchParams.set('id_token_add_organizations', 'true');
    authUrl.searchParams.set('codex_cli_simplified_flow', 'true');

    return {
      authUrl: authUrl.toString(),
      state,
      pkce,
      server
    };
  }
108
+
109
  /**
   * Complete the OAuth flow after the authorization callback has been
   * received: validate state, exchange the code for tokens, persist the
   * credentials, and shut down the local callback server.
   * @param {string} code - Authorization code from the callback.
   * @param {string} state - State parameter received in the callback.
   * @param {string} expectedState - State generated by generateAuthUrl().
   * @param {{verifier: string}} pkce - PKCE pair from generateAuthUrl().
   * @returns {Promise<Object>} The saved credentials plus `credPath` and
   *   `relativePath` describing where they were written.
   * @throws {Error} If `state` does not match `expectedState` (CSRF guard).
   */
  async completeOAuthFlow(code, state, expectedState, pkce) {
    // CSRF guard: the callback's state must match what we issued.
    if (state !== expectedState) {
      throw new Error('State mismatch - possible CSRF attack');
    }

    // Exchange the authorization code (plus PKCE verifier) for tokens.
    const tokens = await this.exchangeCodeForTokens(code, pkce.verifier);

    // Decode the id_token's claims to extract account info.
    // NOTE(review): parseJWT is presumably decode-only (no signature
    // verification) — confirm in its implementation.
    const claims = this.parseJWT(tokens.id_token);

    // Assemble credentials in the CLIProxyAPI-compatible format.
    const credentials = {
      id_token: tokens.id_token,
      access_token: tokens.access_token,
      refresh_token: tokens.refresh_token,
      // Prefer the ChatGPT account id claim; fall back to the OIDC subject.
      account_id: claims['https://api.openai.com/auth']?.chatgpt_account_id || claims.sub,
      last_refresh: new Date().toISOString(),
      email: claims.email,
      type: 'codex',
      // Default to a 1-hour lifetime when expires_in is absent.
      expired: new Date(Date.now() + (tokens.expires_in || 3600) * 1000).toISOString()
    };

    // Persist the credentials and capture where they were written.
    const saveResult = await this.saveCredentials(credentials);
    const credPath = saveResult.credsPath;
    const relativePath = saveResult.relativePath;

    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Authentication successful!`);
    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Email: ${credentials.email}`);
    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Account ID: ${credentials.account_id}`);

    // The flow is done — release the local callback server.
    if (this.server) {
      this.server.close();
      this.server = null;
    }

    return {
      ...credentials,
      credPath,
      relativePath
    };
  }
162
+
163
  /**
   * Run the full interactive OAuth flow end-to-end: start the callback
   * server, open the user's browser to the authorization URL, wait for the
   * redirect, exchange the code for tokens, and persist the credentials.
   * NOTE(review): the URL construction here duplicates generateAuthUrl();
   * consider sharing a helper to keep the two in sync.
   * @returns {Promise<Object>} The saved credentials object.
   */
  async startOAuthFlow() {
    const pkce = this.generatePKCECodes();
    // Random state for CSRF protection; checked when the callback arrives.
    const state = crypto.randomBytes(16).toString('hex');

    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Starting OAuth flow...`);

    // Start the local callback server that will receive the redirect.
    const server = await this.startCallbackServer();

    // Build the authorization URL with PKCE (S256) parameters.
    const authUrl = new URL(CODEX_OAUTH_CONFIG.authUrl);
    authUrl.searchParams.set('client_id', CODEX_OAUTH_CONFIG.clientId);
    authUrl.searchParams.set('response_type', 'code');
    authUrl.searchParams.set('redirect_uri', CODEX_OAUTH_CONFIG.redirectUri);
    authUrl.searchParams.set('scope', CODEX_OAUTH_CONFIG.scopes);
    authUrl.searchParams.set('state', state);
    authUrl.searchParams.set('code_challenge', pkce.challenge);
    authUrl.searchParams.set('code_challenge_method', 'S256');
    authUrl.searchParams.set('prompt', 'login');
    authUrl.searchParams.set('id_token_add_organizations', 'true');
    authUrl.searchParams.set('codex_cli_simplified_flow', 'true');

    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Opening browser for authentication...`);
    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} If browser doesn't open, visit: ${authUrl.toString()}`);

    // Best-effort browser launch; the URL is logged above as a fallback.
    try {
      await open(authUrl.toString());
    } catch (error) {
      console.warn(`${CODEX_OAUTH_CONFIG.logPrefix} Failed to open browser automatically:`, error.message);
    }

    // Block until the callback server receives the redirect.
    // NOTE(review): waitForCallback presumably validates `state` internally —
    // confirm, since this path (unlike completeOAuthFlow) has no explicit check.
    const result = await this.waitForCallback(server, state);

    // Exchange the authorization code (plus PKCE verifier) for tokens.
    const tokens = await this.exchangeCodeForTokens(result.code, pkce.verifier);

    // Decode the id_token's claims to extract account info.
    const claims = this.parseJWT(tokens.id_token);

    // Assemble credentials in the CLIProxyAPI-compatible format.
    const credentials = {
      id_token: tokens.id_token,
      access_token: tokens.access_token,
      refresh_token: tokens.refresh_token,
      // Prefer the ChatGPT account id claim; fall back to the OIDC subject.
      account_id: claims['https://api.openai.com/auth']?.chatgpt_account_id || claims.sub,
      last_refresh: new Date().toISOString(),
      email: claims.email,
      type: 'codex',
      // Default to a 1-hour lifetime when expires_in is absent.
      expired: new Date(Date.now() + (tokens.expires_in || 3600) * 1000).toISOString()
    };

    await this.saveCredentials(credentials);

    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Authentication successful!`);
    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Email: ${credentials.email}`);
    console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Account ID: ${credentials.account_id}`);

    return credentials;
  }
227
+
228
+ /**
229
+ * 启动回调服务器
230
+ * @returns {Promise<http.Server>}
231
+ */
232
+ async startCallbackServer() {
233
+ return new Promise((resolve, reject) => {
234
+ const server = http.createServer();
235
+
236
+ server.on('request', (req, res) => {
237
+ if (req.url.startsWith('/auth/callback')) {
238
+ const url = new URL(req.url, `http://localhost:${CODEX_OAUTH_CONFIG.port}`);
239
+ const code = url.searchParams.get('code');
240
+ const state = url.searchParams.get('state');
241
+ const error = url.searchParams.get('error');
242
+ const errorDescription = url.searchParams.get('error_description');
243
+
244
+ if (error) {
245
+ res.writeHead(400, { 'Content-Type': 'text/html; charset=utf-8' });
246
+ res.end(`
247
+ <!DOCTYPE html>
248
+ <html>
249
+ <head>
250
+ <title>Authentication Failed</title>
251
+ <style>
252
+ body { font-family: Arial, sans-serif; text-align: center; padding: 50px; }
253
+ h1 { color: #d32f2f; }
254
+ p { color: #666; }
255
+ </style>
256
+ </head>
257
+ <body>
258
+ <h1>❌ Authentication Failed</h1>
259
+ <p>${errorDescription || error}</p>
260
+ <p>You can close this window and try again.</p>
261
+ </body>
262
+ </html>
263
+ `);
264
+ server.emit('auth-error', new Error(errorDescription || error));
265
+ } else if (code && state) {
266
+ res.writeHead(200, { 'Content-Type': 'text/html; charset=utf-8' });
267
+ res.end(`
268
+ <!DOCTYPE html>
269
+ <html>
270
+ <head>
271
+ <title>Authentication Successful</title>
272
+ <style>
273
+ body { font-family: Arial, sans-serif; text-align: center; padding: 50px; }
274
+ h1 { color: #4caf50; }
275
+ p { color: #666; }
276
+ .countdown { font-size: 24px; font-weight: bold; color: #2196f3; }
277
+ </style>
278
+ <script>
279
+ let countdown = 10;
280
+ setInterval(() => {
281
+ countdown--;
282
+ document.getElementById('countdown').textContent = countdown;
283
+ if (countdown <= 0) {
284
+ window.close();
285
+ }
286
+ }, 1000);
287
+ </script>
288
+ </head>
289
+ <body>
290
+ <h1>✅ Authentication Successful!</h1>
291
+ <p>You can now close this window and return to the application.</p>
292
+ <p>This window will close automatically in <span id="countdown" class="countdown">10</span> seconds.</p>
293
+ </body>
294
+ </html>
295
+ `);
296
+ server.emit('auth-success', { code, state });
297
+ }
298
+ } else if (req.url === '/success') {
299
+ res.writeHead(200, { 'Content-Type': 'text/html; charset=utf-8' });
300
+ res.end('<h1>Success!</h1>');
301
+ }
302
+ });
303
+
304
+ server.listen(CODEX_OAUTH_CONFIG.port, () => {
305
+ console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Callback server listening on port ${CODEX_OAUTH_CONFIG.port}`);
306
+ resolve(server);
307
+ });
308
+
309
+ server.on('error', (error) => {
310
+ if (error.code === 'EADDRINUSE') {
311
+ reject(new Error(`Port ${CODEX_OAUTH_CONFIG.port} is already in use. Please close other applications using this port.`));
312
+ } else {
313
+ reject(error);
314
+ }
315
+ });
316
+ });
317
+ }
318
+
319
+ /**
320
+ * 等待 OAuth 回调
321
+ * @param {http.Server} server
322
+ * @param {string} expectedState
323
+ * @returns {Promise<{code: string, state: string}>}
324
+ */
325
+ async waitForCallback(server, expectedState) {
326
+ return new Promise((resolve, reject) => {
327
+ const timeout = setTimeout(() => {
328
+ server.close();
329
+ reject(new Error('Authentication timeout (10 minutes)'));
330
+ }, 10 * 60 * 1000); // 10 分钟
331
+
332
+ server.once('auth-success', (result) => {
333
+ clearTimeout(timeout);
334
+ server.close();
335
+
336
+ if (result.state !== expectedState) {
337
+ reject(new Error('State mismatch - possible CSRF attack'));
338
+ } else {
339
+ resolve(result);
340
+ }
341
+ });
342
+
343
+ server.once('auth-error', (error) => {
344
+ clearTimeout(timeout);
345
+ server.close();
346
+ reject(error);
347
+ });
348
+ });
349
+ }
350
+
351
+ /**
352
+ * 用授权码换取 tokens
353
+ * @param {string} code
354
+ * @param {string} codeVerifier
355
+ * @returns {Promise<Object>}
356
+ */
357
+ async exchangeCodeForTokens(code, codeVerifier) {
358
+ console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Exchanging authorization code for tokens...`);
359
+
360
+ try {
361
+ const response = await this.httpClient.post(
362
+ CODEX_OAUTH_CONFIG.tokenUrl,
363
+ new URLSearchParams({
364
+ grant_type: 'authorization_code',
365
+ client_id: CODEX_OAUTH_CONFIG.clientId,
366
+ code: code,
367
+ redirect_uri: CODEX_OAUTH_CONFIG.redirectUri,
368
+ code_verifier: codeVerifier
369
+ }).toString(),
370
+ {
371
+ headers: {
372
+ 'Content-Type': 'application/x-www-form-urlencoded',
373
+ 'Accept': 'application/json'
374
+ }
375
+ }
376
+ );
377
+
378
+ return response.data;
379
+ } catch (error) {
380
+ console.error(`${CODEX_OAUTH_CONFIG.logPrefix} Token exchange failed:`, error.response?.data || error.message);
381
+ throw new Error(`Failed to exchange code for tokens: ${error.response?.data?.error_description || error.message}`);
382
+ }
383
+ }
384
+
385
+ /**
386
+ * 刷新 tokens
387
+ * @param {string} refreshToken
388
+ * @returns {Promise<Object>}
389
+ */
390
+ async refreshTokens(refreshToken) {
391
+ console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Refreshing access token...`);
392
+
393
+ try {
394
+ const response = await this.httpClient.post(
395
+ CODEX_OAUTH_CONFIG.tokenUrl,
396
+ new URLSearchParams({
397
+ grant_type: 'refresh_token',
398
+ client_id: CODEX_OAUTH_CONFIG.clientId,
399
+ refresh_token: refreshToken
400
+ }).toString(),
401
+ {
402
+ headers: {
403
+ 'Content-Type': 'application/x-www-form-urlencoded',
404
+ 'Accept': 'application/json'
405
+ }
406
+ }
407
+ );
408
+
409
+ const tokens = response.data;
410
+ const claims = this.parseJWT(tokens.id_token);
411
+
412
+ return {
413
+ id_token: tokens.id_token,
414
+ access_token: tokens.access_token,
415
+ refresh_token: tokens.refresh_token || refreshToken,
416
+ account_id: claims['https://api.openai.com/auth']?.chatgpt_account_id || claims.sub,
417
+ last_refresh: new Date().toISOString(),
418
+ email: claims.email,
419
+ type: 'codex',
420
+ expired: new Date(Date.now() + (tokens.expires_in || 3600) * 1000).toISOString()
421
+ };
422
+ } catch (error) {
423
+ console.error(`${CODEX_OAUTH_CONFIG.logPrefix} Token refresh failed:`, error.response?.data || error.message);
424
+ throw new Error(`Failed to refresh tokens: ${error.response?.data?.error_description || error.message}`);
425
+ }
426
+ }
427
+
428
+ /**
429
+ * 解析 JWT token
430
+ * @param {string} token
431
+ * @returns {Object}
432
+ */
433
+ parseJWT(token) {
434
+ try {
435
+ const parts = token.split('.');
436
+ if (parts.length !== 3) {
437
+ throw new Error('Invalid JWT token format');
438
+ }
439
+
440
+ // 解码 payload (base64url)
441
+ const payload = Buffer.from(parts[1], 'base64url').toString('utf8');
442
+ return JSON.parse(payload);
443
+ } catch (error) {
444
+ console.error(`${CODEX_OAUTH_CONFIG.logPrefix} Failed to parse JWT:`, error.message);
445
+ throw new Error(`Failed to parse JWT token: ${error.message}`);
446
+ }
447
+ }
448
+
449
+ /**
450
+ * 保存凭据到文件
451
+ * @param {Object} creds
452
+ * @returns {Promise<Object>}
453
+ */
454
+ async saveCredentials(creds) {
455
+ const email = creds.email || this.config.CODEX_EMAIL || 'default';
456
+
457
+ // 优先使用配置中指定的路径,否则保存到 configs/codex 目录
458
+ let credsPath;
459
+ if (this.config.CODEX_OAUTH_CREDS_FILE_PATH) {
460
+ credsPath = this.config.CODEX_OAUTH_CREDS_FILE_PATH;
461
+ } else {
462
+ // 保存到 configs/codex 目录(与其他供应商一致)
463
+ const projectDir = process.cwd();
464
+ const targetDir = path.join(projectDir, 'configs', 'codex');
465
+ await fs.promises.mkdir(targetDir, { recursive: true });
466
+ const timestamp = Date.now();
467
+ const filename = `${timestamp}_codex-${email}.json`;
468
+ credsPath = path.join(targetDir, filename);
469
+ }
470
+
471
+ try {
472
+ const credsDir = path.dirname(credsPath);
473
+ await fs.promises.mkdir(credsDir, { recursive: true });
474
+ await fs.promises.writeFile(credsPath, JSON.stringify(creds, null, 2), { mode: 0o600 });
475
+
476
+ const relativePath = path.relative(process.cwd(), credsPath);
477
+ console.log(`${CODEX_OAUTH_CONFIG.logPrefix} Credentials saved to ${relativePath}`);
478
+
479
+ // 返回保存路径供后续使用
480
+ return { credsPath, relativePath };
481
+ } catch (error) {
482
+ console.error(`${CODEX_OAUTH_CONFIG.logPrefix} Failed to save credentials:`, error.message);
483
+ throw new Error(`Failed to save credentials: ${error.message}`);
484
+ }
485
+ }
486
+
487
+ /**
488
+ * 加载凭据
489
+ * @param {string} email
490
+ * @returns {Promise<Object|null>}
491
+ */
492
+ async loadCredentials(email) {
493
+ // 优先使用配置中指定的路径,否则从 configs/codex 目录加载
494
+ let credsPath;
495
+ if (this.config.CODEX_OAUTH_CREDS_FILE_PATH) {
496
+ credsPath = this.config.CODEX_OAUTH_CREDS_FILE_PATH;
497
+ } else {
498
+ // 从 configs/codex 目录加载(与其他供应商一致)
499
+ const projectDir = process.cwd();
500
+ const targetDir = path.join(projectDir, 'configs', 'codex');
501
+
502
+ // 扫描目录找到匹配的凭据文件
503
+ try {
504
+ const files = await fs.promises.readdir(targetDir);
505
+ const emailPattern = email || 'default';
506
+ const matchingFile = files
507
+ .filter(f => f.includes(`codex-${emailPattern}`) && f.endsWith('.json'))
508
+ .sort()
509
+ .pop(); // 获取最新的文件
510
+
511
+ if (matchingFile) {
512
+ credsPath = path.join(targetDir, matchingFile);
513
+ } else {
514
+ return null;
515
+ }
516
+ } catch (error) {
517
+ if (error.code === 'ENOENT') {
518
+ return null;
519
+ }
520
+ throw error;
521
+ }
522
+ }
523
+
524
+ try {
525
+ const data = await fs.promises.readFile(credsPath, 'utf8');
526
+ return JSON.parse(data);
527
+ } catch (error) {
528
+ if (error.code === 'ENOENT') {
529
+ return null; // 文件不存在
530
+ }
531
+ throw error;
532
+ }
533
+ }
534
+
535
+ /**
536
+ * 检查凭据文件是否存在
537
+ * @param {string} email
538
+ * @returns {Promise<boolean>}
539
+ */
540
+ async credentialsExist(email) {
541
+ // 优先使用配置中指定的路径,否则从 configs/codex 目录检查
542
+ let credsPath;
543
+ if (this.config.CODEX_OAUTH_CREDS_FILE_PATH) {
544
+ credsPath = this.config.CODEX_OAUTH_CREDS_FILE_PATH;
545
+ } else {
546
+ const projectDir = process.cwd();
547
+ const targetDir = path.join(projectDir, 'configs', 'codex');
548
+
549
+ try {
550
+ const files = await fs.promises.readdir(targetDir);
551
+ const emailPattern = email || 'default';
552
+ const hasMatch = files.some(f =>
553
+ f.includes(`codex-${emailPattern}`) && f.endsWith('.json')
554
+ );
555
+ return hasMatch;
556
+ } catch (error) {
557
+ return false;
558
+ }
559
+ }
560
+
561
+ try {
562
+ await fs.promises.access(credsPath);
563
+ return true;
564
+ } catch {
565
+ return false;
566
+ }
567
+ }
568
+ }
569
+
570
+ /**
571
+ * 带重试的 Codex token 刷新
572
+ * @param {string} refreshToken
573
+ * @param {Object} config
574
+ * @param {number} maxRetries
575
+ * @returns {Promise<Object>}
576
+ */
577
+ export async function refreshCodexTokensWithRetry(refreshToken, config = {}, maxRetries = 3) {
578
+ const auth = new CodexAuth(config);
579
+ let lastError;
580
+
581
+ for (let i = 0; i < maxRetries; i++) {
582
+ try {
583
+ return await auth.refreshTokens(refreshToken);
584
+ } catch (error) {
585
+ lastError = error;
586
+ console.warn(`${CODEX_OAUTH_CONFIG.logPrefix} Retry ${i + 1}/${maxRetries} failed:`, error.message);
587
+
588
+ if (i < maxRetries - 1) {
589
+ // 指数退避
590
+ const delay = Math.min(1000 * Math.pow(2, i), 10000);
591
+ await new Promise(resolve => setTimeout(resolve, delay));
592
+ }
593
+ }
594
+ }
595
+
596
+ throw lastError;
597
+ }
598
+
599
+ /**
600
+ * 处理 Codex OAuth 认证
601
+ * @param {Object} currentConfig - 当前配置
602
+ * @param {Object} options - 选项
603
+ * @returns {Promise<Object>} 返回认证结果
604
+ */
605
+ export async function handleCodexOAuth(currentConfig, options = {}) {
606
+ const auth = new CodexAuth(currentConfig);
607
+
608
+ try {
609
+ console.log('[Codex Auth] Generating OAuth URL...');
610
+
611
+ // 清理所有旧的会话和服务器
612
+ if (global.codexOAuthSessions && global.codexOAuthSessions.size > 0) {
613
+ console.log('[Codex Auth] Cleaning up old OAuth sessions...');
614
+ for (const [sessionId, session] of global.codexOAuthSessions.entries()) {
615
+ try {
616
+ // 清理定时器
617
+ if (session.pollTimer) {
618
+ clearInterval(session.pollTimer);
619
+ }
620
+ // 关闭服务器
621
+ if (session.server) {
622
+ session.server.close();
623
+ }
624
+ global.codexOAuthSessions.delete(sessionId);
625
+ } catch (error) {
626
+ console.warn(`[Codex Auth] Failed to clean up session ${sessionId}:`, error.message);
627
+ }
628
+ }
629
+ }
630
+
631
+ // 生成授权 URL 和启动回调服务器
632
+ const { authUrl, state, pkce, server } = await auth.generateAuthUrl();
633
+
634
+ console.log('[Codex Auth] OAuth URL generated successfully');
635
+
636
+ // 存储 OAuth 会话信息,供后续回调使用
637
+ if (!global.codexOAuthSessions) {
638
+ global.codexOAuthSessions = new Map();
639
+ }
640
+
641
+ const sessionId = state; // 使用 state 作为 session ID
642
+
643
+ // 轮询计数器
644
+ let pollCount = 0;
645
+ const maxPollCount = 30; // 最多轮询次数(可随意更改)
646
+ const pollInterval = 3000; // 轮询间隔(毫秒)
647
+ let pollTimer = null;
648
+ let isCompleted = false;
649
+
650
+ // 创建会话对象
651
+ const session = {
652
+ auth,
653
+ state,
654
+ pkce,
655
+ server,
656
+ pollTimer: null,
657
+ createdAt: Date.now()
658
+ };
659
+
660
+ global.codexOAuthSessions.set(sessionId, session);
661
+
662
+ // 启动轮询日志
663
+ pollTimer = setInterval(() => {
664
+ pollCount++;
665
+ if (pollCount <= maxPollCount && !isCompleted) {
666
+ console.log(`[Codex Auth] Waiting for callback... (${pollCount}/${maxPollCount})`);
667
+ }
668
+
669
+ if (pollCount >= maxPollCount && !isCompleted) {
670
+ clearInterval(pollTimer);
671
+ const totalSeconds = (maxPollCount * pollInterval) / 1000;
672
+ console.log(`[Codex Auth] Polling timeout (${totalSeconds}s), releasing session for next authorization`);
673
+
674
+ // 清理会话和服务器
675
+ if (global.codexOAuthSessions.has(sessionId)) {
676
+ const session = global.codexOAuthSessions.get(sessionId);
677
+ if (session.server) {
678
+ session.server.close();
679
+ }
680
+ global.codexOAuthSessions.delete(sessionId);
681
+ }
682
+ }
683
+ }, pollInterval);
684
+
685
+ // 将 pollTimer 存储到会话中
686
+ session.pollTimer = pollTimer;
687
+
688
+ // 监听回调服务器的 auth-success 事件,自动完成 OAuth 流程
689
+ server.once('auth-success', async (result) => {
690
+ isCompleted = true;
691
+ if (pollTimer) {
692
+ clearInterval(pollTimer);
693
+ }
694
+
695
+ try {
696
+ console.log('[Codex Auth] Received auth callback, completing OAuth flow...');
697
+
698
+ const session = global.codexOAuthSessions.get(sessionId);
699
+ if (!session) {
700
+ console.error('[Codex Auth] Session not found');
701
+ return;
702
+ }
703
+
704
+ // 完成 OAuth 流程
705
+ const credentials = await auth.completeOAuthFlow(result.code, result.state, session.state, session.pkce);
706
+
707
+ // 清理会话
708
+ global.codexOAuthSessions.delete(sessionId);
709
+
710
+ // 广播认证成功事件
711
+ broadcastEvent('oauth_success', {
712
+ provider: 'openai-codex-oauth',
713
+ credPath: credentials.credPath,
714
+ relativePath: credentials.relativePath,
715
+ timestamp: new Date().toISOString(),
716
+ email: credentials.email,
717
+ accountId: credentials.account_id
718
+ });
719
+
720
+ // 自动关联新生成的凭据到 Pools
721
+ await autoLinkProviderConfigs(CONFIG);
722
+
723
+ console.log('[Codex Auth] OAuth flow completed successfully');
724
+ } catch (error) {
725
+ console.error('[Codex Auth] Failed to complete OAuth flow:', error.message);
726
+
727
+ // 广播认证失败事件
728
+ broadcastEvent('oauth_error', {
729
+ provider: 'openai-codex-oauth',
730
+ error: error.message
731
+ });
732
+ }
733
+ });
734
+
735
+ // 监听 auth-error 事件
736
+ server.once('auth-error', (error) => {
737
+ isCompleted = true;
738
+ if (pollTimer) {
739
+ clearInterval(pollTimer);
740
+ }
741
+
742
+ console.error('[Codex Auth] Auth error:', error.message);
743
+ global.codexOAuthSessions.delete(sessionId);
744
+
745
+ broadcastEvent('oauth_error', {
746
+ provider: 'openai-codex-oauth',
747
+ error: error.message
748
+ });
749
+ });
750
+
751
+ return {
752
+ success: true,
753
+ authUrl: authUrl,
754
+ authInfo: {
755
+ provider: 'openai-codex-oauth',
756
+ method: 'oauth2-pkce',
757
+ sessionId: sessionId,
758
+ redirectUri: CODEX_OAUTH_CONFIG.redirectUri,
759
+ port: CODEX_OAUTH_CONFIG.port,
760
+ instructions: [
761
+ '1. 点击下方按钮在浏览器中打开授权链接',
762
+ '2. 使用您的 OpenAI 账户登录',
763
+ '3. 授权应用访问您的 Codex API',
764
+ '4. 授权成功后会自动保存凭据',
765
+ '5. 如果浏览器未自动跳转,请手动复制回调 URL'
766
+ ]
767
+ }
768
+ };
769
+ } catch (error) {
770
+ console.error('[Codex Auth] Failed to generate OAuth URL:', error.message);
771
+
772
+ return {
773
+ success: false,
774
+ error: error.message,
775
+ authInfo: {
776
+ provider: 'openai-codex-oauth',
777
+ method: 'oauth2-pkce',
778
+ instructions: [
779
+ `1. 确保端口 ${CODEX_OAUTH_CONFIG.port} 未被占用`,
780
+ '2. 确保可以访问 auth.openai.com',
781
+ '3. 确保浏览器可以正常打开',
782
+ '4. 如果问题持续,请检查网络连接'
783
+ ]
784
+ }
785
+ };
786
+ }
787
+ }
788
+
789
+ /**
790
+ * 处理 Codex OAuth 回调
791
+ * @param {string} code - 授权码
792
+ * @param {string} state - 状态参数
793
+ * @returns {Promise<Object>} 返回认证结果
794
+ */
795
+ export async function handleCodexOAuthCallback(code, state) {
796
+ try {
797
+ if (!global.codexOAuthSessions || !global.codexOAuthSessions.has(state)) {
798
+ throw new Error('Invalid or expired OAuth session');
799
+ }
800
+
801
+ const session = global.codexOAuthSessions.get(state);
802
+ const { auth, state: expectedState, pkce } = session;
803
+
804
+ console.log('[Codex Auth] Processing OAuth callback...');
805
+
806
+ // 完成 OAuth 流程
807
+ const result = await auth.completeOAuthFlow(code, state, expectedState, pkce);
808
+
809
+ // 清理会话
810
+ global.codexOAuthSessions.delete(state);
811
+
812
+ // 广播认证成功事件(与 gemini 格式一致)
813
+ broadcastEvent('oauth_success', {
814
+ provider: 'openai-codex-oauth',
815
+ credPath: result.credPath,
816
+ relativePath: result.relativePath,
817
+ timestamp: new Date().toISOString(),
818
+ email: result.email,
819
+ accountId: result.account_id
820
+ });
821
+
822
+ // 自动关联新生成的凭据到 Pools
823
+ await autoLinkProviderConfigs(CONFIG);
824
+
825
+ console.log('[Codex Auth] OAuth callback processed successfully');
826
+
827
+ return {
828
+ success: true,
829
+ message: 'Codex authentication successful',
830
+ credentials: result,
831
+ email: result.email,
832
+ accountId: result.account_id,
833
+ credPath: result.credPath,
834
+ relativePath: result.relativePath
835
+ };
836
+ } catch (error) {
837
+ console.error('[Codex Auth] OAuth callback failed:', error.message);
838
+
839
+ // 广播认证失败事件
840
+ broadcastEvent({
841
+ type: 'oauth-error',
842
+ provider: 'openai-codex-oauth',
843
+ error: error.message
844
+ });
845
+
846
+ return {
847
+ success: false,
848
+ error: error.message
849
+ };
850
+ }
851
+ }
src/auth/gemini-oauth.js ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { OAuth2Client } from 'google-auth-library';
2
+ import http from 'http';
3
+ import fs from 'fs';
4
+ import path from 'path';
5
+ import os from 'os';
6
+ import { broadcastEvent } from '../services/ui-manager.js';
7
+ import { autoLinkProviderConfigs } from '../services/service-manager.js';
8
+ import { CONFIG } from '../core/config-manager.js';
9
+ import { getGoogleAuthProxyConfig } from '../utils/proxy-utils.js';
10
+
11
+ /**
12
+ * OAuth 提供商配置
13
+ */
14
+ const OAUTH_PROVIDERS = {
15
+ 'gemini-cli-oauth': {
16
+ clientId: '681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com',
17
+ clientSecret: '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',
18
+ port: 8085,
19
+ credentialsDir: '.gemini',
20
+ credentialsFile: 'oauth_creds.json',
21
+ scope: ['https://www.googleapis.com/auth/cloud-platform'],
22
+ logPrefix: '[Gemini Auth]'
23
+ },
24
+ 'gemini-antigravity': {
25
+ clientId: '1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com',
26
+ clientSecret: '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',
27
+ port: 8086,
28
+ credentialsDir: '.antigravity',
29
+ credentialsFile: 'oauth_creds.json',
30
+ scope: ['https://www.googleapis.com/auth/cloud-platform'],
31
+ logPrefix: '[Antigravity Auth]'
32
+ }
33
+ };
34
+
35
+ /**
36
+ * 活动的服务器实例管理
37
+ */
38
+ const activeServers = new Map();
39
+
40
+ /**
41
+ * 生成 HTML 响应页面
42
+ * @param {boolean} isSuccess - 是否成功
43
+ * @param {string} message - 显示消息
44
+ * @returns {string} HTML 内容
45
+ */
46
+ function generateResponsePage(isSuccess, message) {
47
+ const title = isSuccess ? '授权成功!' : '授权失败';
48
+
49
+ return `<!DOCTYPE html>
50
+ <html lang="zh-CN">
51
+ <head>
52
+ <meta charset="utf-8">
53
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
54
+ <title>${title}</title>
55
+ </head>
56
+ <body>
57
+ <div class="container">
58
+ <h1>${title}</h1>
59
+ <p>${message}</p>
60
+ </div>
61
+ </body>
62
+ </html>`;
63
+ }
64
+
65
+ /**
66
+ * 关闭指定端口的活动服务器
67
+ * @param {number} port - 端口号
68
+ * @returns {Promise<void>}
69
+ */
70
+ async function closeActiveServer(provider, port = null) {
71
+ // 1. 关闭该提供商之前的所有服务器
72
+ const existing = activeServers.get(provider);
73
+ if (existing) {
74
+ await new Promise((resolve) => {
75
+ existing.server.close(() => {
76
+ activeServers.delete(provider);
77
+ console.log(`[OAuth] 已关闭提供商 ${provider} 在端口 ${existing.port} 上的旧服务器`);
78
+ resolve();
79
+ });
80
+ });
81
+ }
82
+
83
+ // 2. 如果指定了端口,检查是否有其他提供商占用了该端口
84
+ if (port) {
85
+ for (const [p, info] of activeServers.entries()) {
86
+ if (info.port === port) {
87
+ await new Promise((resolve) => {
88
+ info.server.close(() => {
89
+ activeServers.delete(p);
90
+ console.log(`[OAuth] 已关闭端口 ${port} 上被占用(提供商: ${p})的旧服务器`);
91
+ resolve();
92
+ });
93
+ });
94
+ }
95
+ }
96
+ }
97
+ }
98
+
99
+ /**
100
+ * 创建 OAuth 回调服务器
101
+ * @param {Object} config - OAuth 提供商配置
102
+ * @param {string} redirectUri - 重定向 URI
103
+ * @param {OAuth2Client} authClient - OAuth2 客户端
104
+ * @param {string} credPath - 凭据保存路径
105
+ * @param {string} provider - 提供商标识
106
+ * @returns {Promise<http.Server>} HTTP 服务器实例
107
+ */
108
+ async function createOAuthCallbackServer(config, redirectUri, authClient, credPath, provider, options = {}) {
109
+ const port = parseInt(options.port) || config.port;
110
+ // 先关闭该提供商之前可能运行的所有服务器,或该端口上的旧服务器
111
+ await closeActiveServer(provider, port);
112
+
113
+ return new Promise((resolve, reject) => {
114
+ const server = http.createServer(async (req, res) => {
115
+ try {
116
+ const url = new URL(req.url, redirectUri);
117
+ const code = url.searchParams.get('code');
118
+ const errorParam = url.searchParams.get('error');
119
+
120
+ if (code) {
121
+ console.log(`${config.logPrefix} 收到来自 Google 的成功回调: ${req.url}`);
122
+
123
+ try {
124
+ const { tokens } = await authClient.getToken(code);
125
+ let finalCredPath = credPath;
126
+
127
+ // 如果指定了保存到 configs 目录
128
+ if (options.saveToConfigs) {
129
+ const providerDir = options.providerDir;
130
+ const targetDir = path.join(process.cwd(), 'configs', providerDir);
131
+ await fs.promises.mkdir(targetDir, { recursive: true });
132
+ const timestamp = Date.now();
133
+ const filename = `${timestamp}_oauth_creds.json`;
134
+ finalCredPath = path.join(targetDir, filename);
135
+ }
136
+
137
+ await fs.promises.mkdir(path.dirname(finalCredPath), { recursive: true });
138
+ await fs.promises.writeFile(finalCredPath, JSON.stringify(tokens, null, 2));
139
+ console.log(`${config.logPrefix} 新令牌已接收并保存到文件: ${finalCredPath}`);
140
+
141
+ const relativePath = path.relative(process.cwd(), finalCredPath);
142
+
143
+ // 广播授权成功事件
144
+ broadcastEvent('oauth_success', {
145
+ provider: provider,
146
+ credPath: finalCredPath,
147
+ relativePath: relativePath,
148
+ timestamp: new Date().toISOString()
149
+ });
150
+
151
+ // 自动关联新生成的凭据到 Pools
152
+ await autoLinkProviderConfigs(CONFIG);
153
+
154
+ res.writeHead(200, { 'Content-Type': 'text/html; charset=utf-8' });
155
+ res.end(generateResponsePage(true, '您可以关闭此页面'));
156
+ } catch (tokenError) {
157
+ console.error(`${config.logPrefix} 获取令牌失败:`, tokenError);
158
+ res.writeHead(500, { 'Content-Type': 'text/html; charset=utf-8' });
159
+ res.end(generateResponsePage(false, `获取令牌失败: ${tokenError.message}`));
160
+ } finally {
161
+ server.close(() => {
162
+ activeServers.delete(provider);
163
+ });
164
+ }
165
+ } else if (errorParam) {
166
+ const errorMessage = `授权失败。Google 返回错误: ${errorParam}`;
167
+ console.error(`${config.logPrefix}`, errorMessage);
168
+
169
+ res.writeHead(400, { 'Content-Type': 'text/html; charset=utf-8' });
170
+ res.end(generateResponsePage(false, errorMessage));
171
+ server.close(() => {
172
+ activeServers.delete(provider);
173
+ });
174
+ } else {
175
+ console.log(`${config.logPrefix} 忽略无关请求: ${req.url}`);
176
+ res.writeHead(204);
177
+ res.end();
178
+ }
179
+ } catch (error) {
180
+ console.error(`${config.logPrefix} 处理回调时出错:`, error);
181
+ res.writeHead(500, { 'Content-Type': 'text/html; charset=utf-8' });
182
+ res.end(generateResponsePage(false, `服务器错误: ${error.message}`));
183
+
184
+ if (server.listening) {
185
+ server.close(() => {
186
+ activeServers.delete(provider);
187
+ });
188
+ }
189
+ }
190
+ });
191
+
192
+ server.on('error', (err) => {
193
+ if (err.code === 'EADDRINUSE') {
194
+ console.error(`${config.logPrefix} 端口 ${port} 已被占用`);
195
+ reject(new Error(`端口 ${port} 已被占用`));
196
+ } else {
197
+ console.error(`${config.logPrefix} 服务器错误:`, err);
198
+ reject(err);
199
+ }
200
+ });
201
+
202
+ const host = '0.0.0.0';
203
+ server.listen(port, host, () => {
204
+ console.log(`${config.logPrefix} OAuth 回调服务器已启动于 ${host}:${port}`);
205
+ activeServers.set(provider, { server, port });
206
+ resolve(server);
207
+ });
208
+ });
209
+ }
210
+
211
+ /**
212
+ * 处理 Google OAuth 授权(通用函数)
213
+ * @param {string} providerKey - 提供商键名
214
+ * @param {Object} currentConfig - 当前配置对象
215
+ * @param {Object} options - 额外选项
216
+ * @returns {Promise<Object>} 返回授权URL和相关信息
217
+ */
218
+ async function handleGoogleOAuth(providerKey, currentConfig, options = {}) {
219
+ const config = OAUTH_PROVIDERS[providerKey];
220
+ if (!config) {
221
+ throw new Error(`未知的提供商: ${providerKey}`);
222
+ }
223
+
224
+ const port = parseInt(options.port) || config.port;
225
+ const host = 'localhost';
226
+ const redirectUri = `http://${host}:${port}`;
227
+
228
+ // 获取代理配置
229
+ const proxyConfig = getGoogleAuthProxyConfig(currentConfig, providerKey);
230
+
231
+ // 构建 OAuth2Client 选项
232
+ const oauth2Options = {
233
+ clientId: config.clientId,
234
+ clientSecret: config.clientSecret,
235
+ };
236
+
237
+ if (proxyConfig) {
238
+ oauth2Options.transporterOptions = proxyConfig;
239
+ console.log(`${config.logPrefix} Using proxy for OAuth token exchange`);
240
+ }
241
+
242
+ const authClient = new OAuth2Client(oauth2Options);
243
+ authClient.redirectUri = redirectUri;
244
+
245
+ const authUrl = authClient.generateAuthUrl({
246
+ access_type: 'offline',
247
+ prompt: 'select_account',
248
+ scope: config.scope
249
+ });
250
+
251
+ // 启动回调服务器
252
+ const credPath = path.join(os.homedir(), config.credentialsDir, config.credentialsFile);
253
+
254
+ try {
255
+ await createOAuthCallbackServer(config, redirectUri, authClient, credPath, providerKey, options);
256
+ } catch (error) {
257
+ throw new Error(`启动回调服务器失败: ${error.message}`);
258
+ }
259
+
260
+ return {
261
+ authUrl,
262
+ authInfo: {
263
+ provider: providerKey,
264
+ redirectUri: redirectUri,
265
+ port: port,
266
+ ...options
267
+ }
268
+ };
269
+ }
270
+
271
+ /**
272
+ * 处理 Gemini CLI OAuth 授权
273
+ * @param {Object} currentConfig - 当前配置对象
274
+ * @param {Object} options - 额外选项
275
+ * @returns {Promise<Object>} 返回授权URL和相关信息
276
+ */
277
+ export async function handleGeminiCliOAuth(currentConfig, options = {}) {
278
+ return handleGoogleOAuth('gemini-cli-oauth', currentConfig, options);
279
+ }
280
+
281
+ /**
282
+ * 处理 Gemini Antigravity OAuth 授权
283
+ * @param {Object} currentConfig - 当前配置对象
284
+ * @param {Object} options - 额外选项
285
+ * @returns {Promise<Object>} 返回授权URL和相关信息
286
+ */
287
+ export async function handleGeminiAntigravityOAuth(currentConfig, options = {}) {
288
+ return handleGoogleOAuth('gemini-antigravity', currentConfig, options);
289
+ }
src/auth/iflow-oauth.js ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import http from 'http';
2
+ import fs from 'fs';
3
+ import path from 'path';
4
+ import os from 'os';
5
+ import crypto from 'crypto';
6
+ import { broadcastEvent } from '../services/ui-manager.js';
7
+ import { autoLinkProviderConfigs } from '../services/service-manager.js';
8
+ import { CONFIG } from '../core/config-manager.js';
9
+ import { getProxyConfigForProvider } from '../utils/proxy-utils.js';
10
+
11
+ /**
12
+ * iFlow OAuth 配置
13
+ */
14
+ const IFLOW_OAUTH_CONFIG = {
15
+ // OAuth 端点
16
+ tokenEndpoint: 'https://iflow.cn/oauth/token',
17
+ authorizeEndpoint: 'https://iflow.cn/oauth',
18
+ userInfoEndpoint: 'https://iflow.cn/api/oauth/getUserInfo',
19
+ successRedirectURL: 'https://iflow.cn/oauth/success',
20
+
21
+ // 客户端凭据
22
+ clientId: '10009311001',
23
+ clientSecret: '4Z3YjXycVsQvyGF1etiNlIBB4RsqSDtW',
24
+
25
+ // 本地回调端口
26
+ callbackPort: 8087,
27
+
28
+ // 凭据存储
29
+ credentialsDir: '.iflow',
30
+ credentialsFile: 'oauth_creds.json',
31
+
32
+ // 日志前缀
33
+ logPrefix: '[iFlow Auth]'
34
+ };
35
+
36
+ /**
37
+ * 活动的 iFlow 回调服务器管理
38
+ */
39
+ const activeIFlowServers = new Map();
40
+
41
/**
 * Perform an HTTP request with optional per-provider proxy support.
 *
 * Uses axios instead of native fetch so that custom http/https agents can be
 * attached for proxying. Returns an object mimicking the fetch Response API
 * (ok / status / statusText / headers / json() / text()).
 *
 * @param {string} url - Request URL.
 * @param {Object} options - fetch-style options (method, headers, body).
 * @param {string} providerType - Provider id used to look up proxy config.
 * @returns {Promise<Object>} fetch-Response-like object (returned even for
 *   non-2xx statuses so callers can inspect status and body).
 * @throws {Error} On network-level failures where no HTTP response arrived.
 */
async function fetchWithProxy(url, options = {}, providerType) {
  // Shared adapter: wrap an axios response (success or HTTP error) into a
  // fetch-like shape. Previously this was duplicated in both branches.
  const toFetchLikeResponse = (res) => ({
    ok: res.status >= 200 && res.status < 300,
    status: res.status,
    statusText: res.statusText,
    headers: res.headers,
    json: async () => res.data,
    text: async () => (typeof res.data === 'string' ? res.data : JSON.stringify(res.data)),
  });

  const axiosConfig = {
    url,
    method: options.method || 'GET',
    headers: options.headers || {},
    timeout: 30000, // 30-second timeout
  };

  if (options.body) {
    axiosConfig.data = options.body;
  }

  // Attach proxy agents when the provider has a proxy configured.
  const proxyConfig = getProxyConfigForProvider(CONFIG, providerType);
  if (proxyConfig) {
    axiosConfig.httpAgent = proxyConfig.httpAgent;
    axiosConfig.httpsAgent = proxyConfig.httpsAgent;
    axiosConfig.proxy = false; // disable axios' built-in proxy; use our agents
    console.log(`[OAuth] Using proxy for ${providerType}: ${CONFIG.PROXY_URL}`);
  }

  // Import outside the try so an import failure is not mistaken for a
  // request error.
  const axios = (await import('axios')).default;
  try {
    return toFetchLikeResponse(await axios(axiosConfig));
  } catch (error) {
    // axios throws on non-2xx; if the server answered, surface the response.
    if (error.response) {
      return toFetchLikeResponse(error.response);
    }
    // Network error (DNS failure, timeout, connection refused, ...)
    throw error;
  }
}
103
+
104
/**
 * Build the minimal HTML page shown in the browser after the OAuth redirect.
 *
 * @param {boolean} isSuccess - Whether authorization succeeded.
 * @param {string} message - Message to display to the user.
 * @returns {string} Complete HTML document.
 */
function generateResponsePage(isSuccess, message) {
  let title;
  if (isSuccess) {
    title = '授权成功!';
  } else {
    title = '授权失败';
  }

  return `<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>${title}</title>
</head>
<body>
<div class="container">
<h1>${title}</h1>
<p>${message}</p>
</div>
</body>
</html>`;
}
128
+
129
/**
 * Build the iFlow authorization URL and its matching redirect URI.
 *
 * @param {string} state - Opaque CSRF state parameter echoed on the callback.
 * @param {number} port - Local callback server port.
 * @returns {{authUrl: string, redirectUri: string}}
 */
function generateIFlowAuthorizationURL(state, port) {
  const redirectUri = `http://localhost:${port}/oauth2callback`;
  const query = new URLSearchParams({
    loginMethod: 'phone',
    type: 'phone',
    redirect: redirectUri,
    state: state,
    client_id: IFLOW_OAUTH_CONFIG.clientId
  });
  return {
    authUrl: `${IFLOW_OAUTH_CONFIG.authorizeEndpoint}?${query.toString()}`,
    redirectUri
  };
}
147
+
148
+ /**
149
+ * 交换授权码获取 iFlow 令牌
150
+ * @param {string} code - 授权码
151
+ * @param {string} redirectUri - 重定向 URI
152
+ * @returns {Promise<Object>} 令牌数据
153
+ */
154
+ async function exchangeIFlowCodeForTokens(code, redirectUri) {
155
+ const form = new URLSearchParams({
156
+ grant_type: 'authorization_code',
157
+ code: code,
158
+ redirect_uri: redirectUri,
159
+ client_id: IFLOW_OAUTH_CONFIG.clientId,
160
+ client_secret: IFLOW_OAUTH_CONFIG.clientSecret
161
+ });
162
+
163
+ // 生成 Basic Auth 头
164
+ const basicAuth = Buffer.from(`${IFLOW_OAUTH_CONFIG.clientId}:${IFLOW_OAUTH_CONFIG.clientSecret}`).toString('base64');
165
+
166
+ const response = await fetchWithProxy(IFLOW_OAUTH_CONFIG.tokenEndpoint, {
167
+ method: 'POST',
168
+ headers: {
169
+ 'Content-Type': 'application/x-www-form-urlencoded',
170
+ 'Accept': 'application/json',
171
+ 'Authorization': `Basic ${basicAuth}`
172
+ },
173
+ body: form.toString()
174
+ }, 'openai-iflow');
175
+
176
+ if (!response.ok) {
177
+ const errorText = await response.text();
178
+ throw new Error(`iFlow token exchange failed: ${response.status} ${errorText}`);
179
+ }
180
+
181
+ const tokenData = await response.json();
182
+
183
+ if (!tokenData.access_token) {
184
+ throw new Error('iFlow token: missing access token in response');
185
+ }
186
+
187
+ return {
188
+ accessToken: tokenData.access_token,
189
+ refreshToken: tokenData.refresh_token,
190
+ tokenType: tokenData.token_type,
191
+ scope: tokenData.scope,
192
+ expiresIn: tokenData.expires_in,
193
+ expiresAt: new Date(Date.now() + tokenData.expires_in * 1000).toISOString()
194
+ };
195
+ }
196
+
197
/**
 * Fetch iFlow user info, which carries the per-account API key.
 *
 * @param {string} accessToken - OAuth access token.
 * @returns {Promise<{apiKey: string, email: string, phone: string}>} The
 *   account API key plus an identifier (email, falling back to phone).
 * @throws {Error} On empty token, HTTP failure, or missing fields.
 */
async function fetchIFlowUserInfo(accessToken) {
  if (!accessToken || accessToken.trim() === '') {
    throw new Error('iFlow api key: access token is empty');
  }

  const endpoint = `${IFLOW_OAUTH_CONFIG.userInfoEndpoint}?accessToken=${encodeURIComponent(accessToken)}`;

  const response = await fetchWithProxy(endpoint, {
    method: 'GET',
    headers: { 'Accept': 'application/json' }
  }, 'openai-iflow');

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`iFlow user info failed: ${response.status} ${errorText}`);
  }

  const result = await response.json();

  if (!result.success) {
    throw new Error('iFlow api key: request not successful');
  }
  if (!result.data || !result.data.apiKey) {
    throw new Error('iFlow api key: missing api key in response');
  }

  // Prefer email as the account identifier; fall back to phone number.
  const email = (result.data.email || '').trim() || (result.data.phone || '').trim();
  if (!email) {
    throw new Error('iFlow token: missing account email/phone in user info');
  }

  return {
    apiKey: result.data.apiKey,
    email: email,
    phone: result.data.phone || ''
  };
}
246
+
247
/**
 * Close any iFlow callback server registered for a provider, and optionally
 * any remaining server bound to a specific port.
 *
 * @param {string} provider - Provider key in the server registry.
 * @param {number|null} port - Port to also free (optional).
 */
async function closeIFlowServer(provider, port = null) {
  const existing = activeIFlowServers.get(provider);
  if (existing) {
    await new Promise((resolve) => {
      existing.server.close(() => {
        activeIFlowServers.delete(provider);
        console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 已关闭提供商 ${provider} 在端口 ${existing.port} 上的旧服务器`);
        resolve();
      });
    });
  }

  if (!port) {
    return;
  }

  // Also close any server (under a different key) holding the requested port.
  for (const [key, entry] of activeIFlowServers.entries()) {
    if (entry.port !== port) {
      continue;
    }
    await new Promise((resolve) => {
      entry.server.close(() => {
        activeIFlowServers.delete(key);
        console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 已关闭端口 ${port} 上的旧服务器`);
        resolve();
      });
    });
  }
}
278
+
279
/**
 * Create the local HTTP server that receives the iFlow OAuth redirect.
 *
 * The server is single-use: after handling one callback (success or failure)
 * it shuts itself down. It also auto-closes after 10 minutes.
 *
 * @param {number} port - Port to listen on.
 * @param {string} redirectUri - Redirect URI registered in the authorize step.
 * @param {string} expectedState - CSRF state the callback must echo.
 * @param {Object} options - { saveToConfigs?, providerDir? }
 * @returns {Promise<http.Server>} Resolves once the server is listening;
 *   rejects if the port is busy or another server error occurs.
 */
function createIFlowCallbackServer(port, redirectUri, expectedState, options = {}) {
  return new Promise((resolve, reject) => {
    const server = http.createServer(async (req, res) => {
      // One-shot teardown: stop the server and drop its registry entry.
      const shutdown = () => {
        server.close(() => {
          activeIFlowServers.delete('openai-iflow');
        });
      };
      // Render an error page with the given HTTP status.
      const fail = (status, message) => {
        res.writeHead(status, { 'Content-Type': 'text/html; charset=utf-8' });
        res.end(generateResponsePage(false, message));
      };

      try {
        const url = new URL(req.url, `http://localhost:${port}`);

        if (url.pathname !== '/oauth2callback') {
          // Ignore unrelated requests (favicon, probes, ...)
          res.writeHead(204);
          res.end();
          return;
        }

        const code = url.searchParams.get('code');
        const state = url.searchParams.get('state');
        const errorParam = url.searchParams.get('error');

        if (errorParam) {
          console.error(`${IFLOW_OAUTH_CONFIG.logPrefix} 授权失败: ${errorParam}`);
          fail(400, `授权失败: ${errorParam}`);
          shutdown();
          return;
        }

        if (state !== expectedState) {
          console.error(`${IFLOW_OAUTH_CONFIG.logPrefix} State 验证失败`);
          fail(400, 'State 验证失败');
          shutdown();
          return;
        }

        if (!code) {
          console.error(`${IFLOW_OAUTH_CONFIG.logPrefix} 缺少授权码`);
          fail(400, '缺少授权码');
          shutdown();
          return;
        }

        console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 收到授权回调,正在交换令牌...`);

        try {
          // 1. Exchange the authorization code for tokens.
          const tokenData = await exchangeIFlowCodeForTokens(code, redirectUri);
          console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 令牌交换成功`);

          // 2. Fetch user info (carries the per-account API key).
          const userInfo = await fetchIFlowUserInfo(tokenData.accessToken);
          console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 用户信息获取成功: ${userInfo.email}`);

          // 3. Assemble the on-disk credential payload.
          const credentialsData = {
            access_token: tokenData.accessToken,
            refresh_token: tokenData.refreshToken,
            expiry_date: new Date(tokenData.expiresAt).getTime(),
            token_type: tokenData.tokenType,
            scope: tokenData.scope,
            apiKey: userInfo.apiKey
          };

          // 4. Persist credentials (home directory by default, or configs/).
          let credPath = path.join(os.homedir(), IFLOW_OAUTH_CONFIG.credentialsDir, IFLOW_OAUTH_CONFIG.credentialsFile);
          if (options.saveToConfigs) {
            const providerDir = options.providerDir || 'iflow';
            const targetDir = path.join(process.cwd(), 'configs', providerDir);
            await fs.promises.mkdir(targetDir, { recursive: true });
            credPath = path.join(targetDir, `${Date.now()}_oauth_creds.json`);
          }

          await fs.promises.mkdir(path.dirname(credPath), { recursive: true });
          await fs.promises.writeFile(credPath, JSON.stringify(credentialsData, null, 2));
          console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 凭据已保存: ${credPath}`);

          // 5. Notify the UI of the successful authorization.
          broadcastEvent('oauth_success', {
            provider: 'openai-iflow',
            credPath: credPath,
            relativePath: path.relative(process.cwd(), credPath),
            email: userInfo.email,
            timestamp: new Date().toISOString()
          });

          // 6. Auto-link the new credentials into provider pools.
          await autoLinkProviderConfigs(CONFIG);

          res.writeHead(200, { 'Content-Type': 'text/html; charset=utf-8' });
          res.end(generateResponsePage(true, `授权成功!账户: ${userInfo.email},您可以关闭此页面`));

        } catch (tokenError) {
          console.error(`${IFLOW_OAUTH_CONFIG.logPrefix} 令牌处理失败:`, tokenError);
          fail(500, `令牌处理失败: ${tokenError.message}`);
        } finally {
          // The callback server is single-use: stop it either way.
          shutdown();
        }
      } catch (error) {
        console.error(`${IFLOW_OAUTH_CONFIG.logPrefix} 处理回调出错:`, error);
        fail(500, `服务器错误: ${error.message}`);
        if (server.listening) {
          shutdown();
        }
      }
    });

    server.on('error', (err) => {
      if (err.code === 'EADDRINUSE') {
        console.error(`${IFLOW_OAUTH_CONFIG.logPrefix} 端口 ${port} 已被占用`);
        reject(new Error(`端口 ${port} 已被占用`));
      } else {
        console.error(`${IFLOW_OAUTH_CONFIG.logPrefix} 服务器错误:`, err);
        reject(err);
      }
    });

    // Bind all interfaces so the flow also works inside containers.
    const host = '0.0.0.0';
    server.listen(port, host, () => {
      console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} OAuth 回调服务器已启动于 ${host}:${port}`);
      resolve(server);
    });

    // Auto-close after 10 minutes if the user never completes the flow.
    setTimeout(() => {
      if (server.listening) {
        console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 回调服务器超时,自动关闭`);
        server.close(() => {
          activeIFlowServers.delete('openai-iflow');
        });
      }
    }, 10 * 60 * 1000);
  });
}
436
+
437
/**
 * Start the iFlow OAuth flow: spin up the local callback server and return
 * the authorization URL for the user to open in a browser.
 *
 * @param {Object} currentConfig - Current config object (kept for API parity).
 * @param {Object} options - { port?, saveToConfigs?, providerDir? }
 * @returns {Promise<Object>} { authUrl, authInfo }
 * @throws {Error} When the callback server cannot be started.
 */
export async function handleIFlowOAuth(currentConfig, options = {}) {
  const providerKey = 'openai-iflow';
  const port = parseInt(options.port) || IFLOW_OAUTH_CONFIG.callbackPort;

  // CSRF protection: random state echoed back on the callback.
  const state = crypto.randomBytes(16).toString('base64url');

  const { authUrl, redirectUri } = generateIFlowAuthorizationURL(state, port);
  console.log(`${IFLOW_OAUTH_CONFIG.logPrefix} 生成授权链接: ${authUrl}`);

  // Tear down any stale server before binding a new one.
  await closeIFlowServer(providerKey, port);

  try {
    const server = await createIFlowCallbackServer(port, redirectUri, state, options);
    activeIFlowServers.set(providerKey, { server, port });
  } catch (error) {
    throw new Error(`启动 iFlow 回调服务器失败: ${error.message}`);
  }

  return {
    authUrl,
    authInfo: {
      provider: 'openai-iflow',
      redirectUri: redirectUri,
      callbackPort: port,
      state: state,
      ...options
    }
  };
}
480
+
481
/**
 * Refresh iFlow tokens using the refresh_token grant, then re-fetch the
 * account API key.
 *
 * @param {string} refreshToken - Current refresh token.
 * @returns {Promise<Object>} Credential payload in the on-disk format
 *   (access_token, refresh_token, expiry_date, token_type, scope, apiKey).
 * @throws {Error} On HTTP failure or a response without an access token.
 */
export async function refreshIFlowTokens(refreshToken) {
  const form = new URLSearchParams({
    grant_type: 'refresh_token',
    refresh_token: refreshToken,
    client_id: IFLOW_OAUTH_CONFIG.clientId,
    client_secret: IFLOW_OAUTH_CONFIG.clientSecret
  });

  // HTTP Basic auth header carrying the client credentials.
  const basicAuth = Buffer.from(`${IFLOW_OAUTH_CONFIG.clientId}:${IFLOW_OAUTH_CONFIG.clientSecret}`).toString('base64');

  const response = await fetchWithProxy(IFLOW_OAUTH_CONFIG.tokenEndpoint, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/x-www-form-urlencoded',
      'Accept': 'application/json',
      'Authorization': `Basic ${basicAuth}`
    },
    body: form.toString()
  }, 'openai-iflow');

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`iFlow token refresh failed: ${response.status} ${errorText}`);
  }

  const tokenData = await response.json();

  if (!tokenData.access_token) {
    throw new Error('iFlow token refresh: missing access token in response');
  }

  // Re-fetch user info so the stored API key stays current.
  const userInfo = await fetchIFlowUserInfo(tokenData.access_token);

  return {
    access_token: tokenData.access_token,
    // RFC 6749 §6: the server MAY omit a new refresh token, in which case
    // the old one remains valid — keep it instead of storing undefined.
    refresh_token: tokenData.refresh_token || refreshToken,
    expiry_date: Date.now() + tokenData.expires_in * 1000,
    token_type: tokenData.token_type,
    scope: tokenData.scope,
    apiKey: userInfo.apiKey
  };
}
src/auth/index.js ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Aggregated OAuth entry points for every supported provider.

// Codex OAuth
export {
  refreshCodexTokensWithRetry,
  handleCodexOAuth,
  handleCodexOAuthCallback
} from './codex-oauth.js';

// Gemini OAuth
export {
  handleGeminiCliOAuth,
  handleGeminiAntigravityOAuth
} from './gemini-oauth.js';

// Qwen OAuth
export { handleQwenOAuth } from './qwen-oauth.js';

// Kiro OAuth
export {
  handleKiroOAuth,
  checkKiroCredentialsDuplicate,
  batchImportKiroRefreshTokens,
  batchImportKiroRefreshTokensStream,
  importAwsCredentials
} from './kiro-oauth.js';

// iFlow OAuth
export { handleIFlowOAuth, refreshIFlowTokens } from './iflow-oauth.js';
src/auth/kiro-oauth.js ADDED
@@ -0,0 +1,1117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import http from 'http';
2
+ import fs from 'fs';
3
+ import path from 'path';
4
+ import crypto from 'crypto';
5
+ import os from 'os';
6
+ import { broadcastEvent } from '../services/ui-manager.js';
7
+ import { autoLinkProviderConfigs } from '../services/service-manager.js';
8
+ import { CONFIG } from '../core/config-manager.js';
9
+ import { getProxyConfigForProvider } from '../utils/proxy-utils.js';
10
+
11
+ /**
12
+ * Kiro OAuth 配置(支持多种认证方式)
13
+ */
14
+ const KIRO_OAUTH_CONFIG = {
15
+ // Kiro Auth Service 端点 (用于 Social Auth)
16
+ authServiceEndpoint: 'https://prod.us-east-1.auth.desktop.kiro.dev',
17
+
18
+ // AWS SSO OIDC 端点 (用于 Builder ID)
19
+ ssoOIDCEndpoint: 'https://oidc.{{region}}.amazonaws.com',
20
+
21
+ // AWS Builder ID 起始 URL
22
+ builderIDStartURL: 'https://view.awsapps.com/start',
23
+
24
+ // 本地回调端口范围(用于 Social Auth HTTP 回调)
25
+ callbackPortStart: 19876,
26
+ callbackPortEnd: 19880,
27
+
28
+ // 超时配置
29
+ authTimeout: 10 * 60 * 1000, // 10 分钟
30
+ pollInterval: 5000, // 5 秒
31
+
32
+ // CodeWhisperer Scopes
33
+ scopes: [
34
+ 'codewhisperer:completions',
35
+ 'codewhisperer:analysis',
36
+ 'codewhisperer:conversations',
37
+ // 'codewhisperer:transformations',
38
+ // 'codewhisperer:taskassist'
39
+ ],
40
+
41
+ // 凭据存储(符合现有规范)
42
+ credentialsDir: '.kiro',
43
+ credentialsFile: 'oauth_creds.json',
44
+
45
+ // 日志前缀
46
+ logPrefix: '[Kiro Auth]'
47
+ };
48
+
49
+ /**
50
+ * 活动的 Kiro 回调服务器管理
51
+ */
52
+ const activeKiroServers = new Map();
53
+
54
+ /**
55
+ * 活动的 Kiro 轮询任务管理(用于 Builder ID Device Code)
56
+ */
57
+ const activeKiroPollingTasks = new Map();
58
+
59
+ /**
60
+ * 创建带代理支持的 fetch 请求
61
+ * 使用 axios 替代原生 fetch,以正确支持代理配置
62
+ * @param {string} url - 请求 URL
63
+ * @param {Object} options - fetch 选项(兼容 fetch API 格式)
64
+ * @param {string} providerType - 提供商类型,用于获取代理配置
65
+ * @returns {Promise<Object>} 返回类似 fetch Response 的对象
66
+ */
67
+ async function fetchWithProxy(url, options = {}, providerType) {
68
+ const proxyConfig = getProxyConfigForProvider(CONFIG, providerType);
69
+
70
+ // 构建 axios 配置
71
+ const axiosConfig = {
72
+ url,
73
+ method: options.method || 'GET',
74
+ headers: options.headers || {},
75
+ timeout: 30000, // 30 秒超时
76
+ };
77
+
78
+ // 处理请求体
79
+ if (options.body) {
80
+ axiosConfig.data = options.body;
81
+ }
82
+
83
+ // 配置代理
84
+ if (proxyConfig) {
85
+ axiosConfig.httpAgent = proxyConfig.httpAgent;
86
+ axiosConfig.httpsAgent = proxyConfig.httpsAgent;
87
+ axiosConfig.proxy = false; // 禁用 axios 内置代理,使用我们的 agent
88
+ console.log(`[OAuth] Using proxy for ${providerType}: ${CONFIG.PROXY_URL}`);
89
+ }
90
+
91
+ try {
92
+ const axios = (await import('axios')).default;
93
+ const response = await axios(axiosConfig);
94
+
95
+ // 返回类似 fetch Response 的对象
96
+ return {
97
+ ok: response.status >= 200 && response.status < 300,
98
+ status: response.status,
99
+ statusText: response.statusText,
100
+ headers: response.headers,
101
+ json: async () => response.data,
102
+ text: async () => typeof response.data === 'string' ? response.data : JSON.stringify(response.data),
103
+ };
104
+ } catch (error) {
105
+ // 处理 axios 错误,转换为类似 fetch 的响应格式
106
+ if (error.response) {
107
+ // 服务器返回了错误状态码
108
+ return {
109
+ ok: false,
110
+ status: error.response.status,
111
+ statusText: error.response.statusText,
112
+ headers: error.response.headers,
113
+ json: async () => error.response.data,
114
+ text: async () => typeof error.response.data === 'string' ? error.response.data : JSON.stringify(error.response.data),
115
+ };
116
+ }
117
+ // 网络错误或其他错误
118
+ throw error;
119
+ }
120
+ }
121
+
122
+ /**
123
+ * 生成 HTML 响应页面
124
+ * @param {boolean} isSuccess - 是否成功
125
+ * @param {string} message - 显示消息
126
+ * @returns {string} HTML 内容
127
+ */
128
+ function generateResponsePage(isSuccess, message) {
129
+ const title = isSuccess ? '授权成功!' : '授权失败';
130
+
131
+ return `<!DOCTYPE html>
132
+ <html lang="zh-CN">
133
+ <head>
134
+ <meta charset="utf-8">
135
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
136
+ <title>${title}</title>
137
+ </head>
138
+ <body>
139
+ <div class="container">
140
+ <h1>${title}</h1>
141
+ <p>${message}</p>
142
+ </div>
143
+ </body>
144
+ </html>`;
145
+ }
146
+
147
+ /**
148
+ * 生成 PKCE 代码验证器
149
+ * @returns {string} Base64URL 编码的随机字符串
150
+ */
151
+ function generateCodeVerifier() {
152
+ return crypto.randomBytes(32).toString('base64url');
153
+ }
154
+
155
+ /**
156
+ * 生成 PKCE 代码挑战
157
+ * @param {string} codeVerifier - 代码验证器
158
+ * @returns {string} Base64URL 编码的 SHA256 哈希
159
+ */
160
+ function generateCodeChallenge(codeVerifier) {
161
+ const hash = crypto.createHash('sha256');
162
+ hash.update(codeVerifier);
163
+ return hash.digest('base64url');
164
+ }
165
+
166
+ /**
167
+ * 处理 Kiro OAuth 授权(统一入口)
168
+ * @param {Object} currentConfig - 当前配置对象
169
+ * @param {Object} options - 额外选项
170
+ * - method: 'google' | 'github' | 'builder-id'
171
+ * - saveToConfigs: boolean
172
+ * @returns {Promise<Object>} 返回授权URL和相关信息
173
+ */
174
+ export async function handleKiroOAuth(currentConfig, options = {}) {
175
+ const method = options.method || options.authMethod || 'google'; // 默认使用 Google,同时支持 authMethod 参数
176
+
177
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Starting OAuth with method: ${method}`);
178
+
179
+ switch (method) {
180
+ case 'google':
181
+ return handleKiroSocialAuth('Google', currentConfig, options);
182
+ case 'github':
183
+ return handleKiroSocialAuth('Github', currentConfig, options);
184
+ case 'builder-id':
185
+ return handleKiroBuilderIDDeviceCode(currentConfig, options);
186
+ default:
187
+ throw new Error(`不支持的认证方式: ${method}`);
188
+ }
189
+ }
190
+
191
+ /**
192
+ * Kiro Social Auth (Google/GitHub) - 使用 HTTP localhost 回调
193
+ */
194
+ async function handleKiroSocialAuth(provider, currentConfig, options = {}) {
195
+ // 生成 PKCE 参数
196
+ const codeVerifier = generateCodeVerifier();
197
+ const codeChallenge = generateCodeChallenge(codeVerifier);
198
+ const state = crypto.randomBytes(16).toString('base64url');
199
+
200
+ // 启动本地回调服务器并获取端口
201
+ let handlerPort;
202
+ const providerKey = 'claude-kiro-oauth';
203
+ if (options.port) {
204
+ const port = parseInt(options.port);
205
+ await closeKiroServer(providerKey, port);
206
+ const server = await createKiroHttpCallbackServer(port, codeVerifier, state, options);
207
+ activeKiroServers.set(providerKey, { server, port });
208
+ handlerPort = port;
209
+ } else {
210
+ handlerPort = await startKiroCallbackServer(codeVerifier, state, options);
211
+ }
212
+
213
+ // 使用 HTTP localhost 作为 redirect_uri
214
+ const redirectUri = `http://127.0.0.1:${handlerPort}/oauth/callback`;
215
+
216
+ // 构建授权 URL
217
+ const authUrl = `${KIRO_OAUTH_CONFIG.authServiceEndpoint}/login?` +
218
+ `idp=${provider}&` +
219
+ `redirect_uri=${encodeURIComponent(redirectUri)}&` +
220
+ `code_challenge=${codeChallenge}&` +
221
+ `code_challenge_method=S256&` +
222
+ `state=${state}&` +
223
+ `prompt=select_account`;
224
+
225
+ return {
226
+ authUrl,
227
+ authInfo: {
228
+ provider: 'claude-kiro-oauth',
229
+ authMethod: 'social',
230
+ socialProvider: provider,
231
+ port: handlerPort,
232
+ redirectUri: redirectUri,
233
+ state: state,
234
+ ...options
235
+ }
236
+ };
237
+ }
238
+
239
+ /**
240
+ * Kiro Builder ID - Device Code Flow(类似 Qwen OAuth 模式)
241
+ */
242
+ async function handleKiroBuilderIDDeviceCode(currentConfig, options = {}) {
243
+ // 停止之前的轮询任务
244
+ for (const [existingTaskId] of activeKiroPollingTasks.entries()) {
245
+ if (existingTaskId.startsWith('kiro-')) {
246
+ stopKiroPollingTask(existingTaskId);
247
+ }
248
+ }
249
+
250
+ // 获取 Builder ID Start URL(优先使用前端传入的值,否则使用默认值)
251
+ const builderIDStartURL = options.builderIDStartURL || KIRO_OAUTH_CONFIG.builderIDStartURL;
252
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Using Builder ID Start URL: ${builderIDStartURL}`);
253
+
254
+ // 1. 注册 OIDC 客户端
255
+ const region = options.region || 'us-east-1';
256
+ const ssoOIDCEndpoint = KIRO_OAUTH_CONFIG.ssoOIDCEndpoint.replace('{{region}}', region);
257
+
258
+ const regResponse = await fetchWithProxy(`${ssoOIDCEndpoint}/client/register`, {
259
+ method: 'POST',
260
+ headers: {
261
+ 'Content-Type': 'application/json',
262
+ 'User-Agent': 'KiroIDE'
263
+ },
264
+ body: JSON.stringify({
265
+ clientName: 'Kiro IDE',
266
+ clientType: 'public',
267
+ scopes: KIRO_OAUTH_CONFIG.scopes,
268
+ // grantTypes: ['urn:ietf:params:oauth:grant-type:device_code', 'refresh_token']
269
+ })
270
+ }, 'claude-kiro-oauth');
271
+
272
+ if (!regResponse.ok) {
273
+ throw new Error(`Kiro OAuth 客户端注册失败: ${regResponse.status}`);
274
+ }
275
+
276
+ const regData = await regResponse.json();
277
+
278
+ // 2. 启动设备授权
279
+ const authResponse = await fetchWithProxy(`${ssoOIDCEndpoint}/device_authorization`, {
280
+ method: 'POST',
281
+ headers: {
282
+ 'Content-Type': 'application/json'
283
+ },
284
+ body: JSON.stringify({
285
+ clientId: regData.clientId,
286
+ clientSecret: regData.clientSecret,
287
+ startUrl: builderIDStartURL
288
+ })
289
+ }, 'claude-kiro-oauth');
290
+
291
+ if (!authResponse.ok) {
292
+ throw new Error(`Kiro OAuth 设备授权失败: ${authResponse.status}`);
293
+ }
294
+
295
+ const deviceAuth = await authResponse.json();
296
+
297
+ // 3. 启动后台轮询(类似 Qwen OAuth ��模式)
298
+ const taskId = `kiro-${deviceAuth.deviceCode.substring(0, 8)}-${Date.now()}`;
299
+
300
+
301
+ // 异步轮询
302
+ pollKiroBuilderIDToken(
303
+ regData.clientId,
304
+ regData.clientSecret,
305
+ deviceAuth.deviceCode,
306
+ 5,
307
+ 300,
308
+ taskId,
309
+ { ...options, region }
310
+ ).catch(error => {
311
+ console.error(`${KIRO_OAUTH_CONFIG.logPrefix} 轮询失败 [${taskId}]:`, error);
312
+ broadcastEvent('oauth_error', {
313
+ provider: 'claude-kiro-oauth',
314
+ error: error.message,
315
+ timestamp: new Date().toISOString()
316
+ });
317
+ });
318
+
319
+ return {
320
+ authUrl: deviceAuth.verificationUriComplete,
321
+ authInfo: {
322
+ provider: 'claude-kiro-oauth',
323
+ authMethod: 'builder-id',
324
+ deviceCode: deviceAuth.deviceCode,
325
+ userCode: deviceAuth.userCode,
326
+ verificationUri: deviceAuth.verificationUri,
327
+ verificationUriComplete: deviceAuth.verificationUriComplete,
328
+ expiresIn: deviceAuth.expiresIn,
329
+ interval: deviceAuth.interval,
330
+ ...options
331
+ }
332
+ };
333
+ }
334
+
335
+ /**
336
+ * 轮询获取 Kiro Builder ID Token
337
+ */
338
+ async function pollKiroBuilderIDToken(clientId, clientSecret, deviceCode, interval, expiresIn, taskId, options = {}) {
339
+ let credPath = path.join(os.homedir(), KIRO_OAUTH_CONFIG.credentialsDir, KIRO_OAUTH_CONFIG.credentialsFile);
340
+ const maxAttempts = Math.floor(expiresIn / interval);
341
+ let attempts = 0;
342
+
343
+ const taskControl = { shouldStop: false };
344
+ activeKiroPollingTasks.set(taskId, taskControl);
345
+
346
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 开始轮询令牌 [${taskId}]`);
347
+
348
+ const poll = async () => {
349
+ if (taskControl.shouldStop) {
350
+ throw new Error('轮询任务已被取消');
351
+ }
352
+
353
+ if (attempts >= maxAttempts) {
354
+ activeKiroPollingTasks.delete(taskId);
355
+ throw new Error('授权超时');
356
+ }
357
+
358
+ attempts++;
359
+
360
+ try {
361
+ const region = options.region || 'us-east-1';
362
+ const ssoOIDCEndpoint = KIRO_OAUTH_CONFIG.ssoOIDCEndpoint.replace('{{region}}', region);
363
+ const response = await fetchWithProxy(`${ssoOIDCEndpoint}/token`, {
364
+ method: 'POST',
365
+ headers: {
366
+ 'Content-Type': 'application/json',
367
+ 'User-Agent': 'KiroIDE'
368
+ },
369
+ body: JSON.stringify({
370
+ clientId,
371
+ clientSecret,
372
+ deviceCode,
373
+ grantType: 'urn:ietf:params:oauth:grant-type:device_code'
374
+ })
375
+ }, 'claude-kiro-oauth');
376
+
377
+ const data = await response.json();
378
+
379
+ if (response.ok && data.accessToken) {
380
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 成功获取令牌 [${taskId}]`);
381
+
382
+ // 保存令牌(符合现有规范)
383
+ if (options.saveToConfigs) {
384
+ const timestamp = Date.now();
385
+ const folderName = `${timestamp}_kiro-auth-token`;
386
+ const targetDir = path.join(process.cwd(), 'configs', 'kiro', folderName);
387
+ await fs.promises.mkdir(targetDir, { recursive: true });
388
+ credPath = path.join(targetDir, `${folderName}.json`);
389
+ }
390
+
391
+ const tokenData = {
392
+ accessToken: data.accessToken,
393
+ refreshToken: data.refreshToken,
394
+ expiresAt: new Date(Date.now() + data.expiresIn * 1000).toISOString(),
395
+ authMethod: 'builder-id',
396
+ clientId,
397
+ clientSecret,
398
+ idcRegion: options.region || 'us-east-1'
399
+ };
400
+
401
+ await fs.promises.mkdir(path.dirname(credPath), { recursive: true });
402
+ await fs.promises.writeFile(credPath, JSON.stringify(tokenData, null, 2));
403
+
404
+ activeKiroPollingTasks.delete(taskId);
405
+
406
+ // 广播成功事件(符合现有规范)
407
+ broadcastEvent('oauth_success', {
408
+ provider: 'claude-kiro-oauth',
409
+ credPath,
410
+ relativePath: path.relative(process.cwd(), credPath),
411
+ timestamp: new Date().toISOString()
412
+ });
413
+
414
+ // 自动关联新生成的凭据到 Pools
415
+ await autoLinkProviderConfigs(CONFIG);
416
+
417
+ return tokenData;
418
+ }
419
+
420
+ // 检查错误类型
421
+ if (data.error === 'authorization_pending') {
422
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 等待用户授权 [${taskId}]... (${attempts}/${maxAttempts})`);
423
+ await new Promise(resolve => setTimeout(resolve, interval * 1000));
424
+ return poll();
425
+ } else if (data.error === 'slow_down') {
426
+ await new Promise(resolve => setTimeout(resolve, (interval + 5) * 1000));
427
+ return poll();
428
+ } else {
429
+ activeKiroPollingTasks.delete(taskId);
430
+ throw new Error(`授权失败: ${data.error || '未知错误'}`);
431
+ }
432
+ } catch (error) {
433
+ if (error.message.includes('授权') || error.message.includes('取消')) {
434
+ throw error;
435
+ }
436
+ await new Promise(resolve => setTimeout(resolve, interval * 1000));
437
+ return poll();
438
+ }
439
+ };
440
+
441
+ return poll();
442
+ }
443
+
444
+ /**
445
+ * 停止 Kiro 轮询任务
446
+ */
447
+ function stopKiroPollingTask(taskId) {
448
+ const task = activeKiroPollingTasks.get(taskId);
449
+ if (task) {
450
+ task.shouldStop = true;
451
+ activeKiroPollingTasks.delete(taskId);
452
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 已停止轮询任务: ${taskId}`);
453
+ }
454
+ }
455
+
456
+ /**
457
+ * 启动 Kiro 回调服务器(用于 Social Auth HTTP 回调)
458
+ */
459
+ async function startKiroCallbackServer(codeVerifier, expectedState, options = {}) {
460
+ const portStart = KIRO_OAUTH_CONFIG.callbackPortStart;
461
+ const portEnd = KIRO_OAUTH_CONFIG.callbackPortEnd;
462
+
463
+ for (let port = portStart; port <= portEnd; port++) {
464
+ // 关闭已存在的服务器
465
+ await closeKiroServer(port);
466
+
467
+ try {
468
+ const server = await createKiroHttpCallbackServer(port, codeVerifier, expectedState, options);
469
+ activeKiroServers.set('claude-kiro-oauth', { server, port });
470
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 回调服务器已启动于端口 ${port}`);
471
+ return port;
472
+ } catch (err) {
473
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 端口 ${port} 被占用,尝试下一个...`);
474
+ }
475
+ }
476
+
477
+ throw new Error('所有端口都被占用');
478
+ }
479
+
480
+ /**
481
+ * 关闭 Kiro 服务器
482
+ */
483
+ async function closeKiroServer(provider, port = null) {
484
+ const existing = activeKiroServers.get(provider);
485
+ if (existing) {
486
+ await new Promise((resolve) => {
487
+ existing.server.close(() => {
488
+ activeKiroServers.delete(provider);
489
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 已关闭提供商 ${provider} 在端口 ${existing.port} 上的旧服务器`);
490
+ resolve();
491
+ });
492
+ });
493
+ }
494
+
495
+ if (port) {
496
+ for (const [p, info] of activeKiroServers.entries()) {
497
+ if (info.port === port) {
498
+ await new Promise((resolve) => {
499
+ info.server.close(() => {
500
+ activeKiroServers.delete(p);
501
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 已关闭端口 ${port} 上的旧服务器`);
502
+ resolve();
503
+ });
504
+ });
505
+ }
506
+ }
507
+ }
508
+ }
509
+
510
+ /**
511
+ * 创建 Kiro HTTP 回调服务器
512
+ */
513
+ function createKiroHttpCallbackServer(port, codeVerifier, expectedState, options = {}) {
514
+ const redirectUri = `http://127.0.0.1:${port}/oauth/callback`;
515
+
516
+ return new Promise((resolve, reject) => {
517
+ const server = http.createServer(async (req, res) => {
518
+ try {
519
+ const url = new URL(req.url, `http://127.0.0.1:${port}`);
520
+
521
+ if (url.pathname === '/oauth/callback') {
522
+ const code = url.searchParams.get('code');
523
+ const state = url.searchParams.get('state');
524
+ const errorParam = url.searchParams.get('error');
525
+
526
+ if (errorParam) {
527
+ res.writeHead(400, { 'Content-Type': 'text/html; charset=utf-8' });
528
+ res.end(generateResponsePage(false, `授权失败: ${errorParam}`));
529
+ return;
530
+ }
531
+
532
+ if (state !== expectedState) {
533
+ res.writeHead(400, { 'Content-Type': 'text/html; charset=utf-8' });
534
+ res.end(generateResponsePage(false, 'State 验证失败'));
535
+ return;
536
+ }
537
+
538
+ // 交换 Code 获取 Token(使用动态的 redirect_uri)
539
+ const tokenResponse = await fetchWithProxy(`${KIRO_OAUTH_CONFIG.authServiceEndpoint}/oauth/token`, {
540
+ method: 'POST',
541
+ headers: {
542
+ 'Content-Type': 'application/json',
543
+ 'User-Agent': 'AIClient-2-API/1.0.0'
544
+ },
545
+ body: JSON.stringify({
546
+ code,
547
+ code_verifier: codeVerifier,
548
+ redirect_uri: redirectUri
549
+ })
550
+ }, 'claude-kiro-oauth');
551
+
552
+ if (!tokenResponse.ok) {
553
+ const errorText = await tokenResponse.text();
554
+ console.error(`${KIRO_OAUTH_CONFIG.logPrefix} Token exchange failed:`, errorText);
555
+ res.writeHead(500, { 'Content-Type': 'text/html; charset=utf-8' });
556
+ res.end(generateResponsePage(false, `获取令牌失败: ${tokenResponse.status}`));
557
+ return;
558
+ }
559
+
560
+ const tokenData = await tokenResponse.json();
561
+
562
+ // 保存令牌
563
+ let credPath = path.join(os.homedir(), KIRO_OAUTH_CONFIG.credentialsDir, KIRO_OAUTH_CONFIG.credentialsFile);
564
+
565
+ if (options.saveToConfigs) {
566
+ const timestamp = Date.now();
567
+ const folderName = `${timestamp}_kiro-auth-token`;
568
+ const targetDir = path.join(process.cwd(), 'configs', 'kiro', folderName);
569
+ await fs.promises.mkdir(targetDir, { recursive: true });
570
+ credPath = path.join(targetDir, `${folderName}.json`);
571
+ }
572
+
573
+ const saveData = {
574
+ accessToken: tokenData.accessToken,
575
+ refreshToken: tokenData.refreshToken,
576
+ profileArn: tokenData.profileArn,
577
+ expiresAt: new Date(Date.now() + (tokenData.expiresIn || 3600) * 1000).toISOString(),
578
+ authMethod: 'social',
579
+ region: 'us-east-1'
580
+ };
581
+
582
+ await fs.promises.mkdir(path.dirname(credPath), { recursive: true });
583
+ await fs.promises.writeFile(credPath, JSON.stringify(saveData, null, 2));
584
+
585
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 令牌已保存: ${credPath}`);
586
+
587
+ // 广播成功事件
588
+ broadcastEvent('oauth_success', {
589
+ provider: 'claude-kiro-oauth',
590
+ credPath,
591
+ relativePath: path.relative(process.cwd(), credPath),
592
+ timestamp: new Date().toISOString()
593
+ });
594
+
595
+ // 自动关联新生成的凭据到 Pools
596
+ await autoLinkProviderConfigs(CONFIG);
597
+
598
+ res.writeHead(200, { 'Content-Type': 'text/html; charset=utf-8' });
599
+ res.end(generateResponsePage(true, '授权成功!您可以关闭此页面'));
600
+
601
+ // 关闭服务器
602
+ server.close(() => {
603
+ activeKiroServers.delete('claude-kiro-oauth');
604
+ });
605
+
606
+ } else {
607
+ res.writeHead(204);
608
+ res.end();
609
+ }
610
+ } catch (error) {
611
+ console.error(`${KIRO_OAUTH_CONFIG.logPrefix} 处理回调出错:`, error);
612
+ res.writeHead(500, { 'Content-Type': 'text/html; charset=utf-8' });
613
+ res.end(generateResponsePage(false, `服务器错误: ${error.message}`));
614
+ }
615
+ });
616
+
617
+ server.on('error', reject);
618
+ server.listen(port, '127.0.0.1', () => resolve(server));
619
+
620
+ // 超时自动关闭
621
+ setTimeout(() => {
622
+ if (server.listening) {
623
+ server.close(() => {
624
+ activeKiroServers.delete('claude-kiro-oauth');
625
+ });
626
+ }
627
+ }, KIRO_OAUTH_CONFIG.authTimeout);
628
+ });
629
+ }
630
+
631
+ /**
632
+ * Kiro Token 刷新常量
633
+ */
634
+ const KIRO_REFRESH_CONSTANTS = {
635
+ REFRESH_URL: 'https://prod.{{region}}.auth.desktop.kiro.dev/refreshToken',
636
+ REFRESH_IDC_URL: 'https://oidc.{{region}}.amazonaws.com/token',
637
+ CONTENT_TYPE_JSON: 'application/json',
638
+ AUTH_METHOD_SOCIAL: 'social',
639
+ DEFAULT_PROVIDER: 'Google',
640
+ REQUEST_TIMEOUT: 30000,
641
+ DEFAULT_REGION: 'us-east-1',
642
+ IDC_REGION: 'us-east-1' // 用于 REFRESH_IDC_URL 的区域配置
643
+ };
644
+
645
+ /**
646
+ * 通过 refreshToken 获取 accessToken
647
+ * @param {string} refreshToken - Kiro 的 refresh token
648
+ * @param {string} region - AWS 区域 (默认: us-east-1)
649
+ * @returns {Promise<Object>} 包含 accessToken 等信息的对象
650
+ */
651
+ async function refreshKiroToken(refreshToken, region = KIRO_REFRESH_CONSTANTS.DEFAULT_REGION) {
652
+ const refreshUrl = KIRO_REFRESH_CONSTANTS.REFRESH_URL.replace('{{region}}', region);
653
+
654
+ const controller = new AbortController();
655
+ const timeoutId = setTimeout(() => controller.abort(), KIRO_REFRESH_CONSTANTS.REQUEST_TIMEOUT);
656
+
657
+ try {
658
+ const response = await fetchWithProxy(refreshUrl, {
659
+ method: 'POST',
660
+ headers: {
661
+ 'Content-Type': KIRO_REFRESH_CONSTANTS.CONTENT_TYPE_JSON
662
+ },
663
+ body: JSON.stringify({ refreshToken }),
664
+ signal: controller.signal
665
+ }, 'claude-kiro-oauth');
666
+
667
+ clearTimeout(timeoutId);
668
+
669
+ if (!response.ok) {
670
+ const errorText = await response.text();
671
+ throw new Error(`HTTP ${response.status}: ${errorText}`);
672
+ }
673
+
674
+ const data = await response.json();
675
+
676
+ if (!data.accessToken) {
677
+ throw new Error('Invalid refresh response: Missing accessToken');
678
+ }
679
+
680
+ const expiresIn = data.expiresIn || 3600;
681
+ const expiresAt = new Date(Date.now() + expiresIn * 1000).toISOString();
682
+
683
+ return {
684
+ accessToken: data.accessToken,
685
+ refreshToken: data.refreshToken || refreshToken,
686
+ profileArn: data.profileArn || '',
687
+ expiresAt: expiresAt,
688
+ authMethod: KIRO_REFRESH_CONSTANTS.AUTH_METHOD_SOCIAL,
689
+ provider: KIRO_REFRESH_CONSTANTS.DEFAULT_PROVIDER,
690
+ region: region
691
+ };
692
+ } catch (error) {
693
+ clearTimeout(timeoutId);
694
+ if (error.name === 'AbortError') {
695
+ throw new Error('Request timeout');
696
+ }
697
+ throw error;
698
+ }
699
+ }
700
+
701
+ /**
702
+ * 检查 Kiro 凭据是否已存在(基于 refreshToken + provider 组合)
703
+ * @param {string} refreshToken - 要检查的 refreshToken
704
+ * @param {string} provider - 提供商名称 (默认: 'claude-kiro-oauth')
705
+ * @returns {Promise<{isDuplicate: boolean, existingPath?: string}>} 检查结果
706
+ */
707
+ export async function checkKiroCredentialsDuplicate(refreshToken, provider = 'claude-kiro-oauth') {
708
+ const kiroDir = path.join(process.cwd(), 'configs', 'kiro');
709
+
710
+ try {
711
+ // 检查 configs/kiro 目录是否存在
712
+ if (!fs.existsSync(kiroDir)) {
713
+ return { isDuplicate: false };
714
+ }
715
+
716
+ // 递归扫描所有 JSON 文件
717
+ const scanDirectory = async (dirPath) => {
718
+ const entries = await fs.promises.readdir(dirPath, { withFileTypes: true });
719
+
720
+ for (const entry of entries) {
721
+ const fullPath = path.join(dirPath, entry.name);
722
+
723
+ if (entry.isDirectory()) {
724
+ const result = await scanDirectory(fullPath);
725
+ if (result.isDuplicate) {
726
+ return result;
727
+ }
728
+ } else if (entry.isFile() && entry.name.endsWith('.json')) {
729
+ try {
730
+ const content = await fs.promises.readFile(fullPath, 'utf8');
731
+ const credentials = JSON.parse(content);
732
+
733
+ // 检查 refreshToken 是否匹配
734
+ if (credentials.refreshToken && credentials.refreshToken === refreshToken) {
735
+ const relativePath = path.relative(process.cwd(), fullPath);
736
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Found duplicate refreshToken in: ${relativePath}`);
737
+ return {
738
+ isDuplicate: true,
739
+ existingPath: relativePath
740
+ };
741
+ }
742
+ } catch (parseError) {
743
+ // 忽略解析错误的文件
744
+ }
745
+ }
746
+ }
747
+
748
+ return { isDuplicate: false };
749
+ };
750
+
751
+ return await scanDirectory(kiroDir);
752
+
753
+ } catch (error) {
754
+ console.warn(`${KIRO_OAUTH_CONFIG.logPrefix} Error checking duplicates:`, error.message);
755
+ return { isDuplicate: false };
756
+ }
757
+ }
758
+
759
+ /**
760
+ * 批量导入 Kiro refreshToken 并生成凭据文件
761
+ * @param {string[]} refreshTokens - refreshToken 数组
762
+ * @param {string} region - AWS 区域 (默认: us-east-1)
763
+ * @param {boolean} skipDuplicateCheck - 是否跳过重复检查 (默认: false)
764
+ * @returns {Promise<Object>} 批量处理结果
765
+ */
766
+ export async function batchImportKiroRefreshTokens(refreshTokens, region = KIRO_REFRESH_CONSTANTS.DEFAULT_REGION, skipDuplicateCheck = false) {
767
+ const results = {
768
+ total: refreshTokens.length,
769
+ success: 0,
770
+ failed: 0,
771
+ details: []
772
+ };
773
+
774
+ for (let i = 0; i < refreshTokens.length; i++) {
775
+ const refreshToken = refreshTokens[i].trim();
776
+
777
+ if (!refreshToken) {
778
+ results.details.push({
779
+ index: i + 1,
780
+ success: false,
781
+ error: 'Empty token'
782
+ });
783
+ results.failed++;
784
+ continue;
785
+ }
786
+
787
+ // 检查重复
788
+ if (!skipDuplicateCheck) {
789
+ const duplicateCheck = await checkKiroCredentialsDuplicate(refreshToken);
790
+ if (duplicateCheck.isDuplicate) {
791
+ results.details.push({
792
+ index: i + 1,
793
+ success: false,
794
+ error: 'duplicate',
795
+ existingPath: duplicateCheck.existingPath
796
+ });
797
+ results.failed++;
798
+ continue;
799
+ }
800
+ }
801
+
802
+ try {
803
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 正在刷新第 ${i + 1}/${refreshTokens.length} 个 token...`);
804
+
805
+ const tokenData = await refreshKiroToken(refreshToken, region);
806
+
807
+ // 生成文件路径: configs/kiro/{timestamp}_kiro-auth-token/{timestamp}_kiro-auth-token.json
808
+ const timestamp = Date.now();
809
+ const folderName = `${timestamp}_kiro-auth-token`;
810
+ const targetDir = path.join(process.cwd(), 'configs', 'kiro', folderName);
811
+ await fs.promises.mkdir(targetDir, { recursive: true });
812
+
813
+ const credPath = path.join(targetDir, `${folderName}.json`);
814
+ await fs.promises.writeFile(credPath, JSON.stringify(tokenData, null, 2));
815
+
816
+ const relativePath = path.relative(process.cwd(), credPath);
817
+
818
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Token ${i + 1} 已保存: ${relativePath}`);
819
+
820
+ results.details.push({
821
+ index: i + 1,
822
+ success: true,
823
+ path: relativePath,
824
+ expiresAt: tokenData.expiresAt
825
+ });
826
+ results.success++;
827
+
828
+ } catch (error) {
829
+ console.error(`${KIRO_OAUTH_CONFIG.logPrefix} Token ${i + 1} 刷新失败:`, error.message);
830
+
831
+ results.details.push({
832
+ index: i + 1,
833
+ success: false,
834
+ error: error.message
835
+ });
836
+ results.failed++;
837
+ }
838
+ }
839
+
840
+ // 如果有成功的,广播事件并自动关联
841
+ if (results.success > 0) {
842
+ broadcastEvent('oauth_batch_success', {
843
+ provider: 'claude-kiro-oauth',
844
+ count: results.success,
845
+ timestamp: new Date().toISOString()
846
+ });
847
+
848
+ // 自动关联新生成的凭据到 Pools
849
+ await autoLinkProviderConfigs(CONFIG);
850
+ }
851
+
852
+ return results;
853
+ }
854
+
855
+ /**
856
+ * 批量导入 Kiro refreshToken 并生成凭据文件(流式版本,支持实时进度回调)
857
+ * @param {string[]} refreshTokens - refreshToken 数组
858
+ * @param {string} region - AWS 区域 (默认: us-east-1)
859
+ * @param {Function} onProgress - 进度回调函数,每处理完一个 token 调用
860
+ * @param {boolean} skipDuplicateCheck - 是否跳过重复检查 (默认: false)
861
+ * @returns {Promise<Object>} 批量处理结果
862
+ */
863
+ export async function batchImportKiroRefreshTokensStream(refreshTokens, region = KIRO_REFRESH_CONSTANTS.DEFAULT_REGION, onProgress = null, skipDuplicateCheck = false) {
864
+ const results = {
865
+ total: refreshTokens.length,
866
+ success: 0,
867
+ failed: 0,
868
+ details: []
869
+ };
870
+
871
+ for (let i = 0; i < refreshTokens.length; i++) {
872
+ const refreshToken = refreshTokens[i].trim();
873
+ const progressData = {
874
+ index: i + 1,
875
+ total: refreshTokens.length,
876
+ current: null
877
+ };
878
+
879
+ if (!refreshToken) {
880
+ progressData.current = {
881
+ index: i + 1,
882
+ success: false,
883
+ error: 'Empty token'
884
+ };
885
+ results.details.push(progressData.current);
886
+ results.failed++;
887
+
888
+ // 发送进度更新
889
+ if (onProgress) {
890
+ onProgress({
891
+ ...progressData,
892
+ successCount: results.success,
893
+ failedCount: results.failed
894
+ });
895
+ }
896
+ continue;
897
+ }
898
+
899
+ // 检查重复
900
+ if (!skipDuplicateCheck) {
901
+ const duplicateCheck = await checkKiroCredentialsDuplicate(refreshToken);
902
+ if (duplicateCheck.isDuplicate) {
903
+ progressData.current = {
904
+ index: i + 1,
905
+ success: false,
906
+ error: 'duplicate',
907
+ existingPath: duplicateCheck.existingPath
908
+ };
909
+ results.details.push(progressData.current);
910
+ results.failed++;
911
+
912
+ // 发送进度更新
913
+ if (onProgress) {
914
+ onProgress({
915
+ ...progressData,
916
+ successCount: results.success,
917
+ failedCount: results.failed
918
+ });
919
+ }
920
+ continue;
921
+ }
922
+ }
923
+
924
+ try {
925
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} 正在刷新第 ${i + 1}/${refreshTokens.length} 个 token...`);
926
+
927
+ const tokenData = await refreshKiroToken(refreshToken, region);
928
+
929
+ // 生成文件路径: configs/kiro/{timestamp}_kiro-auth-token/{timestamp}_kiro-auth-token.json
930
+ const timestamp = Date.now();
931
+ const folderName = `${timestamp}_kiro-auth-token`;
932
+ const targetDir = path.join(process.cwd(), 'configs', 'kiro', folderName);
933
+ await fs.promises.mkdir(targetDir, { recursive: true });
934
+
935
+ const credPath = path.join(targetDir, `${folderName}.json`);
936
+ await fs.promises.writeFile(credPath, JSON.stringify(tokenData, null, 2));
937
+
938
+ const relativePath = path.relative(process.cwd(), credPath);
939
+
940
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Token ${i + 1} 已保存: ${relativePath}`);
941
+
942
+ progressData.current = {
943
+ index: i + 1,
944
+ success: true,
945
+ path: relativePath,
946
+ expiresAt: tokenData.expiresAt
947
+ };
948
+ results.details.push(progressData.current);
949
+ results.success++;
950
+
951
+ } catch (error) {
952
+ console.error(`${KIRO_OAUTH_CONFIG.logPrefix} Token ${i + 1} 刷新失败:`, error.message);
953
+
954
+ progressData.current = {
955
+ index: i + 1,
956
+ success: false,
957
+ error: error.message
958
+ };
959
+ results.details.push(progressData.current);
960
+ results.failed++;
961
+ }
962
+
963
+ // 发送进度更新
964
+ if (onProgress) {
965
+ onProgress({
966
+ ...progressData,
967
+ successCount: results.success,
968
+ failedCount: results.failed
969
+ });
970
+ }
971
+ }
972
+
973
+ // 如果有成功的,广播事件并自动关联
974
+ if (results.success > 0) {
975
+ broadcastEvent('oauth_batch_success', {
976
+ provider: 'claude-kiro-oauth',
977
+ count: results.success,
978
+ timestamp: new Date().toISOString()
979
+ });
980
+
981
+ // 自动关联新生成的凭据到 Pools
982
+ await autoLinkProviderConfigs(CONFIG);
983
+ }
984
+
985
+ return results;
986
+ }
987
+
988
+ /**
989
+ * 导入 AWS SSO 凭据用于 Kiro (Builder ID 模式)
990
+ * 从用户上传的 AWS SSO cache 文件中导入凭据
991
+ * @param {Object} credentials - 合并后的凭据对象,需包含 clientId 和 clientSecret
992
+ * @param {boolean} skipDuplicateCheck - 是否跳过重复检查 (默认: false)
993
+ * @returns {Promise<Object>} 导入结果
994
+ */
995
+ export async function importAwsCredentials(credentials, skipDuplicateCheck = false) {
996
+ try {
997
+ // 验证必需字段 - 需要四个字段都存在
998
+ const missingFields = [];
999
+ if (!credentials.clientId) missingFields.push('clientId');
1000
+ if (!credentials.clientSecret) missingFields.push('clientSecret');
1001
+ if (!credentials.accessToken) missingFields.push('accessToken');
1002
+ if (!credentials.refreshToken) missingFields.push('refreshToken');
1003
+
1004
+ if (missingFields.length > 0) {
1005
+ return {
1006
+ success: false,
1007
+ error: `Missing required fields: ${missingFields.join(', ')}`
1008
+ };
1009
+ }
1010
+
1011
+ // 检查重复凭据
1012
+ if (!skipDuplicateCheck) {
1013
+ const duplicateCheck = await checkKiroCredentialsDuplicate(credentials.refreshToken);
1014
+ if (duplicateCheck.isDuplicate) {
1015
+ return {
1016
+ success: false,
1017
+ error: 'duplicate',
1018
+ existingPath: duplicateCheck.existingPath
1019
+ };
1020
+ }
1021
+ }
1022
+
1023
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Importing AWS credentials...`);
1024
+
1025
+ // 准备凭据数据 - 四个字段都是必需的
1026
+ const credentialsData = {
1027
+ clientId: credentials.clientId,
1028
+ clientSecret: credentials.clientSecret,
1029
+ accessToken: credentials.accessToken,
1030
+ refreshToken: credentials.refreshToken,
1031
+ authMethod: credentials.authMethod || 'builder-id',
1032
+ // region: credentials.region || KIRO_REFRESH_CONSTANTS.DEFAULT_REGION,
1033
+ idcRegion: credentials.idcRegion || KIRO_REFRESH_CONSTANTS.IDC_REGION
1034
+ };
1035
+
1036
+ // 可选字段
1037
+ if (credentials.expiresAt) {
1038
+ credentialsData.expiresAt = credentials.expiresAt;
1039
+ }
1040
+ if (credentials.startUrl) {
1041
+ credentialsData.startUrl = credentials.startUrl;
1042
+ }
1043
+ if (credentials.registrationExpiresAt) {
1044
+ credentialsData.registrationExpiresAt = credentials.registrationExpiresAt;
1045
+ }
1046
+
1047
+ // 尝试刷新获取最新的 accessToken
1048
+ try {
1049
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Attempting to refresh token with provided credentials...`);
1050
+
1051
+ const refreshRegion = credentials.idcRegion || KIRO_REFRESH_CONSTANTS.IDC_REGION;
1052
+ const refreshUrl = KIRO_REFRESH_CONSTANTS.REFRESH_IDC_URL.replace('{{region}}', refreshRegion);
1053
+
1054
+ const refreshResponse = await fetchWithProxy(refreshUrl, {
1055
+ method: 'POST',
1056
+ headers: {
1057
+ 'Content-Type': 'application/json'
1058
+ },
1059
+ body: JSON.stringify({
1060
+ refreshToken: credentials.refreshToken,
1061
+ clientId: credentials.clientId,
1062
+ clientSecret: credentials.clientSecret,
1063
+ grantType: 'refresh_token'
1064
+ })
1065
+ }, 'claude-kiro-oauth');
1066
+
1067
+ if (refreshResponse.ok) {
1068
+ const tokenData = await refreshResponse.json();
1069
+ credentialsData.accessToken = tokenData.accessToken;
1070
+ credentialsData.refreshToken = tokenData.refreshToken;
1071
+ const expiresIn = tokenData.expiresIn || 3600;
1072
+ credentialsData.expiresAt = new Date(Date.now() + expiresIn * 1000).toISOString();
1073
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} Token refreshed successfully`);
1074
+ } else {
1075
+ console.warn(`${KIRO_OAUTH_CONFIG.logPrefix} Token refresh failed, saving original credentials`);
1076
+ }
1077
+ } catch (refreshError) {
1078
+ console.warn(`${KIRO_OAUTH_CONFIG.logPrefix} Token refresh error:`, refreshError.message);
1079
+ // 继续保存原始凭据
1080
+ }
1081
+
1082
+ // 生成文件路径: configs/kiro/{timestamp}_kiro-auth-token/{timestamp}_kiro-auth-token.json
1083
+ const timestamp = Date.now();
1084
+ const folderName = `${timestamp}_kiro-auth-token`;
1085
+ const targetDir = path.join(process.cwd(), 'configs', 'kiro', folderName);
1086
+ await fs.promises.mkdir(targetDir, { recursive: true });
1087
+
1088
+ const credPath = path.join(targetDir, `${folderName}.json`);
1089
+ await fs.promises.writeFile(credPath, JSON.stringify(credentialsData, null, 2));
1090
+
1091
+ const relativePath = path.relative(process.cwd(), credPath);
1092
+
1093
+ console.log(`${KIRO_OAUTH_CONFIG.logPrefix} AWS credentials saved to: ${relativePath}`);
1094
+
1095
+ // 广播事件
1096
+ broadcastEvent('oauth_success', {
1097
+ provider: 'claude-kiro-oauth',
1098
+ relativePath: relativePath,
1099
+ timestamp: new Date().toISOString()
1100
+ });
1101
+
1102
+ // 自动关联新生成的凭据到 Pools
1103
+ await autoLinkProviderConfigs(CONFIG);
1104
+
1105
+ return {
1106
+ success: true,
1107
+ path: relativePath
1108
+ };
1109
+
1110
+ } catch (error) {
1111
+ console.error(`${KIRO_OAUTH_CONFIG.logPrefix} AWS credentials import failed:`, error);
1112
+ return {
1113
+ success: false,
1114
+ error: error.message
1115
+ };
1116
+ }
1117
+ }
src/auth/oauth-handlers.js ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // OAuth 处理器统一导出文件
2
+ // 此文件已按提供商拆分为多个独立文件,请从 index.js 导入
3
+
4
+ // 重新导出所有 OAuth 处理函数以保持向后兼容
5
+ export {
6
+ // Codex OAuth
7
+ refreshCodexTokensWithRetry,
8
+ handleCodexOAuth,
9
+ handleCodexOAuthCallback,
10
+ // Gemini OAuth
11
+ handleGeminiCliOAuth,
12
+ handleGeminiAntigravityOAuth,
13
+ // Qwen OAuth
14
+ handleQwenOAuth,
15
+ // Kiro OAuth
16
+ handleKiroOAuth,
17
+ checkKiroCredentialsDuplicate,
18
+ batchImportKiroRefreshTokens,
19
+ batchImportKiroRefreshTokensStream,
20
+ importAwsCredentials,
21
+ // iFlow OAuth
22
+ handleIFlowOAuth,
23
+ refreshIFlowTokens,
24
+ } from './index.js';
src/auth/qwen-oauth.js ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fs from 'fs';
2
+ import path from 'path';
3
+ import os from 'os';
4
+ import crypto from 'crypto';
5
+ import { broadcastEvent } from '../services/ui-manager.js';
6
+ import { autoLinkProviderConfigs } from '../services/service-manager.js';
7
+ import { CONFIG } from '../core/config-manager.js';
8
+ import { getProxyConfigForProvider } from '../utils/proxy-utils.js';
9
+
10
+ /**
11
+ * Qwen OAuth 配置
12
+ */
13
+ const QWEN_OAUTH_CONFIG = {
14
+ clientId: 'f0304373b74a44d2b584a3fb70ca9e56',
15
+ scope: 'openid profile email model.completion',
16
+ deviceCodeEndpoint: 'https://chat.qwen.ai/api/v1/oauth2/device/code',
17
+ tokenEndpoint: 'https://chat.qwen.ai/api/v1/oauth2/token',
18
+ grantType: 'urn:ietf:params:oauth:grant-type:device_code',
19
+ credentialsDir: '.qwen',
20
+ credentialsFile: 'oauth_creds.json',
21
+ logPrefix: '[Qwen Auth]'
22
+ };
23
+
24
+ /**
25
+ * 活动的轮询任务管理
26
+ */
27
+ const activePollingTasks = new Map();
28
+
29
+ /**
30
+ * 创建带代理支持的 fetch 请求
31
+ * 使用 axios 替代原生 fetch,以正确支持代理配置
32
+ * @param {string} url - 请求 URL
33
+ * @param {Object} options - fetch 选项(兼容 fetch API 格式)
34
+ * @param {string} providerType - 提供商类型,用于获取代理配置
35
+ * @returns {Promise<Object>} 返回类似 fetch Response 的对象
36
+ */
37
+ async function fetchWithProxy(url, options = {}, providerType) {
38
+ const proxyConfig = getProxyConfigForProvider(CONFIG, providerType);
39
+
40
+ // 构建 axios 配置
41
+ const axiosConfig = {
42
+ url,
43
+ method: options.method || 'GET',
44
+ headers: options.headers || {},
45
+ timeout: 30000, // 30 秒超时
46
+ };
47
+
48
+ // 处理请求体
49
+ if (options.body) {
50
+ axiosConfig.data = options.body;
51
+ }
52
+
53
+ // 配置代理
54
+ if (proxyConfig) {
55
+ axiosConfig.httpAgent = proxyConfig.httpAgent;
56
+ axiosConfig.httpsAgent = proxyConfig.httpsAgent;
57
+ axiosConfig.proxy = false; // 禁用 axios 内置代理,使用我们的 agent
58
+ console.log(`[OAuth] Using proxy for ${providerType}: ${CONFIG.PROXY_URL}`);
59
+ }
60
+
61
+ try {
62
+ const axios = (await import('axios')).default;
63
+ const response = await axios(axiosConfig);
64
+
65
+ // 返回类似 fetch Response 的对象
66
+ return {
67
+ ok: response.status >= 200 && response.status < 300,
68
+ status: response.status,
69
+ statusText: response.statusText,
70
+ headers: response.headers,
71
+ json: async () => response.data,
72
+ text: async () => typeof response.data === 'string' ? response.data : JSON.stringify(response.data),
73
+ };
74
+ } catch (error) {
75
+ // 处理 axios 错误,转换为类似 fetch 的响应格式
76
+ if (error.response) {
77
+ // 服务器返回了错误状态码
78
+ return {
79
+ ok: false,
80
+ status: error.response.status,
81
+ statusText: error.response.statusText,
82
+ headers: error.response.headers,
83
+ json: async () => error.response.data,
84
+ text: async () => typeof error.response.data === 'string' ? error.response.data : JSON.stringify(error.response.data),
85
+ };
86
+ }
87
+ // 网络错误或其他错误
88
+ throw error;
89
+ }
90
+ }
91
+
92
+ /**
93
+ * 生成 PKCE 代码验证器
94
+ * @returns {string} Base64URL 编码的随机字符串
95
+ */
96
+ function generateCodeVerifier() {
97
+ return crypto.randomBytes(32).toString('base64url');
98
+ }
99
+
100
+ /**
101
+ * 生成 PKCE 代码挑战
102
+ * @param {string} codeVerifier - 代码验证器
103
+ * @returns {string} Base64URL 编码的 SHA256 哈希
104
+ */
105
+ function generateCodeChallenge(codeVerifier) {
106
+ const hash = crypto.createHash('sha256');
107
+ hash.update(codeVerifier);
108
+ return hash.digest('base64url');
109
+ }
110
+
111
+ /**
112
+ * 停止活动的轮询任务
113
+ * @param {string} taskId - 任务标识符
114
+ */
115
+ function stopPollingTask(taskId) {
116
+ const task = activePollingTasks.get(taskId);
117
+ if (task) {
118
+ task.shouldStop = true;
119
+ activePollingTasks.delete(taskId);
120
+ console.log(`${QWEN_OAUTH_CONFIG.logPrefix} 已停止轮询任务: ${taskId}`);
121
+ }
122
+ }
123
+
124
+ /**
125
+ * 轮询获取 Qwen OAuth 令牌
126
+ * @param {string} deviceCode - 设备代码
127
+ * @param {string} codeVerifier - PKCE 代码验证器
128
+ * @param {number} interval - 轮询间隔(秒)
129
+ * @param {number} expiresIn - 过期时间(秒)
130
+ * @param {string} taskId - 任务标识符
131
+ * @param {Object} options - 额外选项
132
+ * @returns {Promise<Object>} 返回令牌信息
133
+ */
134
+ async function pollQwenToken(deviceCode, codeVerifier, interval = 5, expiresIn = 300, taskId = 'default', options = {}) {
135
+ let credPath = path.join(os.homedir(), QWEN_OAUTH_CONFIG.credentialsDir, QWEN_OAUTH_CONFIG.credentialsFile);
136
+ const maxAttempts = Math.floor(expiresIn / interval);
137
+ let attempts = 0;
138
+
139
+ // 创建任务控制对象
140
+ const taskControl = { shouldStop: false };
141
+ activePollingTasks.set(taskId, taskControl);
142
+
143
+ console.log(`${QWEN_OAUTH_CONFIG.logPrefix} 开���轮询令牌 [${taskId}],间隔 ${interval} 秒,最多尝试 ${maxAttempts} 次`);
144
+
145
+ const poll = async () => {
146
+ // 检查是否需要停止
147
+ if (taskControl.shouldStop) {
148
+ console.log(`${QWEN_OAUTH_CONFIG.logPrefix} 轮询任务 [${taskId}] 已被停止`);
149
+ throw new Error('轮询任务已被取消');
150
+ }
151
+
152
+ if (attempts >= maxAttempts) {
153
+ activePollingTasks.delete(taskId);
154
+ throw new Error('授权超时,请重新开始授权流程');
155
+ }
156
+
157
+ attempts++;
158
+
159
+ const bodyData = {
160
+ client_id: QWEN_OAUTH_CONFIG.clientId,
161
+ device_code: deviceCode,
162
+ grant_type: QWEN_OAUTH_CONFIG.grantType,
163
+ code_verifier: codeVerifier
164
+ };
165
+
166
+ const formBody = Object.entries(bodyData)
167
+ .map(([key, value]) => `${encodeURIComponent(key)}=${encodeURIComponent(value)}`)
168
+ .join('&');
169
+
170
+ try {
171
+ const response = await fetchWithProxy(QWEN_OAUTH_CONFIG.tokenEndpoint, {
172
+ method: 'POST',
173
+ headers: {
174
+ 'Content-Type': 'application/x-www-form-urlencoded',
175
+ 'Accept': 'application/json'
176
+ },
177
+ body: formBody
178
+ }, 'openai-qwen-oauth');
179
+
180
+ const data = await response.json();
181
+
182
+ if (response.ok && data.access_token) {
183
+ // 成功获取令牌
184
+ console.log(`${QWEN_OAUTH_CONFIG.logPrefix} 成功获取令牌 [${taskId}]`);
185
+
186
+ // 如果指定了保存到 configs 目录
187
+ if (options.saveToConfigs) {
188
+ const targetDir = path.join(process.cwd(), 'configs', options.providerDir);
189
+ await fs.promises.mkdir(targetDir, { recursive: true });
190
+ const timestamp = Date.now();
191
+ const filename = `${timestamp}_oauth_creds.json`;
192
+ credPath = path.join(targetDir, filename);
193
+ }
194
+
195
+ // 保存令牌到文件
196
+ await fs.promises.mkdir(path.dirname(credPath), { recursive: true });
197
+ await fs.promises.writeFile(credPath, JSON.stringify(data, null, 2));
198
+ console.log(`${QWEN_OAUTH_CONFIG.logPrefix} 令牌已保存到 ${credPath}`);
199
+
200
+ const relativePath = path.relative(process.cwd(), credPath);
201
+
202
+ // 清理任务
203
+ activePollingTasks.delete(taskId);
204
+
205
+ // 广播授权成功事件
206
+ broadcastEvent('oauth_success', {
207
+ provider: 'openai-qwen-oauth',
208
+ credPath: credPath,
209
+ relativePath: relativePath,
210
+ timestamp: new Date().toISOString()
211
+ });
212
+
213
+ // 自动关联新生成的凭据到 Pools
214
+ await autoLinkProviderConfigs(CONFIG);
215
+
216
+ return data;
217
+ }
218
+
219
+ // 检查错误类型
220
+ if (data.error === 'authorization_pending') {
221
+ // 用户尚未完成授权,继续轮询
222
+ console.log(`${QWEN_OAUTH_CONFIG.logPrefix} 等待用户授权 [${taskId}]... (第 ${attempts}/${maxAttempts} 次尝试)`);
223
+ await new Promise(resolve => setTimeout(resolve, interval * 1000));
224
+ return poll();
225
+ } else if (data.error === 'slow_down') {
226
+ // 需要降低轮询频率
227
+ console.log(`${QWEN_OAUTH_CONFIG.logPrefix} 降低轮询频率`);
228
+ await new Promise(resolve => setTimeout(resolve, (interval + 5) * 1000));
229
+ return poll();
230
+ } else if (data.error === 'expired_token') {
231
+ activePollingTasks.delete(taskId);
232
+ throw new Error('设备代码已过期,请重新开始授权流程');
233
+ } else if (data.error === 'access_denied') {
234
+ activePollingTasks.delete(taskId);
235
+ throw new Error('用户拒绝了授权请求');
236
+ } else {
237
+ activePollingTasks.delete(taskId);
238
+ throw new Error(`授权失败: ${data.error || '未知错误'}`);
239
+ }
240
+ } catch (error) {
241
+ if (error.message.includes('授权') || error.message.includes('过期') || error.message.includes('拒绝')) {
242
+ throw error;
243
+ }
244
+ console.error(`${QWEN_OAUTH_CONFIG.logPrefix} 轮询出错:`, error);
245
+ // 网络错误,继续重试
246
+ await new Promise(resolve => setTimeout(resolve, interval * 1000));
247
+ return poll();
248
+ }
249
+ };
250
+
251
+ return poll();
252
+ }
253
+
254
+ /**
255
+ * 处理 Qwen OAuth 授权(设备授权流程)
256
+ * @param {Object} currentConfig - 当前配置对象
257
+ * @param {Object} options - 额外选项
258
+ * @returns {Promise<Object>} 返回授权URL和相关信息
259
+ */
260
+ export async function handleQwenOAuth(currentConfig, options = {}) {
261
+ const codeVerifier = generateCodeVerifier();
262
+ const codeChallenge = generateCodeChallenge(codeVerifier);
263
+
264
+ const bodyData = {
265
+ client_id: QWEN_OAUTH_CONFIG.clientId,
266
+ scope: QWEN_OAUTH_CONFIG.scope,
267
+ code_challenge: codeChallenge,
268
+ code_challenge_method: 'S256'
269
+ };
270
+
271
+ const formBody = Object.entries(bodyData)
272
+ .map(([key, value]) => `${encodeURIComponent(key)}=${encodeURIComponent(value)}`)
273
+ .join('&');
274
+
275
+ try {
276
+ const response = await fetchWithProxy(QWEN_OAUTH_CONFIG.deviceCodeEndpoint, {
277
+ method: 'POST',
278
+ headers: {
279
+ 'Content-Type': 'application/x-www-form-urlencoded',
280
+ 'Accept': 'application/json'
281
+ },
282
+ body: formBody
283
+ }, 'openai-qwen-oauth');
284
+
285
+ if (!response.ok) {
286
+ throw new Error(`Qwen OAuth请求失败: ${response.status} ${response.statusText}`);
287
+ }
288
+
289
+ const deviceAuth = await response.json();
290
+
291
+ if (!deviceAuth.device_code || !deviceAuth.verification_uri_complete) {
292
+ throw new Error('Qwen OAuth响应格式错误,缺少必要字段');
293
+ }
294
+
295
+ // 启动后台轮询获取令牌
296
+ const interval = 5;
297
+ // const expiresIn = deviceAuth.expires_in || 1800;
298
+ const expiresIn = 300;
299
+
300
+ // 生成唯一的任务ID
301
+ const taskId = `qwen-${deviceAuth.device_code.substring(0, 8)}-${Date.now()}`;
302
+
303
+ // 先停止之前可能存在的所有 Qwen 轮询任务
304
+ for (const [existingTaskId] of activePollingTasks.entries()) {
305
+ if (existingTaskId.startsWith('qwen-')) {
306
+ stopPollingTask(existingTaskId);
307
+ }
308
+ }
309
+
310
+ // 不等待轮询完成,立即返回授权信息
311
+ pollQwenToken(deviceAuth.device_code, codeVerifier, interval, expiresIn, taskId, options)
312
+ .catch(error => {
313
+ console.error(`${QWEN_OAUTH_CONFIG.logPrefix} 轮询失败 [${taskId}]:`, error);
314
+ // 广播授权失败事件
315
+ broadcastEvent('oauth_error', {
316
+ provider: 'openai-qwen-oauth',
317
+ error: error.message,
318
+ timestamp: new Date().toISOString()
319
+ });
320
+ });
321
+
322
+ return {
323
+ authUrl: deviceAuth.verification_uri_complete,
324
+ authInfo: {
325
+ provider: 'openai-qwen-oauth',
326
+ deviceCode: deviceAuth.device_code,
327
+ userCode: deviceAuth.user_code,
328
+ verificationUri: deviceAuth.verification_uri,
329
+ verificationUriComplete: deviceAuth.verification_uri_complete,
330
+ expiresIn: expiresIn,
331
+ interval: interval,
332
+ codeVerifier: codeVerifier
333
+ }
334
+ };
335
+ } catch (error) {
336
+ console.error(`${QWEN_OAUTH_CONFIG.logPrefix} 请求失败:`, error);
337
+ throw new Error(`Qwen OAuth 授权失败: ${error.message}`);
338
+ }
339
+ }
src/convert/convert-old.js ADDED
The diff for this file is too large to render. See raw diff
 
src/convert/convert.js ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * 协议转换模块 - 新架构版本
3
+ * 使用重构后的转换器架构
4
+ *
5
+ * 这个文件展示了如何使用新的转换器架构
6
+ * 可以逐步替换原有的 convert.js
7
+ */
8
+
9
+ import { v4 as uuidv4 } from 'uuid';
10
+ import { MODEL_PROTOCOL_PREFIX, getProtocolPrefix } from '../utils/common.js';
11
+ import { ConverterFactory } from '../converters/ConverterFactory.js';
12
+ import {
13
+ generateResponseCreated,
14
+ generateResponseInProgress,
15
+ generateOutputItemAdded,
16
+ generateContentPartAdded,
17
+ generateOutputTextDone,
18
+ generateContentPartDone,
19
+ generateOutputItemDone,
20
+ generateResponseCompleted
21
+ } from '../providers/openai/openai-responses-core.mjs';
22
+
23
+ // =============================================================================
24
+ // 初始化:注册所有转换器
25
+ // =============================================================================
26
+
27
+ // =============================================================================
28
+ // 主转换函数
29
+ // =============================================================================
30
+
31
+ /**
32
+ * 通用数据转换函数(新架构版本)
33
+ * @param {object} data - 要转换的数据(请求体或响应)
34
+ * @param {string} type - 转换类型:'request', 'response', 'streamChunk', 'modelList'
35
+ * @param {string} fromProvider - 源模型提供商
36
+ * @param {string} toProvider - 目标模型提供商
37
+ * @param {string} [model] - 可选的模型名称(用于响应转换)
38
+ * @returns {object} 转换后的数据
39
+ * @throws {Error} 如果找不到合适的转换函数
40
+ */
41
+ export function convertData(data, type, fromProvider, toProvider, model) {
42
+ try {
43
+ // 获取协议前缀
44
+ const fromProtocol = getProtocolPrefix(fromProvider);
45
+ const toProtocol = getProtocolPrefix(toProvider);
46
+
47
+ // 如果目标协议为 forward,直接返回原始数据,无需转换
48
+ if (toProtocol === MODEL_PROTOCOL_PREFIX.FORWARD || fromProtocol === MODEL_PROTOCOL_PREFIX.FORWARD) {
49
+ console.log(`[Convert] Target protocol is forward, skipping conversion`);
50
+ return data;
51
+ }
52
+
53
+ // 从工厂获取转换器
54
+ const converter = ConverterFactory.getConverter(fromProtocol);
55
+
56
+ if (!converter) {
57
+ throw new Error(`No converter found for protocol: ${fromProtocol}`);
58
+ }
59
+
60
+ // 根据类型调用相应的转换方法
61
+ switch (type) {
62
+ case 'request':
63
+ return converter.convertRequest(data, toProtocol);
64
+
65
+ case 'response':
66
+ return converter.convertResponse(data, toProtocol, model);
67
+
68
+ case 'streamChunk':
69
+ return converter.convertStreamChunk(data, toProtocol, model);
70
+
71
+ case 'modelList':
72
+ return converter.convertModelList(data, toProtocol);
73
+
74
+ default:
75
+ throw new Error(`Unsupported conversion type: ${type}`);
76
+ }
77
+ } catch (error) {
78
+ console.error(`Conversion error: ${error.message}`);
79
+ throw error;
80
+ }
81
+ }
82
+
83
+ // =============================================================================
84
+ // 向后兼容的导出函数
85
+ // =============================================================================
86
+
87
+ /**
88
+ * 以下函数保持与原有API的兼容性
89
+ * 内部使用新的转换器架构
90
+ */
91
+
92
+ // OpenAI 相关转换
93
+ export function toOpenAIRequestFromGemini(geminiRequest) {
94
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
95
+ return converter.toOpenAIRequest(geminiRequest);
96
+ }
97
+
98
+ export function toOpenAIRequestFromClaude(claudeRequest) {
99
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.CLAUDE);
100
+ return converter.toOpenAIRequest(claudeRequest);
101
+ }
102
+
103
+ export function toOpenAIChatCompletionFromGemini(geminiResponse, model) {
104
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
105
+ return converter.toOpenAIResponse(geminiResponse, model);
106
+ }
107
+
108
+ export function toOpenAIChatCompletionFromClaude(claudeResponse, model) {
109
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.CLAUDE);
110
+ return converter.toOpenAIResponse(claudeResponse, model);
111
+ }
112
+
113
+ export function toOpenAIStreamChunkFromGemini(geminiChunk, model) {
114
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
115
+ return converter.toOpenAIStreamChunk(geminiChunk, model);
116
+ }
117
+
118
+ export function toOpenAIStreamChunkFromClaude(claudeChunk, model) {
119
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.CLAUDE);
120
+ return converter.toOpenAIStreamChunk(claudeChunk, model);
121
+ }
122
+
123
+ export function toOpenAIModelListFromGemini(geminiModels) {
124
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
125
+ return converter.toOpenAIModelList(geminiModels);
126
+ }
127
+
128
+ export function toOpenAIModelListFromClaude(claudeModels) {
129
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.CLAUDE);
130
+ return converter.toOpenAIModelList(claudeModels);
131
+ }
132
+
133
+ // Claude 相关转换
134
+ export function toClaudeRequestFromOpenAI(openaiRequest) {
135
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI);
136
+ return converter.toClaudeRequest(openaiRequest);
137
+ }
138
+
139
+ export function toClaudeRequestFromOpenAIResponses(responsesRequest) {
140
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES);
141
+ return converter.toClaudeRequest(responsesRequest);
142
+ }
143
+
144
+ export function toClaudeChatCompletionFromOpenAI(openaiResponse, model) {
145
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI);
146
+ return converter.toClaudeResponse(openaiResponse, model);
147
+ }
148
+
149
+ export function toClaudeChatCompletionFromGemini(geminiResponse, model) {
150
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
151
+ return converter.toClaudeResponse(geminiResponse, model);
152
+ }
153
+
154
+ export function toClaudeStreamChunkFromOpenAI(openaiChunk, model) {
155
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI);
156
+ return converter.toClaudeStreamChunk(openaiChunk, model);
157
+ }
158
+
159
+ export function toClaudeStreamChunkFromGemini(geminiChunk, model) {
160
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
161
+ return converter.toClaudeStreamChunk(geminiChunk, model);
162
+ }
163
+
164
+ export function toClaudeModelListFromOpenAI(openaiModels) {
165
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI);
166
+ return converter.toClaudeModelList(openaiModels);
167
+ }
168
+
169
+ export function toClaudeModelListFromGemini(geminiModels) {
170
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
171
+ return converter.toClaudeModelList(geminiModels);
172
+ }
173
+
174
+ // Gemini 相关转换
175
+ export function toGeminiRequestFromOpenAI(openaiRequest) {
176
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI);
177
+ return converter.toGeminiRequest(openaiRequest);
178
+ }
179
+
180
+ export function toGeminiRequestFromClaude(claudeRequest) {
181
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.CLAUDE);
182
+ return converter.toGeminiRequest(claudeRequest);
183
+ }
184
+
185
+ export function toGeminiRequestFromOpenAIResponses(responsesRequest) {
186
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES);
187
+ return converter.toGeminiRequest(responsesRequest);
188
+ }
189
+
190
+ // OpenAI Responses 相关转换
191
+ export function toOpenAIResponsesFromOpenAI(openaiResponse, model) {
192
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI);
193
+ return converter.toOpenAIResponsesResponse(openaiResponse, model);
194
+ }
195
+
196
+ export function toOpenAIResponsesFromClaude(claudeResponse, model) {
197
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.CLAUDE);
198
+ return converter.toOpenAIResponsesResponse(claudeResponse, model);
199
+ }
200
+
201
+ export function toOpenAIResponsesFromGemini(geminiResponse, model) {
202
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
203
+ return converter.toOpenAIResponsesResponse(geminiResponse, model);
204
+ }
205
+
206
+ export function toOpenAIResponsesStreamChunkFromOpenAI(openaiChunk, model, requestId) {
207
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI);
208
+ return converter.toOpenAIResponsesStreamChunk(openaiChunk, model, requestId);
209
+ }
210
+
211
+ export function toOpenAIResponsesStreamChunkFromClaude(claudeChunk, model, requestId) {
212
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.CLAUDE);
213
+ return converter.toOpenAIResponsesStreamChunk(claudeChunk, model, requestId);
214
+ }
215
+
216
+ export function toOpenAIResponsesStreamChunkFromGemini(geminiChunk, model, requestId) {
217
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.GEMINI);
218
+ return converter.toOpenAIResponsesStreamChunk(geminiChunk, model, requestId);
219
+ }
220
+
221
+ // 从 OpenAI Responses 转换到其他格式
222
+ export function toOpenAIRequestFromOpenAIResponses(responsesRequest) {
223
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES);
224
+ return converter.toOpenAIRequest(responsesRequest);
225
+ }
226
+
227
+ export function toOpenAIChatCompletionFromOpenAIResponses(responsesResponse, model) {
228
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES);
229
+ return converter.toOpenAIResponse(responsesResponse, model);
230
+ }
231
+
232
+ export function toOpenAIStreamChunkFromOpenAIResponses(responsesChunk, model) {
233
+ const converter = ConverterFactory.getConverter(MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES);
234
+ return converter.toOpenAIStreamChunk(responsesChunk, model);
235
+ }
236
+
237
+ // 辅助函数导出
238
+ export async function extractAndProcessSystemMessages(messages) {
239
+ const { Utils } = await import('../converters/utils.js');
240
+ return Utils.extractSystemMessages(messages);
241
+ }
242
+
243
+ export async function extractTextFromMessageContent(content) {
244
+ const { Utils } = await import('../converters/utils.js');
245
+ return Utils.extractText(content);
246
+ }
247
+
248
+ // =============================================================================
249
+ // 工具函数
250
+ // =============================================================================
251
+
252
+ /**
253
+ * 获取所有已注册的协议
254
+ * @returns {Array<string>} 协议前缀数组
255
+ */
256
+ export function getRegisteredProtocols() {
257
+ return ConverterFactory.getRegisteredProtocols();
258
+ }
259
+
260
+ /**
261
+ * 检查协议是否已注册
262
+ * @param {string} protocol - 协议前缀
263
+ * @returns {boolean} 是否已注册
264
+ */
265
+ export function isProtocolRegistered(protocol) {
266
+ return ConverterFactory.isProtocolRegistered(protocol);
267
+ }
268
+
269
+ /**
270
+ * 清除所有转换器缓存
271
+ */
272
+ export function clearConverterCache() {
273
+ ConverterFactory.clearCache();
274
+ }
275
+
276
+ /**
277
+ * 获取转换器实例(用于高级用法)
278
+ * @param {string} protocol - 协议前缀
279
+ * @returns {BaseConverter} 转换器实例
280
+ */
281
+ export function getConverter(protocol) {
282
+ return ConverterFactory.getConverter(protocol);
283
+ }
284
+
285
+ // =============================================================================
286
+ // 辅助函数 - 从原 convert.js 迁移
287
+ // =============================================================================
288
+
289
+ /**
290
+ * 生成 OpenAI 流式响应的停止块
291
+ * @param {string} model - 模型名称
292
+ * @returns {Object} OpenAI 流式停止块
293
+ */
294
+ export function getOpenAIStreamChunkStop(model) {
295
+ return {
296
+ id: `chatcmpl-${uuidv4()}`,
297
+ object: "chat.completion.chunk",
298
+ created: Math.floor(Date.now() / 1000),
299
+ model: model,
300
+ system_fingerprint: "",
301
+ choices: [{
302
+ index: 0,
303
+ delta: {
304
+ content: "",
305
+ reasoning_content: ""
306
+ },
307
+ finish_reason: 'stop',
308
+ message: {
309
+ content: "",
310
+ reasoning_content: ""
311
+ }
312
+ }],
313
+ usage:{
314
+ prompt_tokens: 0,
315
+ completion_tokens: 0,
316
+ total_tokens: 0,
317
+ },
318
+ };
319
+ }
320
+
321
+ /**
322
+ * 生成 OpenAI Responses 流式响应的开始事件
323
+ * @param {string} id - 响应 ID
324
+ * @param {string} model - 模型名称
325
+ * @returns {Array} 开始事件数组
326
+ */
327
+ export function getOpenAIResponsesStreamChunkBegin(id, model) {
328
+ return [
329
+ generateResponseCreated(id, model),
330
+ generateResponseInProgress(id),
331
+ generateOutputItemAdded(id),
332
+ generateContentPartAdded(id)
333
+ ];
334
+ }
335
+
336
+ /**
337
+ * 生成 OpenAI Responses 流式响应的结束事件
338
+ * @param {string} id - 响应 ID
339
+ * @returns {Array} 结束事件数组
340
+ */
341
+ export function getOpenAIResponsesStreamChunkEnd(id) {
342
+ return [
343
+ generateOutputTextDone(id),
344
+ generateContentPartDone(id),
345
+ generateOutputItemDone(id),
346
+ generateResponseCompleted(id)
347
+ ];
348
+ }
349
+
350
+ // =============================================================================
351
+ // 默认导出
352
+ // =============================================================================
353
+
354
+ export default {
355
+ convertData,
356
+ getRegisteredProtocols,
357
+ isProtocolRegistered,
358
+ clearConverterCache,
359
+ getConverter,
360
+ // 向后兼容的函数
361
+ toOpenAIRequestFromGemini,
362
+ toOpenAIRequestFromClaude,
363
+ toOpenAIChatCompletionFromGemini,
364
+ toOpenAIChatCompletionFromClaude,
365
+ toOpenAIStreamChunkFromGemini,
366
+ toOpenAIStreamChunkFromClaude,
367
+ toOpenAIModelListFromGemini,
368
+ toOpenAIModelListFromClaude,
369
+ toClaudeRequestFromOpenAI,
370
+ toClaudeChatCompletionFromOpenAI,
371
+ toClaudeChatCompletionFromGemini,
372
+ toClaudeStreamChunkFromOpenAI,
373
+ toClaudeStreamChunkFromGemini,
374
+ toClaudeModelListFromOpenAI,
375
+ toClaudeModelListFromGemini,
376
+ toGeminiRequestFromOpenAI,
377
+ toGeminiRequestFromClaude,
378
+ toOpenAIResponsesFromOpenAI,
379
+ toOpenAIResponsesFromClaude,
380
+ toOpenAIResponsesFromGemini,
381
+ toOpenAIResponsesStreamChunkFromOpenAI,
382
+ toOpenAIResponsesStreamChunkFromClaude,
383
+ toOpenAIResponsesStreamChunkFromGemini,
384
+ toOpenAIRequestFromOpenAIResponses,
385
+ toOpenAIChatCompletionFromOpenAIResponses,
386
+ toOpenAIStreamChunkFromOpenAIResponses,
387
+ toClaudeRequestFromOpenAIResponses,
388
+ toGeminiRequestFromOpenAIResponses,
389
+ };
src/converters/BaseConverter.js ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * 转换器基类
3
+ * 使用策略模式定义转换器的通用接口
4
+ */
5
+
6
+ /**
7
+ * 抽象转换器基类
8
+ * 所有具体的协议转换器都应继承此类
9
+ */
10
+ export class BaseConverter {
11
+ constructor(protocolName) {
12
+ if (new.target === BaseConverter) {
13
+ throw new Error('BaseConverter是抽象类,不能直接实例化');
14
+ }
15
+ this.protocolName = protocolName;
16
+ }
17
+
18
+ /**
19
+ * 转换请求
20
+ * @param {Object} data - 请求数据
21
+ * @param {string} targetProtocol - 目标协议
22
+ * @returns {Object} 转换后的请求
23
+ */
24
+ convertRequest(data, targetProtocol) {
25
+ throw new Error('convertRequest方法必须被子类实现');
26
+ }
27
+
28
+ /**
29
+ * 转换响应
30
+ * @param {Object} data - 响应数据
31
+ * @param {string} targetProtocol - 目标协议
32
+ * @param {string} model - 模型名称
33
+ * @returns {Object} 转换后的响应
34
+ */
35
+ convertResponse(data, targetProtocol, model) {
36
+ throw new Error('convertResponse方法必须被子类实现');
37
+ }
38
+
39
+ /**
40
+ * 转换流式响应块
41
+ * @param {Object} chunk - 流式响应块
42
+ * @param {string} targetProtocol - 目标协议
43
+ * @param {string} model - 模型名称
44
+ * @returns {Object} 转换后的流式响应块
45
+ */
46
+ convertStreamChunk(chunk, targetProtocol, model) {
47
+ throw new Error('convertStreamChunk方法必须被子类实现');
48
+ }
49
+
50
+ /**
51
+ * 转换模型列表
52
+ * @param {Object} data - 模型列表数据
53
+ * @param {string} targetProtocol - 目标协议
54
+ * @returns {Object} 转换后的模型列表
55
+ */
56
+ convertModelList(data, targetProtocol) {
57
+ throw new Error('convertModelList方法必须被子类实现');
58
+ }
59
+
60
+ /**
61
+ * 获取协议名称
62
+ * @returns {string} 协议名称
63
+ */
64
+ getProtocolName() {
65
+ return this.protocolName;
66
+ }
67
+ }
68
+
69
+ /**
70
+ * 内容处理器接口
71
+ * 用于处理不同类型的内容(文本、图片、音频等)
72
+ */
73
+ export class ContentProcessor {
74
+ /**
75
+ * 处理内容
76
+ * @param {*} content - 内容数据
77
+ * @returns {*} 处理后的内容
78
+ */
79
+ process(content) {
80
+ throw new Error('process方法必须被子类实现');
81
+ }
82
+ }
83
+
84
+ /**
85
+ * 工具处理器接口
86
+ * 用于处理工具调用相关的转换
87
+ */
88
+ export class ToolProcessor {
89
+ /**
90
+ * 处理工具定义
91
+ * @param {Array} tools - 工具定义数组
92
+ * @returns {Array} 处理后的工具定义
93
+ */
94
+ processToolDefinitions(tools) {
95
+ throw new Error('processToolDefinitions方法必须被子类实现');
96
+ }
97
+
98
+ /**
99
+ * 处理工具调用
100
+ * @param {Object} toolCall - 工具调用数据
101
+ * @returns {Object} 处理后的工具调用
102
+ */
103
+ processToolCall(toolCall) {
104
+ throw new Error('processToolCall方法必须被子类实现');
105
+ }
106
+
107
+ /**
108
+ * 处理工具结果
109
+ * @param {Object} toolResult - 工具结果数据
110
+ * @returns {Object} 处理后的工具结果
111
+ */
112
+ processToolResult(toolResult) {
113
+ throw new Error('processToolResult方法必须被子类实现');
114
+ }
115
+ }
src/converters/ConverterFactory.js ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * 转换器工厂类
3
+ * 使用工厂模式管理转换器实例的创建和缓存
4
+ */
5
+
6
+ import { MODEL_PROTOCOL_PREFIX } from '../utils/common.js';
7
+
8
+ /**
9
+ * 转换器工厂(单例模式 + 工厂模式)
10
+ */
11
+ export class ConverterFactory {
12
+ // 私有静态属性:存储转换器实例
13
+ static #converters = new Map();
14
+
15
+ // 私有静态属性:存储转换器类
16
+ static #converterClasses = new Map();
17
+
18
+ /**
19
+ * 注册转换器类
20
+ * @param {string} protocolPrefix - 协议前缀
21
+ * @param {Class} ConverterClass - 转换器类
22
+ */
23
+ static registerConverter(protocolPrefix, ConverterClass) {
24
+ this.#converterClasses.set(protocolPrefix, ConverterClass);
25
+ }
26
+
27
+ /**
28
+ * 获取转换器实例(带缓存)
29
+ * @param {string} protocolPrefix - 协议前缀
30
+ * @returns {BaseConverter} 转换器实例
31
+ */
32
+ static getConverter(protocolPrefix) {
33
+ // 检查缓存
34
+ if (this.#converters.has(protocolPrefix)) {
35
+ return this.#converters.get(protocolPrefix);
36
+ }
37
+
38
+ // 创建新实例
39
+ const converter = this.createConverter(protocolPrefix);
40
+
41
+ // 缓存实例
42
+ if (converter) {
43
+ this.#converters.set(protocolPrefix, converter);
44
+ }
45
+
46
+ return converter;
47
+ }
48
+
49
+ /**
50
+ * 创建转换器实例
51
+ * @param {string} protocolPrefix - 协议前缀
52
+ * @returns {BaseConverter} 转换器实例
53
+ */
54
+ static createConverter(protocolPrefix) {
55
+ const ConverterClass = this.#converterClasses.get(protocolPrefix);
56
+
57
+ if (!ConverterClass) {
58
+ throw new Error(`No converter registered for protocol: ${protocolPrefix}`);
59
+ }
60
+
61
+ return new ConverterClass();
62
+ }
63
+
64
+ /**
65
+ * 清除所有缓存的转换器
66
+ */
67
+ static clearCache() {
68
+ this.#converters.clear();
69
+ }
70
+
71
+ /**
72
+ * 清除特定协议的转换器缓存
73
+ * @param {string} protocolPrefix - 协议前缀
74
+ */
75
+ static clearConverterCache(protocolPrefix) {
76
+ this.#converters.delete(protocolPrefix);
77
+ }
78
+
79
+ /**
80
+ * 获取所有已注册的协议
81
+ * @returns {Array<string>} 协议前缀数组
82
+ */
83
+ static getRegisteredProtocols() {
84
+ return Array.from(this.#converterClasses.keys());
85
+ }
86
+
87
+ /**
88
+ * 检查协议是否已注册
89
+ * @param {string} protocolPrefix - 协议前缀
90
+ * @returns {boolean} 是否已注册
91
+ */
92
+ static isProtocolRegistered(protocolPrefix) {
93
+ return this.#converterClasses.has(protocolPrefix);
94
+ }
95
+ }
96
+
97
+ /**
98
+ * 内容处理器工厂
99
+ */
100
+ export class ContentProcessorFactory {
101
+ static #processors = new Map();
102
+
103
+ /**
104
+ * 获取内容处理器
105
+ * @param {string} sourceFormat - 源格式
106
+ * @param {string} targetFormat - 目标格式
107
+ * @returns {ContentProcessor} 内容处理器实例
108
+ */
109
+ static getProcessor(sourceFormat, targetFormat) {
110
+ const key = `${sourceFormat}_to_${targetFormat}`;
111
+
112
+ if (!this.#processors.has(key)) {
113
+ this.#processors.set(key, this.createProcessor(sourceFormat, targetFormat));
114
+ }
115
+
116
+ return this.#processors.get(key);
117
+ }
118
+
119
+ /**
120
+ * 创建内容处理器
121
+ * @param {string} sourceFormat - 源格式
122
+ * @param {string} targetFormat - 目标格式
123
+ * @returns {ContentProcessor} 内容处理器实例
124
+ */
125
+ static createProcessor(sourceFormat, targetFormat) {
126
+ // 这里返回null,实际使用时需要导入具体的处理器类
127
+ // 为了避免循环依赖,处理器类应该在使用时动态导入
128
+ console.warn(`Content processor for ${sourceFormat} to ${targetFormat} not yet implemented`);
129
+ return null;
130
+ }
131
+
132
+ /**
133
+ * 清除所有缓存的处理器
134
+ */
135
+ static clearCache() {
136
+ this.#processors.clear();
137
+ }
138
+ }
139
+
140
+ /**
141
+ * 工具处理器工厂
142
+ */
143
+ export class ToolProcessorFactory {
144
+ static #processors = new Map();
145
+
146
+ /**
147
+ * 获取工具处理器
148
+ * @param {string} sourceFormat - 源格式
149
+ * @param {string} targetFormat - 目标格式
150
+ * @returns {ToolProcessor} 工具处理器实例
151
+ */
152
+ static getProcessor(sourceFormat, targetFormat) {
153
+ const key = `${sourceFormat}_to_${targetFormat}`;
154
+
155
+ if (!this.#processors.has(key)) {
156
+ this.#processors.set(key, this.createProcessor(sourceFormat, targetFormat));
157
+ }
158
+
159
+ return this.#processors.get(key);
160
+ }
161
+
162
+ /**
163
+ * 创建工具处理器
164
+ * @param {string} sourceFormat - 源格式
165
+ * @param {string} targetFormat - 目标格式
166
+ * @returns {ToolProcessor} 工具处理器实例
167
+ */
168
+ static createProcessor(sourceFormat, targetFormat) {
169
+ console.warn(`Tool processor for ${sourceFormat} to ${targetFormat} not yet implemented`);
170
+ return null;
171
+ }
172
+
173
+ /**
174
+ * 清除所有缓存的处理器
175
+ */
176
+ static clearCache() {
177
+ this.#processors.clear();
178
+ }
179
+ }
180
+
181
+ // 导出工厂类
182
+ export default ConverterFactory;
src/converters/register-converters.js ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * 转换器注册模块
3
+ * 用于注册所有转换器到工厂,避免循环依赖问题
4
+ */
5
+
6
+ import { MODEL_PROTOCOL_PREFIX } from '../utils/common.js';
7
+ import { ConverterFactory } from './ConverterFactory.js';
8
+ import { OpenAIConverter } from './strategies/OpenAIConverter.js';
9
+ import { OpenAIResponsesConverter } from './strategies/OpenAIResponsesConverter.js';
10
+ import { ClaudeConverter } from './strategies/ClaudeConverter.js';
11
+ import { GeminiConverter } from './strategies/GeminiConverter.js';
12
+ import { OllamaConverter } from './strategies/OllamaConverter.js';
13
+ import { CodexConverter } from './strategies/CodexConverter.js';
14
+
15
+ /**
16
+ * 注册所有转换器到工厂
17
+ * 此函数应在应用启动时调用一次
18
+ */
19
+ export function registerAllConverters() {
20
+ ConverterFactory.registerConverter(MODEL_PROTOCOL_PREFIX.OPENAI, OpenAIConverter);
21
+ ConverterFactory.registerConverter(MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES, OpenAIResponsesConverter);
22
+ ConverterFactory.registerConverter(MODEL_PROTOCOL_PREFIX.CLAUDE, ClaudeConverter);
23
+ ConverterFactory.registerConverter(MODEL_PROTOCOL_PREFIX.GEMINI, GeminiConverter);
24
+ ConverterFactory.registerConverter(MODEL_PROTOCOL_PREFIX.OLLAMA, OllamaConverter);
25
+ ConverterFactory.registerConverter(MODEL_PROTOCOL_PREFIX.CODEX, CodexConverter);
26
+ }
27
+
28
+ // 自动注册所有转换器
29
+ registerAllConverters();
src/converters/strategies/ClaudeConverter.js ADDED
@@ -0,0 +1,1652 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * Claude转换器
3
+ * 处理Claude(Anthropic)协议与其他协议之间的转换
4
+ */
5
+
6
+ import { v4 as uuidv4 } from 'uuid';
7
+ import { BaseConverter } from '../BaseConverter.js';
8
+ import {
9
+ checkAndAssignOrDefault,
10
+ cleanJsonSchemaProperties as cleanJsonSchema,
11
+ determineReasoningEffortFromBudget,
12
+ OPENAI_DEFAULT_MAX_TOKENS,
13
+ OPENAI_DEFAULT_TEMPERATURE,
14
+ OPENAI_DEFAULT_TOP_P,
15
+ GEMINI_DEFAULT_MAX_TOKENS,
16
+ GEMINI_DEFAULT_TEMPERATURE,
17
+ GEMINI_DEFAULT_TOP_P,
18
+ GEMINI_DEFAULT_INPUT_TOKEN_LIMIT,
19
+ GEMINI_DEFAULT_OUTPUT_TOKEN_LIMIT
20
+ } from '../utils.js';
21
+ import { MODEL_PROTOCOL_PREFIX } from '../../utils/common.js';
22
+ import {
23
+ generateResponseCreated,
24
+ generateResponseInProgress,
25
+ generateOutputItemAdded,
26
+ generateContentPartAdded,
27
+ generateOutputTextDone,
28
+ generateContentPartDone,
29
+ generateOutputItemDone,
30
+ generateResponseCompleted
31
+ } from '../../providers/openai/openai-responses-core.mjs';
32
+
33
+ /**
34
+ * Claude转换器类
35
+ * 实现Claude协议到其他协议的转换
36
+ */
37
+ export class ClaudeConverter extends BaseConverter {
38
+ constructor() {
39
+ super('claude');
40
+ }
41
+
42
+ /**
43
+ * 转换请求
44
+ */
45
+ convertRequest(data, targetProtocol) {
46
+ switch (targetProtocol) {
47
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
48
+ return this.toOpenAIRequest(data);
49
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
50
+ return this.toGeminiRequest(data);
51
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
52
+ return this.toOpenAIResponsesRequest(data);
53
+ default:
54
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
55
+ }
56
+ }
57
+
58
+ /**
59
+ * 转换响应
60
+ */
61
+ convertResponse(data, targetProtocol, model) {
62
+ switch (targetProtocol) {
63
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
64
+ return this.toOpenAIResponse(data, model);
65
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
66
+ return this.toGeminiResponse(data, model);
67
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
68
+ return this.toOpenAIResponsesResponse(data, model);
69
+ default:
70
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
71
+ }
72
+ }
73
+
74
+ /**
75
+ * 转换流式响应块
76
+ */
77
+ convertStreamChunk(chunk, targetProtocol, model) {
78
+ switch (targetProtocol) {
79
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
80
+ return this.toOpenAIStreamChunk(chunk, model);
81
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
82
+ return this.toGeminiStreamChunk(chunk, model);
83
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
84
+ return this.toOpenAIResponsesStreamChunk(chunk, model);
85
+ default:
86
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
87
+ }
88
+ }
89
+
90
+ /**
91
+ * 转换模型列表
92
+ */
93
+ convertModelList(data, targetProtocol) {
94
+ switch (targetProtocol) {
95
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
96
+ return this.toOpenAIModelList(data);
97
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
98
+ return this.toGeminiModelList(data);
99
+ default:
100
+ return data;
101
+ }
102
+ }
103
+
104
+ // =========================================================================
105
+ // Claude -> OpenAI 转换
106
+ // =========================================================================
107
+
108
+ /**
109
+ * Claude请求 -> OpenAI请求
110
+ */
111
+ toOpenAIRequest(claudeRequest) {
112
+ const openaiMessages = [];
113
+ let systemMessageContent = '';
114
+
115
+ // 添加系统消息
116
+ if (claudeRequest.system) {
117
+ systemMessageContent = claudeRequest.system;
118
+ }
119
+
120
+ // 处理消息
121
+ if (claudeRequest.messages && Array.isArray(claudeRequest.messages)) {
122
+ const tempOpenAIMessages = [];
123
+ for (const msg of claudeRequest.messages) {
124
+ const role = msg.role;
125
+
126
+ // 处理用户的工具结果消息
127
+ if (role === "user" && Array.isArray(msg.content)) {
128
+ const hasToolResult = msg.content.some(
129
+ item => item && typeof item === 'object' && item.type === "tool_result"
130
+ );
131
+
132
+ if (hasToolResult) {
133
+ for (const item of msg.content) {
134
+ if (item && typeof item === 'object' && item.type === "tool_result") {
135
+ const toolUseId = item.tool_use_id || item.id || "";
136
+ const contentStr = String(item.content || "");
137
+ tempOpenAIMessages.push({
138
+ role: "tool",
139
+ tool_call_id: toolUseId,
140
+ content: contentStr,
141
+ });
142
+ }
143
+ }
144
+ continue;
145
+ }
146
+ }
147
+
148
+ // 处理assistant消息中的工具调用
149
+ if (role === "assistant" && Array.isArray(msg.content) && msg.content.length > 0) {
150
+ const firstPart = msg.content[0];
151
+ if (firstPart.type === "tool_use") {
152
+ const funcName = firstPart.name || "";
153
+ const funcArgs = firstPart.input || {};
154
+ tempOpenAIMessages.push({
155
+ role: "assistant",
156
+ content: '',
157
+ tool_calls: [
158
+ {
159
+ id: firstPart.id || `call_${funcName}_1`,
160
+ type: "function",
161
+ function: {
162
+ name: funcName,
163
+ arguments: JSON.stringify(funcArgs)
164
+ },
165
+ index: firstPart.index || 0
166
+ }
167
+ ]
168
+ });
169
+ continue;
170
+ }
171
+ }
172
+
173
+ // 普通文本消息
174
+ const contentConverted = this.processClaudeContentToOpenAIContent(msg.content || "");
175
+ if (contentConverted && (Array.isArray(contentConverted) ? contentConverted.length > 0 : contentConverted.trim().length > 0)) {
176
+ tempOpenAIMessages.push({
177
+ role: role,
178
+ content: contentConverted
179
+ });
180
+ }
181
+ }
182
+
183
+ // OpenAI兼容性校验
184
+ const validatedMessages = [];
185
+ for (let idx = 0; idx < tempOpenAIMessages.length; idx++) {
186
+ const m = tempOpenAIMessages[idx];
187
+ if (m.role === "assistant" && m.tool_calls) {
188
+ const callIds = m.tool_calls.map(tc => tc.id).filter(id => id);
189
+ let unmatched = new Set(callIds);
190
+ for (let laterIdx = idx + 1; laterIdx < tempOpenAIMessages.length; laterIdx++) {
191
+ const later = tempOpenAIMessages[laterIdx];
192
+ if (later.role === "tool" && unmatched.has(later.tool_call_id)) {
193
+ unmatched.delete(later.tool_call_id);
194
+ }
195
+ if (unmatched.size === 0) break;
196
+ }
197
+ if (unmatched.size > 0) {
198
+ m.tool_calls = m.tool_calls.filter(tc => !unmatched.has(tc.id));
199
+ if (m.tool_calls.length === 0) {
200
+ delete m.tool_calls;
201
+ if (m.content === null) m.content = "";
202
+ }
203
+ }
204
+ }
205
+ validatedMessages.push(m);
206
+ }
207
+ openaiMessages.push(...validatedMessages);
208
+ }
209
+
210
+ const openaiRequest = {
211
+ model: claudeRequest.model,
212
+ messages: openaiMessages,
213
+ max_tokens: checkAndAssignOrDefault(claudeRequest.max_tokens, OPENAI_DEFAULT_MAX_TOKENS),
214
+ temperature: checkAndAssignOrDefault(claudeRequest.temperature, OPENAI_DEFAULT_TEMPERATURE),
215
+ top_p: checkAndAssignOrDefault(claudeRequest.top_p, OPENAI_DEFAULT_TOP_P),
216
+ stream: claudeRequest.stream,
217
+ };
218
+
219
+ // 处理工具
220
+ if (claudeRequest.tools) {
221
+ const openaiTools = [];
222
+ for (const tool of claudeRequest.tools) {
223
+ openaiTools.push({
224
+ type: "function",
225
+ function: {
226
+ name: tool.name || "",
227
+ description: tool.description || "",
228
+ parameters: cleanJsonSchema(tool.input_schema || {})
229
+ }
230
+ });
231
+ }
232
+ openaiRequest.tools = openaiTools;
233
+ openaiRequest.tool_choice = "auto";
234
+ }
235
+
236
+ // 处理thinking转换
237
+ if (claudeRequest.thinking && claudeRequest.thinking.type === "enabled") {
238
+ const budgetTokens = claudeRequest.thinking.budget_tokens;
239
+ const reasoningEffort = determineReasoningEffortFromBudget(budgetTokens);
240
+ openaiRequest.reasoning_effort = reasoningEffort;
241
+
242
+ let maxCompletionTokens = null;
243
+ if (claudeRequest.max_tokens !== undefined) {
244
+ maxCompletionTokens = claudeRequest.max_tokens;
245
+ delete openaiRequest.max_tokens;
246
+ } else {
247
+ const envMaxTokens = process.env.OPENAI_REASONING_MAX_TOKENS;
248
+ if (envMaxTokens) {
249
+ try {
250
+ maxCompletionTokens = parseInt(envMaxTokens, 10);
251
+ } catch (e) {
252
+ console.warn(`Invalid OPENAI_REASONING_MAX_TOKENS value '${envMaxTokens}'`);
253
+ }
254
+ }
255
+ if (!envMaxTokens) {
256
+ throw new Error("For OpenAI reasoning models, max_completion_tokens is required.");
257
+ }
258
+ }
259
+ openaiRequest.max_completion_tokens = maxCompletionTokens;
260
+ }
261
+
262
+ // 添加系统消息
263
+ if (systemMessageContent) {
264
+ let stringifiedSystemMessageContent = systemMessageContent;
265
+ if (Array.isArray(systemMessageContent)) {
266
+ stringifiedSystemMessageContent = systemMessageContent.map(item =>
267
+ typeof item === 'string' ? item : item.text).join('\n');
268
+ }
269
+ openaiRequest.messages.unshift({ role: 'system', content: stringifiedSystemMessageContent });
270
+ }
271
+
272
+ return openaiRequest;
273
+ }
274
+
275
+ /**
276
+ * Claude响应 -> OpenAI响应
277
+ */
278
+ toOpenAIResponse(claudeResponse, model) {
279
+ if (!claudeResponse || !claudeResponse.content || claudeResponse.content.length === 0) {
280
+ return {
281
+ id: `chatcmpl-${uuidv4()}`,
282
+ object: "chat.completion",
283
+ created: Math.floor(Date.now() / 1000),
284
+ model: model,
285
+ choices: [{
286
+ index: 0,
287
+ message: {
288
+ role: "assistant",
289
+ content: "",
290
+ },
291
+ finish_reason: "stop",
292
+ }],
293
+ usage: {
294
+ prompt_tokens: claudeResponse.usage?.input_tokens || 0,
295
+ completion_tokens: claudeResponse.usage?.output_tokens || 0,
296
+ total_tokens: (claudeResponse.usage?.input_tokens || 0) + (claudeResponse.usage?.output_tokens || 0),
297
+ },
298
+ };
299
+ }
300
+
301
+ // 检查是否包含 tool_use
302
+ const hasToolUse = claudeResponse.content.some(block => block && block.type === 'tool_use');
303
+
304
+ let message = {
305
+ role: "assistant",
306
+ content: null
307
+ };
308
+
309
+ if (hasToolUse) {
310
+ // 处理包含工具调用的响应
311
+ const toolCalls = [];
312
+ let textContent = '';
313
+
314
+ for (const block of claudeResponse.content) {
315
+ if (!block) continue;
316
+
317
+ if (block.type === 'text') {
318
+ textContent += block.text || '';
319
+ } else if (block.type === 'tool_use') {
320
+ toolCalls.push({
321
+ id: block.id || `call_${block.name}_${Date.now()}`,
322
+ type: "function",
323
+ function: {
324
+ name: block.name || '',
325
+ arguments: JSON.stringify(block.input || {})
326
+ }
327
+ });
328
+ }
329
+ }
330
+
331
+ message.content = textContent || null;
332
+ if (toolCalls.length > 0) {
333
+ message.tool_calls = toolCalls;
334
+ }
335
+ } else {
336
+ // 处理普通文本响应
337
+ message.content = this.processClaudeResponseContent(claudeResponse.content);
338
+ }
339
+
340
+ // 处理 finish_reason
341
+ let finishReason = 'stop';
342
+ if (claudeResponse.stop_reason === 'end_turn') {
343
+ finishReason = 'stop';
344
+ } else if (claudeResponse.stop_reason === 'max_tokens') {
345
+ finishReason = 'length';
346
+ } else if (claudeResponse.stop_reason === 'tool_use') {
347
+ finishReason = 'tool_calls';
348
+ } else if (claudeResponse.stop_reason) {
349
+ finishReason = claudeResponse.stop_reason;
350
+ }
351
+
352
+ return {
353
+ id: `chatcmpl-${uuidv4()}`,
354
+ object: "chat.completion",
355
+ created: Math.floor(Date.now() / 1000),
356
+ model: model,
357
+ choices: [{
358
+ index: 0,
359
+ message: message,
360
+ finish_reason: finishReason,
361
+ }],
362
+ usage: {
363
+ prompt_tokens: claudeResponse.usage?.input_tokens || 0,
364
+ completion_tokens: claudeResponse.usage?.output_tokens || 0,
365
+ total_tokens: (claudeResponse.usage?.input_tokens || 0) + (claudeResponse.usage?.output_tokens || 0),
366
+ cached_tokens: claudeResponse.usage?.cache_read_input_tokens || 0,
367
+ prompt_tokens_details: {
368
+ cached_tokens: claudeResponse.usage?.cache_read_input_tokens || 0
369
+ }
370
+ },
371
+ };
372
+ }
373
+
374
+ /**
375
+ * Claude流式响应 -> OpenAI流式响应
376
+ */
377
+ toOpenAIStreamChunk(claudeChunk, model) {
378
+ if (!claudeChunk) return null;
379
+
380
+ // 处理 Claude 流式事件
381
+ const chunkId = `chatcmpl-${uuidv4()}`;
382
+ const timestamp = Math.floor(Date.now() / 1000);
383
+
384
+ // message_start 事件
385
+ if (claudeChunk.type === 'message_start') {
386
+ return {
387
+ id: chunkId,
388
+ object: "chat.completion.chunk",
389
+ created: timestamp,
390
+ model: model,
391
+ system_fingerprint: "",
392
+ choices: [{
393
+ index: 0,
394
+ delta: {
395
+ role: "assistant",
396
+ content: ""
397
+ },
398
+ finish_reason: null
399
+ }],
400
+ usage: {
401
+ prompt_tokens: claudeChunk.message?.usage?.input_tokens || 0,
402
+ completion_tokens: 0,
403
+ total_tokens: claudeChunk.message?.usage?.input_tokens || 0,
404
+ cached_tokens: claudeChunk.message?.usage?.cache_read_input_tokens || 0
405
+ }
406
+ };
407
+ }
408
+
409
+ // content_block_start 事件
410
+ if (claudeChunk.type === 'content_block_start') {
411
+ const contentBlock = claudeChunk.content_block;
412
+
413
+ // 处理 tool_use 类型
414
+ if (contentBlock && contentBlock.type === 'tool_use') {
415
+ return {
416
+ id: chunkId,
417
+ object: "chat.completion.chunk",
418
+ created: timestamp,
419
+ model: model,
420
+ system_fingerprint: "",
421
+ choices: [{
422
+ index: 0,
423
+ delta: {
424
+ tool_calls: [{
425
+ index: claudeChunk.index || 0,
426
+ id: contentBlock.id,
427
+ type: "function",
428
+ function: {
429
+ name: contentBlock.name,
430
+ arguments: ""
431
+ }
432
+ }]
433
+ },
434
+ finish_reason: null
435
+ }]
436
+ };
437
+ }
438
+
439
+ // 处理 text 类型
440
+ return {
441
+ id: chunkId,
442
+ object: "chat.completion.chunk",
443
+ created: timestamp,
444
+ model: model,
445
+ system_fingerprint: "",
446
+ choices: [{
447
+ index: 0,
448
+ delta: {
449
+ content: ""
450
+ },
451
+ finish_reason: null
452
+ }]
453
+ };
454
+ }
455
+
456
+ // content_block_delta 事件
457
+ if (claudeChunk.type === 'content_block_delta') {
458
+ const delta = claudeChunk.delta;
459
+
460
+ // 处理 text_delta
461
+ if (delta && delta.type === 'text_delta') {
462
+ return {
463
+ id: chunkId,
464
+ object: "chat.completion.chunk",
465
+ created: timestamp,
466
+ model: model,
467
+ system_fingerprint: "",
468
+ choices: [{
469
+ index: 0,
470
+ delta: {
471
+ content: delta.text || ""
472
+ },
473
+ finish_reason: null
474
+ }]
475
+ };
476
+ }
477
+
478
+ // 处理 thinking_delta (推理内容)
479
+ if (delta && delta.type === 'thinking_delta') {
480
+ return {
481
+ id: chunkId,
482
+ object: "chat.completion.chunk",
483
+ created: timestamp,
484
+ model: model,
485
+ system_fingerprint: "",
486
+ choices: [{
487
+ index: 0,
488
+ delta: {
489
+ reasoning_content: delta.thinking || ""
490
+ },
491
+ finish_reason: null
492
+ }]
493
+ };
494
+ }
495
+
496
+ // 处理 input_json_delta (tool arguments)
497
+ if (delta && delta.type === 'input_json_delta') {
498
+ return {
499
+ id: chunkId,
500
+ object: "chat.completion.chunk",
501
+ created: timestamp,
502
+ model: model,
503
+ system_fingerprint: "",
504
+ choices: [{
505
+ index: 0,
506
+ delta: {
507
+ tool_calls: [{
508
+ index: claudeChunk.index || 0,
509
+ function: {
510
+ arguments: delta.partial_json || ""
511
+ }
512
+ }]
513
+ },
514
+ finish_reason: null
515
+ }]
516
+ };
517
+ }
518
+ }
519
+
520
+ // content_block_stop 事件
521
+ if (claudeChunk.type === 'content_block_stop') {
522
+ return {
523
+ id: chunkId,
524
+ object: "chat.completion.chunk",
525
+ created: timestamp,
526
+ model: model,
527
+ system_fingerprint: "",
528
+ choices: [{
529
+ index: 0,
530
+ delta: {},
531
+ finish_reason: null
532
+ }]
533
+ };
534
+ }
535
+
536
+ // message_delta 事件
537
+ if (claudeChunk.type === 'message_delta') {
538
+ const stopReason = claudeChunk.delta?.stop_reason;
539
+ const finishReason = stopReason === 'end_turn' ? 'stop' :
540
+ stopReason === 'max_tokens' ? 'length' :
541
+ stopReason === 'tool_use' ? 'tool_calls' :
542
+ stopReason || 'stop';
543
+
544
+ return {
545
+ id: chunkId,
546
+ object: "chat.completion.chunk",
547
+ created: timestamp,
548
+ model: model,
549
+ system_fingerprint: "",
550
+ choices: [{
551
+ index: 0,
552
+ delta: {},
553
+ finish_reason: finishReason
554
+ }],
555
+ usage: claudeChunk.usage ? {
556
+ prompt_tokens: claudeChunk.usage.input_tokens || 0,
557
+ completion_tokens: claudeChunk.usage.output_tokens || 0,
558
+ total_tokens: (claudeChunk.usage.input_tokens || 0) + (claudeChunk.usage.output_tokens || 0),
559
+ cached_tokens: claudeChunk.usage.cache_read_input_tokens || 0,
560
+ prompt_tokens_details: {
561
+ cached_tokens: claudeChunk.usage.cache_read_input_tokens || 0
562
+ }
563
+ } : undefined
564
+ };
565
+ }
566
+
567
+ // message_stop 事件
568
+ if (claudeChunk.type === 'message_stop') {
569
+ return {
570
+ id: chunkId,
571
+ object: "chat.completion.chunk",
572
+ created: timestamp,
573
+ model: model,
574
+ system_fingerprint: "",
575
+ choices: [{
576
+ index: 0,
577
+ delta: {},
578
+ finish_reason: 'stop'
579
+ }]
580
+ };
581
+ }
582
+
583
+ // 兼容旧格式:如果是字符串,直接作为文本内容
584
+ if (typeof claudeChunk === 'string') {
585
+ return {
586
+ id: chunkId,
587
+ object: "chat.completion.chunk",
588
+ created: timestamp,
589
+ model: model,
590
+ system_fingerprint: "",
591
+ choices: [{
592
+ index: 0,
593
+ delta: {
594
+ content: claudeChunk
595
+ },
596
+ finish_reason: null
597
+ }]
598
+ };
599
+ }
600
+
601
+ return null;
602
+ }
603
+
604
+ /**
605
+ * Claude模型列表 -> OpenAI模型列表
606
+ */
607
+ toOpenAIModelList(claudeModels) {
608
+ return {
609
+ object: "list",
610
+ data: claudeModels.models.map(m => {
611
+ const modelId = m.id || m.name;
612
+ return {
613
+ id: modelId,
614
+ object: "model",
615
+ created: Math.floor(Date.now() / 1000),
616
+ owned_by: "anthropic",
617
+ display_name: modelId,
618
+ };
619
+ }),
620
+ };
621
+ }
622
+
623
+ /**
624
+ * 将 Claude 模型列表转换为 Gemini 模型列表
625
+ */
626
+ toGeminiModelList(claudeModels) {
627
+ const models = claudeModels.models || [];
628
+ return {
629
+ models: models.map(m => ({
630
+ name: `models/${m.id || m.name}`,
631
+ version: m.version || "1.0.0",
632
+ displayName: m.displayName || m.id || m.name,
633
+ description: m.description || `A generative model for text and chat generation. ID: ${m.id || m.name}`,
634
+ inputTokenLimit: m.inputTokenLimit || GEMINI_DEFAULT_INPUT_TOKEN_LIMIT,
635
+ outputTokenLimit: m.outputTokenLimit || GEMINI_DEFAULT_OUTPUT_TOKEN_LIMIT,
636
+ supportedGenerationMethods: m.supportedGenerationMethods || ["generateContent", "streamGenerateContent"]
637
+ }))
638
+ };
639
+ }
640
+
641
+ /**
642
+ * 处理Claude内容到OpenAI格式
643
+ */
644
+ processClaudeContentToOpenAIContent(content) {
645
+ if (!content || !Array.isArray(content)) return [];
646
+
647
+ const contentArray = [];
648
+
649
+ content.forEach(block => {
650
+ if (!block) return;
651
+
652
+ switch (block.type) {
653
+ case 'text':
654
+ if (block.text) {
655
+ contentArray.push({
656
+ type: 'text',
657
+ text: block.text
658
+ });
659
+ }
660
+ break;
661
+
662
+ case 'image':
663
+ if (block.source && block.source.type === 'base64') {
664
+ contentArray.push({
665
+ type: 'image_url',
666
+ image_url: {
667
+ url: `data:${block.source.media_type};base64,${block.source.data}`
668
+ }
669
+ });
670
+ }
671
+ break;
672
+
673
+ case 'tool_use':
674
+ contentArray.push({
675
+ type: 'text',
676
+ text: `[Tool use: ${block.name}]`
677
+ });
678
+ break;
679
+
680
+ case 'tool_result':
681
+ contentArray.push({
682
+ type: 'text',
683
+ text: typeof block.content === 'string' ? block.content : JSON.stringify(block.content)
684
+ });
685
+ break;
686
+
687
+ default:
688
+ if (block.text) {
689
+ contentArray.push({
690
+ type: 'text',
691
+ text: block.text
692
+ });
693
+ }
694
+ }
695
+ });
696
+
697
+ return contentArray;
698
+ }
699
+
700
+ /**
701
+ * 处理Claude响应内容
702
+ */
703
+ processClaudeResponseContent(content) {
704
+ if (!content || !Array.isArray(content)) return '';
705
+
706
+ const contentArray = [];
707
+
708
+ content.forEach(block => {
709
+ if (!block) return;
710
+
711
+ switch (block.type) {
712
+ case 'text':
713
+ contentArray.push({
714
+ type: 'text',
715
+ text: block.text || ''
716
+ });
717
+ break;
718
+
719
+ case 'image':
720
+ if (block.source && block.source.type === 'base64') {
721
+ contentArray.push({
722
+ type: 'image_url',
723
+ image_url: {
724
+ url: `data:${block.source.media_type};base64,${block.source.data}`
725
+ }
726
+ });
727
+ }
728
+ break;
729
+
730
+ default:
731
+ if (block.text) {
732
+ contentArray.push({
733
+ type: 'text',
734
+ text: block.text
735
+ });
736
+ }
737
+ }
738
+ });
739
+
740
+ return contentArray.length === 1 && contentArray[0].type === 'text'
741
+ ? contentArray[0].text
742
+ : contentArray;
743
+ }
744
+
745
+ // =========================================================================
746
+ // Claude -> Gemini 转换
747
+ // =========================================================================
748
+
749
  // Sentinel thoughtSignature attached to functionCall parts produced from
  // Claude tool_use blocks (see toGeminiRequest); the value signals the
  // upstream validator to skip thought-signature checks for these parts.
  static GEMINI_CLAUDE_THOUGHT_SIGNATURE = "skip_thought_signature_validator";
751
+
752
+ /**
753
+ * Claude请求 -> Gemini请求
754
+ */
755
+ toGeminiRequest(claudeRequest) {
756
+ if (!claudeRequest || typeof claudeRequest !== 'object') {
757
+ console.warn("Invalid claudeRequest provided to toGeminiRequest.");
758
+ return { contents: [] };
759
+ }
760
+
761
+ const geminiRequest = {
762
+ contents: []
763
+ };
764
+
765
+ // 处理系统指令 - 支持数组和字符串格式
766
+ if (claudeRequest.system) {
767
+ if (Array.isArray(claudeRequest.system)) {
768
+ // 数组格式的系统指令
769
+ const systemParts = [];
770
+ claudeRequest.system.forEach(systemPrompt => {
771
+ if (systemPrompt && systemPrompt.type === 'text' && typeof systemPrompt.text === 'string') {
772
+ systemParts.push({ text: systemPrompt.text });
773
+ }
774
+ });
775
+ if (systemParts.length > 0) {
776
+ geminiRequest.systemInstruction = {
777
+ role: 'user',
778
+ parts: systemParts
779
+ };
780
+ }
781
+ } else if (typeof claudeRequest.system === 'string') {
782
+ // 字符串格式的系统指令
783
+ geminiRequest.systemInstruction = {
784
+ parts: [{ text: claudeRequest.system }]
785
+ };
786
+ } else if (typeof claudeRequest.system === 'object') {
787
+ // 对象格式的系统指令
788
+ geminiRequest.systemInstruction = {
789
+ parts: [{ text: JSON.stringify(claudeRequest.system) }]
790
+ };
791
+ }
792
+ }
793
+
794
+ // 处理消息
795
+ if (Array.isArray(claudeRequest.messages)) {
796
+ claudeRequest.messages.forEach(message => {
797
+ if (!message || typeof message !== 'object' || !message.role) {
798
+ console.warn("Skipping invalid message in claudeRequest.messages.");
799
+ return;
800
+ }
801
+
802
+ const geminiRole = message.role === 'assistant' ? 'model' : 'user';
803
+ const content = message.content;
804
+
805
+ // 处理内容
806
+ if (Array.isArray(content)) {
807
+ const parts = [];
808
+
809
+ content.forEach(block => {
810
+ if (!block || typeof block !== 'object') return;
811
+
812
+ switch (block.type) {
813
+ case 'text':
814
+ if (typeof block.text === 'string') {
815
+ parts.push({ text: block.text });
816
+ }
817
+ break;
818
+
819
+ // [FIX] 参考 ag/request.rs 添加 thinking 块处理
820
+ case 'thinking':
821
+ if (typeof block.thinking === 'string' && block.thinking.length > 0) {
822
+ const thinkingPart = {
823
+ text: block.thinking,
824
+ thought: true
825
+ };
826
+ // 如果有签名,添加 thoughtSignature
827
+ if (block.signature && block.signature.length >= 50) {
828
+ thinkingPart.thoughtSignature = block.signature;
829
+ }
830
+ parts.push(thinkingPart);
831
+ }
832
+ break;
833
+
834
+ // [FIX] 处理 redacted_thinking 块
835
+ case 'redacted_thinking':
836
+ // 将 redacted_thinking 转换为普通文本
837
+ if (block.data) {
838
+ parts.push({
839
+ text: `[Redacted Thinking: ${block.data}]`
840
+ });
841
+ }
842
+ break;
843
+
844
+ case 'tool_use':
845
+ // 转换为 Gemini functionCall 格式
846
+ if (block.name && block.input) {
847
+ const args = typeof block.input === 'string'
848
+ ? block.input
849
+ : JSON.stringify(block.input);
850
+
851
+ // 验证 args 是有效的 JSON 对象
852
+ try {
853
+ const parsedArgs = JSON.parse(args);
854
+ if (parsedArgs && typeof parsedArgs === 'object') {
855
+ parts.push({
856
+ thoughtSignature: ClaudeConverter.GEMINI_CLAUDE_THOUGHT_SIGNATURE,
857
+ functionCall: {
858
+ name: block.name,
859
+ args: parsedArgs
860
+ }
861
+ });
862
+ }
863
+ } catch (e) {
864
+ // 如果解析失败,尝试直接使用 input
865
+ if (block.input && typeof block.input === 'object') {
866
+ parts.push({
867
+ thoughtSignature: ClaudeConverter.GEMINI_CLAUDE_THOUGHT_SIGNATURE,
868
+ functionCall: {
869
+ name: block.name,
870
+ args: block.input
871
+ }
872
+ });
873
+ }
874
+ }
875
+ }
876
+ break;
877
+
878
+ case 'tool_result':
879
+ // 转换为 Gemini functionResponse 格式
880
+ // [FIX] 参考 ag/request.rs 的实现,正确处理 tool_use_id 到函数名的映射
881
+ const toolCallId = block.tool_use_id;
882
+ if (toolCallId) {
883
+ // 尝试从之前的 tool_use 块中查找对应的函数名
884
+ // 如果找不到,则从 tool_use_id 中提取
885
+ let funcName = toolCallId;
886
+
887
+ // 检查是否有缓存的 tool_id -> name 映射
888
+ // 格式通常是 "funcName-uuid" 或 "toolu_xxx"
889
+ if (toolCallId.startsWith('toolu_')) {
890
+ // Claude 格式的 tool_use_id,需要从上下文中查找函数名
891
+ // 这里我们保留原始 ID 作为 name(Gemini 会处理)
892
+ funcName = toolCallId;
893
+ } else {
894
+ const toolCallIdParts = toolCallId.split('-');
895
+ if (toolCallIdParts.length > 1) {
896
+ // 移除最后一个部分(UUID),保留函数名
897
+ funcName = toolCallIdParts.slice(0, -1).join('-');
898
+ }
899
+ }
900
+
901
+ // 获取响应数据
902
+ let responseData = block.content;
903
+
904
+ // [FIX] 参考 ag/request.rs 的 tool_result_compressor 逻辑
905
+ // 处理嵌套的 content 数组(如图片等)
906
+ if (Array.isArray(responseData)) {
907
+ // 提取文本内容
908
+ const textParts = responseData
909
+ .filter(item => item && item.type === 'text')
910
+ .map(item => item.text)
911
+ .join('\n');
912
+ responseData = textParts || JSON.stringify(responseData);
913
+ } else if (typeof responseData !== 'string') {
914
+ responseData = JSON.stringify(responseData);
915
+ }
916
+
917
+ parts.push({
918
+ functionResponse: {
919
+ name: funcName,
920
+ response: {
921
+ result: responseData
922
+ }
923
+ }
924
+ });
925
+ }
926
+ break;
927
+
928
+ case 'image':
929
+ if (block.source && block.source.type === 'base64') {
930
+ parts.push({
931
+ inlineData: {
932
+ mimeType: block.source.media_type,
933
+ data: block.source.data
934
+ }
935
+ });
936
+ }
937
+ break;
938
+ }
939
+ });
940
+
941
+ if (parts.length > 0) {
942
+ geminiRequest.contents.push({
943
+ role: geminiRole,
944
+ parts: parts
945
+ });
946
+ }
947
+ } else if (typeof content === 'string') {
948
+ // 字符串内容
949
+ geminiRequest.contents.push({
950
+ role: geminiRole,
951
+ parts: [{ text: content }]
952
+ });
953
+ }
954
+ });
955
+ }
956
+
957
+ // 添加生成配置
958
+ const generationConfig = {};
959
+
960
+ if (claudeRequest.max_tokens !== undefined) {
961
+ generationConfig.maxOutputTokens = claudeRequest.max_tokens;
962
+ }
963
+ if (claudeRequest.temperature !== undefined) {
964
+ generationConfig.temperature = claudeRequest.temperature;
965
+ }
966
+ if (claudeRequest.top_p !== undefined) {
967
+ generationConfig.topP = claudeRequest.top_p;
968
+ }
969
+ if (claudeRequest.top_k !== undefined) {
970
+ generationConfig.topK = claudeRequest.top_k;
971
+ }
972
+
973
+ // 处理 thinking 配置 - 转换为 Gemini thinkingBudget
974
+ if (claudeRequest.thinking && claudeRequest.thinking.type === 'enabled') {
975
+ if (claudeRequest.thinking.budget_tokens !== undefined) {
976
+ const budget = claudeRequest.thinking.budget_tokens;
977
+ if (!generationConfig.thinkingConfig) {
978
+ generationConfig.thinkingConfig = {};
979
+ }
980
+ generationConfig.thinkingConfig.thinkingBudget = budget;
981
+ generationConfig.thinkingConfig.include_thoughts = true;
982
+ }
983
+ }
984
+
985
+ if (Object.keys(generationConfig).length > 0) {
986
+ geminiRequest.generationConfig = generationConfig;
987
+ }
988
+
989
+ // 处理工具 - 使用 parametersJsonSchema 格式
990
+ if (Array.isArray(claudeRequest.tools) && claudeRequest.tools.length > 0) {
991
+ const functionDeclarations = [];
992
+
993
+ claudeRequest.tools.forEach(tool => {
994
+ if (!tool || typeof tool !== 'object' || !tool.name) {
995
+ console.warn("Skipping invalid tool declaration in claudeRequest.tools.");
996
+ return;
997
+ }
998
+
999
+ // 清理 input_schema
1000
+ let inputSchema = tool.input_schema;
1001
+ if (inputSchema && typeof inputSchema === 'object') {
1002
+ // 创建副本以避免修改原始对象
1003
+ inputSchema = JSON.parse(JSON.stringify(inputSchema));
1004
+ // 清理不需要的字段
1005
+ delete inputSchema.$schema;
1006
+ // 清理 URL 格式(Gemini 不支持)
1007
+ this.cleanUrlFormatFromSchema(inputSchema);
1008
+ }
1009
+
1010
+ const funcDecl = {
1011
+ name: String(tool.name),
1012
+ description: String(tool.description || '')
1013
+ };
1014
+
1015
+ // 使用 parametersJsonSchema 而不是 parameters
1016
+ if (inputSchema) {
1017
+ funcDecl.parametersJsonSchema = inputSchema;
1018
+ }
1019
+
1020
+ functionDeclarations.push(funcDecl);
1021
+ });
1022
+
1023
+ if (functionDeclarations.length > 0) {
1024
+ geminiRequest.tools = [{
1025
+ functionDeclarations: functionDeclarations
1026
+ }];
1027
+ }
1028
+ }
1029
+
1030
+ // 处理tool_choice
1031
+ if (claudeRequest.tool_choice) {
1032
+ geminiRequest.toolConfig = this.buildGeminiToolConfigFromClaude(claudeRequest.tool_choice);
1033
+ }
1034
+
1035
+ // 添加默认安全设置
1036
+ geminiRequest.safetySettings = this.getDefaultSafetySettings();
1037
+
1038
+ return geminiRequest;
1039
+ }
1040
+
1041
+ /**
1042
+ * 清理 JSON Schema 中的 URL 格式
1043
+ * Gemini 不支持 "format": "uri"
1044
+ */
1045
+ cleanUrlFormatFromSchema(schema) {
1046
+ if (!schema || typeof schema !== 'object') return;
1047
+
1048
+ // 如果是属性对象,检查并清理 format
1049
+ if (schema.type === 'string' && schema.format === 'uri') {
1050
+ delete schema.format;
1051
+ }
1052
+
1053
+ // 递归处理 properties
1054
+ if (schema.properties && typeof schema.properties === 'object') {
1055
+ Object.values(schema.properties).forEach(prop => {
1056
+ this.cleanUrlFormatFromSchema(prop);
1057
+ });
1058
+ }
1059
+
1060
+ // 递归处理 items(数组类型)
1061
+ if (schema.items) {
1062
+ this.cleanUrlFormatFromSchema(schema.items);
1063
+ }
1064
+
1065
+ // 递归处理 additionalProperties
1066
+ if (schema.additionalProperties && typeof schema.additionalProperties === 'object') {
1067
+ this.cleanUrlFormatFromSchema(schema.additionalProperties);
1068
+ }
1069
+ }
1070
+
1071
+ /**
1072
+ * 获取默认的 Gemini 安全设置
1073
+ */
1074
+ getDefaultSafetySettings() {
1075
+ return [
1076
+ { category: "HARM_CATEGORY_HARASSMENT", threshold: "OFF" },
1077
+ { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "OFF" },
1078
+ { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "OFF" },
1079
+ { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "OFF" },
1080
+ { category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "OFF" }
1081
+ ];
1082
+ }
1083
+
1084
+ /**
1085
+ * Claude响应 -> Gemini响应
1086
+ */
1087
+ toGeminiResponse(claudeResponse, model) {
1088
+ if (!claudeResponse || !claudeResponse.content || claudeResponse.content.length === 0) {
1089
+ return { candidates: [], usageMetadata: {} };
1090
+ }
1091
+
1092
+ const parts = [];
1093
+
1094
+ // 处理内容块
1095
+ for (const block of claudeResponse.content) {
1096
+ if (!block) continue;
1097
+
1098
+ switch (block.type) {
1099
+ case 'text':
1100
+ if (block.text) {
1101
+ parts.push({ text: block.text });
1102
+ }
1103
+ break;
1104
+
1105
+ // [FIX] 参考 ag/response.rs 添加 thinking 块处理
1106
+ case 'thinking':
1107
+ if (block.thinking) {
1108
+ const thinkingPart = {
1109
+ text: block.thinking,
1110
+ thought: true
1111
+ };
1112
+ // 如果有签名,添加 thoughtSignature
1113
+ if (block.signature && block.signature.length >= 50) {
1114
+ thinkingPart.thoughtSignature = block.signature;
1115
+ }
1116
+ parts.push(thinkingPart);
1117
+ }
1118
+ break;
1119
+
1120
+ case 'tool_use':
1121
+ // [FIX] 添加 id 和 thoughtSignature 支持
1122
+ const functionCallPart = {
1123
+ functionCall: {
1124
+ name: block.name,
1125
+ args: block.input || {}
1126
+ }
1127
+ };
1128
+ // 添加 id(如果存在)
1129
+ if (block.id) {
1130
+ functionCallPart.functionCall.id = block.id;
1131
+ }
1132
+ // 添加签名(如果存在)
1133
+ if (block.signature && block.signature.length >= 50) {
1134
+ functionCallPart.thoughtSignature = block.signature;
1135
+ }
1136
+ parts.push(functionCallPart);
1137
+ break;
1138
+
1139
+ case 'image':
1140
+ if (block.source && block.source.type === 'base64') {
1141
+ parts.push({
1142
+ inlineData: {
1143
+ mimeType: block.source.media_type,
1144
+ data: block.source.data
1145
+ }
1146
+ });
1147
+ }
1148
+ break;
1149
+
1150
+ default:
1151
+ if (block.text) {
1152
+ parts.push({ text: block.text });
1153
+ }
1154
+ }
1155
+ }
1156
+
1157
+ // 映射finish_reason
1158
+ const finishReasonMap = {
1159
+ 'end_turn': 'STOP',
1160
+ 'max_tokens': 'MAX_TOKENS',
1161
+ 'tool_use': 'STOP',
1162
+ 'stop_sequence': 'STOP'
1163
+ };
1164
+
1165
+ return {
1166
+ candidates: [{
1167
+ content: {
1168
+ role: 'model',
1169
+ parts: parts
1170
+ },
1171
+ finishReason: finishReasonMap[claudeResponse.stop_reason] || 'STOP'
1172
+ }],
1173
+ usageMetadata: claudeResponse.usage ? {
1174
+ promptTokenCount: claudeResponse.usage.input_tokens || 0,
1175
+ candidatesTokenCount: claudeResponse.usage.output_tokens || 0,
1176
+ totalTokenCount: (claudeResponse.usage.input_tokens || 0) + (claudeResponse.usage.output_tokens || 0),
1177
+ cachedContentTokenCount: claudeResponse.usage.cache_read_input_tokens || 0,
1178
+ promptTokensDetails: [{
1179
+ modality: "TEXT",
1180
+ tokenCount: claudeResponse.usage.input_tokens || 0
1181
+ }],
1182
+ candidatesTokensDetails: [{
1183
+ modality: "TEXT",
1184
+ tokenCount: claudeResponse.usage.output_tokens || 0
1185
+ }]
1186
+ } : {}
1187
+ };
1188
+ }
1189
+
1190
+ /**
1191
+ * Claude流式响应 -> Gemini流式响应
1192
+ */
1193
+ toGeminiStreamChunk(claudeChunk, model) {
1194
+ if (!claudeChunk) return null;
1195
+
1196
+ // 处理Claude流式事件
1197
+ if (typeof claudeChunk === 'object' && !Array.isArray(claudeChunk)) {
1198
+ // content_block_start 事件 - 处理 thinking 块开始
1199
+ if (claudeChunk.type === 'content_block_start') {
1200
+ const contentBlock = claudeChunk.content_block;
1201
+ if (contentBlock && contentBlock.type === 'thinking') {
1202
+ // thinking 块开始,返回空(等待 delta)
1203
+ return null;
1204
+ }
1205
+ if (contentBlock && contentBlock.type === 'tool_use') {
1206
+ // tool_use 块开始
1207
+ return {
1208
+ candidates: [{
1209
+ content: {
1210
+ role: "model",
1211
+ parts: [{
1212
+ functionCall: {
1213
+ name: contentBlock.name,
1214
+ args: {},
1215
+ id: contentBlock.id
1216
+ }
1217
+ }]
1218
+ }
1219
+ }]
1220
+ };
1221
+ }
1222
+ }
1223
+
1224
+ // content_block_delta 事件
1225
+ if (claudeChunk.type === 'content_block_delta') {
1226
+ const delta = claudeChunk.delta;
1227
+
1228
+ // 处理 text_delta
1229
+ if (delta && delta.type === 'text_delta') {
1230
+ return {
1231
+ candidates: [{
1232
+ content: {
1233
+ role: "model",
1234
+ parts: [{
1235
+ text: delta.text || ""
1236
+ }]
1237
+ }
1238
+ }]
1239
+ };
1240
+ }
1241
+
1242
+ // [FIX] 处理 thinking_delta - 转换为 Gemini 的 thought 格式
1243
+ if (delta && delta.type === 'thinking_delta') {
1244
+ return {
1245
+ candidates: [{
1246
+ content: {
1247
+ role: "model",
1248
+ parts: [{
1249
+ text: delta.thinking || "",
1250
+ thought: true
1251
+ }]
1252
+ }
1253
+ }]
1254
+ };
1255
+ }
1256
+
1257
+ // [FIX] 处理 signature_delta
1258
+ if (delta && delta.type === 'signature_delta') {
1259
+ // 签名通常与前一个 thinking 块关联
1260
+ // 在流式场景中,我们可以忽略或记录
1261
+ return null;
1262
+ }
1263
+
1264
+ // [FIX] 处理 input_json_delta (tool arguments)
1265
+ if (delta && delta.type === 'input_json_delta') {
1266
+ // 工具参数增量,Gemini 不支持增量参数,忽略
1267
+ return null;
1268
+ }
1269
+ }
1270
+
1271
+ // message_delta 事件 - 流结束
1272
+ if (claudeChunk.type === 'message_delta') {
1273
+ const stopReason = claudeChunk.delta?.stop_reason;
1274
+ const result = {
1275
+ candidates: [{
1276
+ finishReason: stopReason === 'end_turn' ? 'STOP' :
1277
+ stopReason === 'max_tokens' ? 'MAX_TOKENS' :
1278
+ stopReason === 'tool_use' ? 'STOP' :
1279
+ 'OTHER'
1280
+ }]
1281
+ };
1282
+
1283
+ // 添加 usage 信息
1284
+ if (claudeChunk.usage) {
1285
+ result.usageMetadata = {
1286
+ promptTokenCount: claudeChunk.usage.input_tokens || 0,
1287
+ candidatesTokenCount: claudeChunk.usage.output_tokens || 0,
1288
+ totalTokenCount: (claudeChunk.usage.input_tokens || 0) + (claudeChunk.usage.output_tokens || 0),
1289
+ cachedContentTokenCount: claudeChunk.usage.cache_read_input_tokens || 0,
1290
+ promptTokensDetails: [{
1291
+ modality: "TEXT",
1292
+ tokenCount: claudeChunk.usage.input_tokens || 0
1293
+ }],
1294
+ candidatesTokensDetails: [{
1295
+ modality: "TEXT",
1296
+ tokenCount: claudeChunk.usage.output_tokens || 0
1297
+ }]
1298
+ };
1299
+ }
1300
+
1301
+ return result;
1302
+ }
1303
+ }
1304
+
1305
+ // 向后兼容:处理字符串格式
1306
+ if (typeof claudeChunk === 'string') {
1307
+ return {
1308
+ candidates: [{
1309
+ content: {
1310
+ role: "model",
1311
+ parts: [{
1312
+ text: claudeChunk
1313
+ }]
1314
+ }
1315
+ }]
1316
+ };
1317
+ }
1318
+
1319
+ return null;
1320
+ }
1321
+
1322
+ /**
1323
+ * 处理Claude内容到Gemini parts
1324
+ */
1325
+ processClaudeContentToGeminiParts(content) {
1326
+ if (!content) return [];
1327
+
1328
+ if (typeof content === 'string') {
1329
+ return [{ text: content }];
1330
+ }
1331
+
1332
+ if (Array.isArray(content)) {
1333
+ const parts = [];
1334
+
1335
+ content.forEach(block => {
1336
+ if (!block || typeof block !== 'object' || !block.type) {
1337
+ console.warn("Skipping invalid content block.");
1338
+ return;
1339
+ }
1340
+
1341
+ switch (block.type) {
1342
+ case 'text':
1343
+ if (typeof block.text === 'string') {
1344
+ parts.push({ text: block.text });
1345
+ }
1346
+ break;
1347
+
1348
+ case 'image':
1349
+ if (block.source && typeof block.source === 'object' &&
1350
+ block.source.type === 'base64' &&
1351
+ typeof block.source.media_type === 'string' &&
1352
+ typeof block.source.data === 'string') {
1353
+ parts.push({
1354
+ inlineData: {
1355
+ mimeType: block.source.media_type,
1356
+ data: block.source.data
1357
+ }
1358
+ });
1359
+ }
1360
+ break;
1361
+
1362
+ case 'tool_use':
1363
+ if (typeof block.name === 'string' &&
1364
+ block.input && typeof block.input === 'object') {
1365
+ parts.push({
1366
+ functionCall: {
1367
+ name: block.name,
1368
+ args: block.input
1369
+ }
1370
+ });
1371
+ }
1372
+ break;
1373
+
1374
+ case 'tool_result':
1375
+ if (typeof block.tool_use_id === 'string') {
1376
+ parts.push({
1377
+ functionResponse: {
1378
+ name: block.tool_use_id,
1379
+ response: { content: block.content }
1380
+ }
1381
+ });
1382
+ }
1383
+ break;
1384
+
1385
+ default:
1386
+ if (typeof block.text === 'string') {
1387
+ parts.push({ text: block.text });
1388
+ }
1389
+ }
1390
+ });
1391
+
1392
+ return parts;
1393
+ }
1394
+
1395
+ return [];
1396
+ }
1397
+
1398
+ /**
1399
+ * 构建Gemini工具配置
1400
+ */
1401
+ buildGeminiToolConfigFromClaude(claudeToolChoice) {
1402
+ if (!claudeToolChoice || typeof claudeToolChoice !== 'object' || !claudeToolChoice.type) {
1403
+ console.warn("Invalid claudeToolChoice provided.");
1404
+ return undefined;
1405
+ }
1406
+
1407
+ switch (claudeToolChoice.type) {
1408
+ case 'auto':
1409
+ return { functionCallingConfig: { mode: 'AUTO' } };
1410
+ case 'none':
1411
+ return { functionCallingConfig: { mode: 'NONE' } };
1412
+ case 'tool':
1413
+ if (claudeToolChoice.name && typeof claudeToolChoice.name === 'string') {
1414
+ return {
1415
+ functionCallingConfig: {
1416
+ mode: 'ANY',
1417
+ allowedFunctionNames: [claudeToolChoice.name]
1418
+ }
1419
+ };
1420
+ }
1421
+ console.warn("Invalid tool name in claudeToolChoice of type 'tool'.");
1422
+ return undefined;
1423
+ default:
1424
+ console.warn(`Unsupported claudeToolChoice type: ${claudeToolChoice.type}`);
1425
+ return undefined;
1426
+ }
1427
+ }
1428
+
1429
+ // =========================================================================
1430
+ // Claude -> OpenAI Responses 转换
1431
+ // =========================================================================
1432
+
1433
+ /**
1434
+ * Claude请求 -> OpenAI Responses请求
1435
+ */
1436
+ toOpenAIResponsesRequest(claudeRequest) {
1437
+ // 转换为OpenAI Responses格式
1438
+ const responsesRequest = {
1439
+ model: claudeRequest.model,
1440
+ max_tokens: checkAndAssignOrDefault(claudeRequest.max_tokens, OPENAI_DEFAULT_MAX_TOKENS),
1441
+ temperature: checkAndAssignOrDefault(claudeRequest.temperature, OPENAI_DEFAULT_TEMPERATURE),
1442
+ top_p: checkAndAssignOrDefault(claudeRequest.top_p, OPENAI_DEFAULT_TOP_P),
1443
+ };
1444
+
1445
+ // 处理系统指令
1446
+ if (claudeRequest.system) {
1447
+ responsesRequest.instructions = claudeRequest.system;
1448
+ }
1449
+
1450
+ // 处理消息
1451
+ if (claudeRequest.messages && Array.isArray(claudeRequest.messages)) {
1452
+ responsesRequest.input = claudeRequest.messages;
1453
+ }
1454
+
1455
+ return responsesRequest;
1456
+ }
1457
+
1458
+ /**
1459
+ * Claude响应 -> OpenAI Responses响应
1460
+ */
1461
+ toOpenAIResponsesResponse(claudeResponse, model) {
1462
+ const content = this.processClaudeResponseContent(claudeResponse.content);
1463
+ const textContent = typeof content === 'string' ? content : JSON.stringify(content);
1464
+
1465
+ let output = [];
1466
+ output.push({
1467
+ type: "message",
1468
+ id: `msg_${uuidv4().replace(/-/g, '')}`,
1469
+ summary: [],
1470
+ role: "assistant",
1471
+ status: "completed",
1472
+ content: [{
1473
+ annotations: [],
1474
+ logprobs: [],
1475
+ text: textContent,
1476
+ type: "output_text"
1477
+ }]
1478
+ });
1479
+
1480
+ return {
1481
+ background: false,
1482
+ created_at: Math.floor(Date.now() / 1000),
1483
+ error: null,
1484
+ id: `resp_${uuidv4().replace(/-/g, '')}`,
1485
+ incomplete_details: null,
1486
+ max_output_tokens: null,
1487
+ max_tool_calls: null,
1488
+ metadata: {},
1489
+ model: model || claudeResponse.model,
1490
+ object: "response",
1491
+ output: output,
1492
+ parallel_tool_calls: true,
1493
+ previous_response_id: null,
1494
+ prompt_cache_key: null,
1495
+ reasoning: {},
1496
+ safety_identifier: "user-" + uuidv4().replace(/-/g, ''),
1497
+ service_tier: "default",
1498
+ status: "completed",
1499
+ store: false,
1500
+ temperature: 1,
1501
+ text: {
1502
+ format: { type: "text" },
1503
+ },
1504
+ tool_choice: "auto",
1505
+ tools: [],
1506
+ top_logprobs: 0,
1507
+ top_p: 1,
1508
+ truncation: "disabled",
1509
+ usage: {
1510
+ input_tokens: claudeResponse.usage?.input_tokens || 0,
1511
+ input_tokens_details: {
1512
+ cached_tokens: claudeResponse.usage?.cache_read_input_tokens || 0
1513
+ },
1514
+ output_tokens: claudeResponse.usage?.output_tokens || 0,
1515
+ output_tokens_details: {
1516
+ reasoning_tokens: 0
1517
+ },
1518
+ total_tokens: (claudeResponse.usage?.input_tokens || 0) + (claudeResponse.usage?.output_tokens || 0)
1519
+ },
1520
+ user: null
1521
+ };
1522
+ }
1523
+
1524
+ /**
1525
+ * Claude流式响应 -> OpenAI Responses流式响应
1526
+ */
1527
+ toOpenAIResponsesStreamChunk(claudeChunk, model, requestId = null) {
1528
+ if (!claudeChunk) return [];
1529
+
1530
+ const responseId = requestId || `resp_${uuidv4().replace(/-/g, '')}`;
1531
+ const events = [];
1532
+
1533
+ // message_start 事件 - 流开始
1534
+ if (claudeChunk.type === 'message_start') {
1535
+ events.push(
1536
+ generateResponseCreated(responseId, model || 'unknown'),
1537
+ generateResponseInProgress(responseId),
1538
+ generateOutputItemAdded(responseId),
1539
+ generateContentPartAdded(responseId)
1540
+ );
1541
+ }
1542
+
1543
+ // content_block_start 事件
1544
+ if (claudeChunk.type === 'content_block_start') {
1545
+ const contentBlock = claudeChunk.content_block;
1546
+
1547
+ // 对于 tool_use 类型,添加工具调用项
1548
+ if (contentBlock && contentBlock.type === 'tool_use') {
1549
+ events.push({
1550
+ item: {
1551
+ id: contentBlock.id,
1552
+ type: "function_call",
1553
+ name: contentBlock.name,
1554
+ arguments: "",
1555
+ status: "in_progress"
1556
+ },
1557
+ output_index: claudeChunk.index || 0,
1558
+ sequence_number: 2,
1559
+ type: "response.output_item.added"
1560
+ });
1561
+ }
1562
+ }
1563
+
1564
+ // content_block_delta 事件
1565
+ if (claudeChunk.type === 'content_block_delta') {
1566
+ const delta = claudeChunk.delta;
1567
+
1568
+ // 处理文本增量
1569
+ if (delta && delta.type === 'text_delta') {
1570
+ events.push({
1571
+ delta: delta.text || "",
1572
+ item_id: `msg_${uuidv4().replace(/-/g, '')}`,
1573
+ output_index: claudeChunk.index || 0,
1574
+ sequence_number: 3,
1575
+ type: "response.output_text.delta"
1576
+ });
1577
+ }
1578
+ // 处理推理内容增量
1579
+ else if (delta && delta.type === 'thinking_delta') {
1580
+ events.push({
1581
+ delta: delta.thinking || "",
1582
+ item_id: `thinking_${uuidv4().replace(/-/g, '')}`,
1583
+ output_index: claudeChunk.index || 0,
1584
+ sequence_number: 3,
1585
+ type: "response.reasoning_summary_text.delta"
1586
+ });
1587
+ }
1588
+ // 处理工具调用参数增量
1589
+ else if (delta && delta.type === 'input_json_delta') {
1590
+ events.push({
1591
+ delta: delta.partial_json || "",
1592
+ item_id: `call_${uuidv4().replace(/-/g, '')}`,
1593
+ output_index: claudeChunk.index || 0,
1594
+ sequence_number: 3,
1595
+ type: "response.custom_tool_call_input.delta"
1596
+ });
1597
+ }
1598
+ }
1599
+
1600
+ // content_block_stop 事件
1601
+ if (claudeChunk.type === 'content_block_stop') {
1602
+ events.push({
1603
+ item_id: `msg_${uuidv4().replace(/-/g, '')}`,
1604
+ output_index: claudeChunk.index || 0,
1605
+ sequence_number: 4,
1606
+ type: "response.output_item.done"
1607
+ });
1608
+ }
1609
+
1610
+ // message_delta 事件 - 流结束
1611
+ if (claudeChunk.type === 'message_delta') {
1612
+ // events.push(
1613
+ // generateOutputTextDone(responseId),
1614
+ // generateContentPartDone(responseId),
1615
+ // generateOutputItemDone(responseId),
1616
+ // generateResponseCompleted(responseId)
1617
+ // );
1618
+
1619
+ // 如果有 usage 信息,更新最后一个事件
1620
+ if (claudeChunk.usage && events.length > 0) {
1621
+ const lastEvent = events[events.length - 1];
1622
+ if (lastEvent.response) {
1623
+ lastEvent.response.usage = {
1624
+ input_tokens: claudeChunk.usage.input_tokens || 0,
1625
+ input_tokens_details: {
1626
+ cached_tokens: claudeChunk.usage.cache_read_input_tokens || 0
1627
+ },
1628
+ output_tokens: claudeChunk.usage.output_tokens || 0,
1629
+ output_tokens_details: {
1630
+ reasoning_tokens: 0
1631
+ },
1632
+ total_tokens: (claudeChunk.usage.input_tokens || 0) + (claudeChunk.usage.output_tokens || 0)
1633
+ };
1634
+ }
1635
+ }
1636
+ }
1637
+
1638
+ // message_stop 事件
1639
+ if (claudeChunk.type === 'message_stop') {
1640
+ events.push(
1641
+ generateOutputTextDone(responseId),
1642
+ generateContentPartDone(responseId),
1643
+ generateOutputItemDone(responseId),
1644
+ generateResponseCompleted(responseId)
1645
+ );
1646
+ }
1647
+
1648
+ return events;
1649
+ }
1650
+ }
1651
+
1652
+ export default ClaudeConverter;
src/converters/strategies/CodexConverter.js ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * Codex 转换器
3
+ * 处理 OpenAI 协议与 Codex 协议之间的转换
4
+ */
5
+
6
+ import crypto from 'crypto';
7
+ import { BaseConverter } from '../BaseConverter.js';
8
+ import { MODEL_PROTOCOL_PREFIX } from '../../utils/common.js';
9
+
10
+ export class CodexConverter extends BaseConverter {
11
+ constructor() {
12
+ super('codex');
13
+ this.toolNameMap = new Map(); // 工具名称缩短/恢复映射
14
+ this.reverseToolNameMap = new Map(); // 反向映射
15
+ }
16
+
17
+ /**
18
+ * 转换请求
19
+ */
20
+ convertRequest(data, targetProtocol) {
21
+ if (targetProtocol === 'codex') {
22
+ return this.toCodexRequest(data);
23
+ } else if (targetProtocol === MODEL_PROTOCOL_PREFIX.OPENAI) {
24
+ // Codex → OpenAI (通常不需要,因为 Codex 响应会直接转换)
25
+ return data;
26
+ }
27
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
28
+ }
29
+
30
+ /**
31
+ * 转换响应
32
+ */
33
+ convertResponse(data, targetProtocol, model) {
34
+ if (targetProtocol === MODEL_PROTOCOL_PREFIX.OPENAI) {
35
+ return this.toOpenAIResponse(data, model);
36
+ }
37
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
38
+ }
39
+
40
+ /**
41
+ * 转换流式响应块
42
+ */
43
+ convertStreamChunk(chunk, targetProtocol, model) {
44
+ if (targetProtocol === MODEL_PROTOCOL_PREFIX.OPENAI) {
45
+ return this.toOpenAIStreamChunk(chunk, model);
46
+ }
47
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
48
+ }
49
+
50
+ /**
51
+ * OpenAI → Codex 请求转换
52
+ */
53
+ toCodexRequest(data) {
54
+ const codexRequest = {
55
+ model: data.model,
56
+ instructions: this.buildInstructions(data),
57
+ input: this.convertMessages(data.messages || []),
58
+ stream: data.stream || false,
59
+ store: false,
60
+ reasoning: {
61
+ effort: 'medium',
62
+ summary: 'auto'
63
+ },
64
+ parallel_tool_calls: data.parallel_tool_calls !== false,
65
+ include: ['reasoning.encrypted_content']
66
+ };
67
+
68
+ // 添加工具
69
+ if (data.tools && data.tools.length > 0) {
70
+ codexRequest.tools = this.convertTools(data.tools);
71
+ codexRequest.tool_choice = data.tool_choice || 'auto';
72
+ }
73
+
74
+ // 添加响应格式
75
+ if (data.response_format) {
76
+ codexRequest.text = {
77
+ format: this.convertResponseFormat(data.response_format)
78
+ };
79
+ }
80
+
81
+ // 添加推理强度(如果指定)
82
+ if (data.reasoning_effort) {
83
+ codexRequest.reasoning.effort = data.reasoning_effort;
84
+ }
85
+
86
+ // 添加温度和其他参数
87
+ if (data.temperature !== undefined) {
88
+ codexRequest.temperature = data.temperature;
89
+ }
90
+ if (data.max_tokens !== undefined) {
91
+ codexRequest.max_output_tokens = data.max_tokens;
92
+ }
93
+ if (data.top_p !== undefined) {
94
+ codexRequest.top_p = data.top_p;
95
+ }
96
+
97
+ return codexRequest;
98
+ }
99
+
100
+ /**
101
+ * 构建指令
102
+ */
103
+ buildInstructions(data) {
104
+ // 提取系统消息
105
+ const systemMessages = (data.messages || []).filter(m => m.role === 'system');
106
+ if (systemMessages.length > 0) {
107
+ return systemMessages.map(m => {
108
+ if (typeof m.content === 'string') {
109
+ return m.content;
110
+ } else if (Array.isArray(m.content)) {
111
+ return m.content
112
+ .filter(part => part.type === 'text')
113
+ .map(part => part.text)
114
+ .join('\n');
115
+ }
116
+ return '';
117
+ }).join('\n');
118
+ }
119
+ return 'You are a helpful assistant.';
120
+ }
121
+
122
+ /**
123
+ * 转换消息
124
+ */
125
+ convertMessages(messages) {
126
+ const input = [];
127
+ const nonSystemMessages = messages.filter(m => m.role !== 'system');
128
+
129
+ for (const msg of nonSystemMessages) {
130
+ if (msg.role === 'user' || msg.role === 'assistant') {
131
+ input.push({
132
+ type: 'message',
133
+ role: msg.role,
134
+ content: this.convertMessageContent(msg.content, msg.role)
135
+ });
136
+
137
+ // 处理助手消息中的工具调用
138
+ if (msg.role === 'assistant' && msg.tool_calls) {
139
+ for (const toolCall of msg.tool_calls) {
140
+ const shortName = this.getShortToolName(toolCall.function.name);
141
+ input.push({
142
+ type: 'function_call',
143
+ call_id: toolCall.id,
144
+ name: shortName,
145
+ arguments: JSON.parse(toolCall.function.arguments)
146
+ });
147
+ }
148
+ }
149
+ } else if (msg.role === 'tool') {
150
+ input.push({
151
+ type: 'function_call_output',
152
+ call_id: msg.tool_call_id,
153
+ output: msg.content
154
+ });
155
+ }
156
+ }
157
+
158
+ return input;
159
+ }
160
+
161
+ /**
162
+ * 转换消息内容
163
+ */
164
+ convertMessageContent(content, role) {
165
+ if (typeof content === 'string') {
166
+ return [{
167
+ type: role === 'user' ? 'input_text' : 'output_text',
168
+ text: content
169
+ }];
170
+ }
171
+
172
+ if (Array.isArray(content)) {
173
+ return content.map(part => {
174
+ if (part.type === 'text') {
175
+ return {
176
+ type: role === 'user' ? 'input_text' : 'output_text',
177
+ text: part.text
178
+ };
179
+ } else if (part.type === 'image_url') {
180
+ return {
181
+ type: 'input_image',
182
+ image_url: part.image_url.url
183
+ };
184
+ }
185
+ return part;
186
+ });
187
+ }
188
+
189
+ return [];
190
+ }
191
+
192
+ /**
193
+ * 转换工具
194
+ */
195
+ convertTools(tools) {
196
+ this.toolNameMap.clear();
197
+ this.reverseToolNameMap.clear();
198
+
199
+ return tools.map(tool => {
200
+ const originalName = tool.function.name;
201
+ const shortName = this.shortenToolName(originalName);
202
+
203
+ this.toolNameMap.set(originalName, shortName);
204
+ this.reverseToolNameMap.set(shortName, originalName);
205
+
206
+ return {
207
+ type: 'function',
208
+ name: shortName,
209
+ description: tool.function.description,
210
+ parameters: tool.function.parameters
211
+ };
212
+ });
213
+ }
214
+
215
+ /**
216
+ * 缩短工具名称(最多 64 字符)
217
+ */
218
+ shortenToolName(name) {
219
+ if (name.length <= 64) {
220
+ return name;
221
+ }
222
+
223
+ // 保留 mcp__ 前缀和最后一段
224
+ if (name.startsWith('mcp__')) {
225
+ const parts = name.split('__');
226
+ if (parts.length > 2) {
227
+ const prefix = 'mcp__';
228
+ const lastPart = parts[parts.length - 1];
229
+ const maxLastPartLength = 64 - prefix.length - 1; // -1 for underscore
230
+
231
+ if (lastPart.length <= maxLastPartLength) {
232
+ return prefix + lastPart;
233
+ } else {
234
+ return prefix + lastPart.slice(0, maxLastPartLength);
235
+ }
236
+ }
237
+ }
238
+
239
+ // 使用哈希创建唯一的短名称
240
+ const hash = crypto.createHash('md5').update(name).digest('hex').slice(0, 8);
241
+ return name.slice(0, 55) + '_' + hash;
242
+ }
243
+
244
+ /**
245
+ * 获取短工具名称
246
+ */
247
+ getShortToolName(originalName) {
248
+ return this.toolNameMap.get(originalName) || originalName;
249
+ }
250
+
251
+ /**
252
+ * 获取原始工具名称
253
+ */
254
+ getOriginalToolName(shortName) {
255
+ return this.reverseToolNameMap.get(shortName) || shortName;
256
+ }
257
+
258
+ /**
259
+ * 转换响应格式
260
+ */
261
+ convertResponseFormat(responseFormat) {
262
+ if (responseFormat.type === 'json_schema') {
263
+ return {
264
+ type: 'json_schema',
265
+ name: responseFormat.json_schema?.name || 'response',
266
+ schema: responseFormat.json_schema?.schema || {}
267
+ };
268
+ } else if (responseFormat.type === 'json_object') {
269
+ return {
270
+ type: 'json_object'
271
+ };
272
+ }
273
+ return responseFormat;
274
+ }
275
+
276
+ /**
277
+ * Codex → OpenAI 响应转换(非流式)
278
+ */
279
+ toOpenAIResponse(data, model) {
280
+ const response = data.response || data;
281
+
282
+ const message = {
283
+ role: 'assistant',
284
+ content: ''
285
+ };
286
+
287
+ // 提取文本内容和工具调用
288
+ const textParts = [];
289
+ const toolCalls = [];
290
+
291
+ if (response.output) {
292
+ for (const item of response.output) {
293
+ if (item.type === 'message') {
294
+ for (const content of item.content || []) {
295
+ if (content.type === 'output_text') {
296
+ textParts.push(content.text);
297
+ }
298
+ }
299
+ } else if (item.type === 'function_call') {
300
+ const originalName = this.getOriginalToolName(item.name);
301
+ toolCalls.push({
302
+ id: item.call_id,
303
+ type: 'function',
304
+ function: {
305
+ name: originalName,
306
+ arguments: JSON.stringify(item.arguments)
307
+ }
308
+ });
309
+ }
310
+ }
311
+ }
312
+
313
+ message.content = textParts.join('');
314
+ if (toolCalls.length > 0) {
315
+ message.tool_calls = toolCalls;
316
+ }
317
+
318
+ // 提取推理内容
319
+ let reasoningContent = '';
320
+ if (response.output) {
321
+ for (const item of response.output) {
322
+ if (item.summary) {
323
+ reasoningContent = item.summary;
324
+ break;
325
+ }
326
+ }
327
+ }
328
+
329
+ return {
330
+ id: response.id || `chatcmpl-${Date.now()}`,
331
+ object: 'chat.completion',
332
+ created: Math.floor(Date.now() / 1000),
333
+ model: model,
334
+ choices: [{
335
+ index: 0,
336
+ message: message,
337
+ finish_reason: this.mapFinishReason(response.status),
338
+ ...(reasoningContent && { reasoning_content: reasoningContent })
339
+ }],
340
+ usage: {
341
+ prompt_tokens: response.usage?.input_tokens || 0,
342
+ completion_tokens: response.usage?.output_tokens || 0,
343
+ total_tokens: response.usage?.total_tokens || 0,
344
+ ...(response.usage?.input_tokens_details?.cached_tokens && {
345
+ prompt_tokens_details: {
346
+ cached_tokens: response.usage.input_tokens_details.cached_tokens
347
+ }
348
+ }),
349
+ ...(response.usage?.output_tokens_details?.reasoning_tokens && {
350
+ completion_tokens_details: {
351
+ reasoning_tokens: response.usage.output_tokens_details.reasoning_tokens
352
+ }
353
+ })
354
+ }
355
+ };
356
+ }
357
+
358
+ /**
359
+ * Codex → OpenAI 流式响应块转换
360
+ */
361
+ toOpenAIStreamChunk(chunk, model) {
362
+ const type = chunk.type;
363
+
364
+ // response.created - 存储元数据
365
+ if (type === 'response.created') {
366
+ return {
367
+ id: chunk.response.id,
368
+ object: 'chat.completion.chunk',
369
+ created: Math.floor(Date.now() / 1000),
370
+ model: model,
371
+ choices: [{
372
+ index: 0,
373
+ delta: { role: 'assistant' },
374
+ finish_reason: null
375
+ }]
376
+ };
377
+ }
378
+
379
+ // response.output_text.delta - 文本内容
380
+ if (type === 'response.output_text.delta') {
381
+ return {
382
+ id: `chatcmpl-${Date.now()}`,
383
+ object: 'chat.completion.chunk',
384
+ created: Math.floor(Date.now() / 1000),
385
+ model: model,
386
+ choices: [{
387
+ index: 0,
388
+ delta: { content: chunk.delta },
389
+ finish_reason: null
390
+ }]
391
+ };
392
+ }
393
+
394
+ // response.reasoning_summary_text.delta - 推理内容
395
+ if (type === 'response.reasoning_summary_text.delta') {
396
+ return {
397
+ id: `chatcmpl-${Date.now()}`,
398
+ object: 'chat.completion.chunk',
399
+ created: Math.floor(Date.now() / 1000),
400
+ model: model,
401
+ choices: [{
402
+ index: 0,
403
+ delta: { reasoning_content: chunk.delta },
404
+ finish_reason: null
405
+ }]
406
+ };
407
+ }
408
+
409
+ // response.output_item.done - 工具调用完成
410
+ if (type === 'response.output_item.done' && chunk.item?.type === 'function_call') {
411
+ const originalName = this.getOriginalToolName(chunk.item.name);
412
+ return {
413
+ id: `chatcmpl-${Date.now()}`,
414
+ object: 'chat.completion.chunk',
415
+ created: Math.floor(Date.now() / 1000),
416
+ model: model,
417
+ choices: [{
418
+ index: 0,
419
+ delta: {
420
+ tool_calls: [{
421
+ index: 0,
422
+ id: chunk.item.call_id,
423
+ type: 'function',
424
+ function: {
425
+ name: originalName,
426
+ arguments: JSON.stringify(chunk.item.arguments)
427
+ }
428
+ }]
429
+ },
430
+ finish_reason: null
431
+ }]
432
+ };
433
+ }
434
+
435
+ // response.completed - 完成
436
+ if (type === 'response.completed') {
437
+ return {
438
+ id: chunk.response.id,
439
+ object: 'chat.completion.chunk',
440
+ created: Math.floor(Date.now() / 1000),
441
+ model: model,
442
+ choices: [{
443
+ index: 0,
444
+ delta: {},
445
+ finish_reason: this.mapFinishReason(chunk.response.status)
446
+ }],
447
+ usage: {
448
+ prompt_tokens: chunk.response.usage?.input_tokens || 0,
449
+ completion_tokens: chunk.response.usage?.output_tokens || 0,
450
+ total_tokens: chunk.response.usage?.total_tokens || 0,
451
+ ...(chunk.response.usage?.input_tokens_details?.cached_tokens && {
452
+ prompt_tokens_details: {
453
+ cached_tokens: chunk.response.usage.input_tokens_details.cached_tokens
454
+ }
455
+ }),
456
+ ...(chunk.response.usage?.output_tokens_details?.reasoning_tokens && {
457
+ completion_tokens_details: {
458
+ reasoning_tokens: chunk.response.usage.output_tokens_details.reasoning_tokens
459
+ }
460
+ })
461
+ }
462
+ };
463
+ }
464
+
465
+ // 其他事件类型暂时忽略
466
+ return null;
467
+ }
468
+
469
+ /**
470
+ * 映射完成原因
471
+ */
472
+ mapFinishReason(status) {
473
+ const mapping = {
474
+ 'completed': 'stop',
475
+ 'incomplete': 'length',
476
+ 'failed': 'error',
477
+ 'cancelled': 'stop'
478
+ };
479
+ return mapping[status] || 'stop';
480
+ }
481
+
482
+ /**
483
+ * 转换模型列表
484
+ */
485
+ convertModelList(data, targetProtocol) {
486
+ // Codex 使用 OpenAI 格式的模型列表,无需转换
487
+ return data;
488
+ }
489
+ }
src/converters/strategies/GeminiConverter.js ADDED
@@ -0,0 +1,1226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * Gemini转换器
3
+ * 处理Gemini(Google)协议与其他协议之间的转换
4
+ */
5
+
6
+ import { v4 as uuidv4 } from 'uuid';
7
+ import { BaseConverter } from '../BaseConverter.js';
8
+ import {
9
+ checkAndAssignOrDefault,
10
+ OPENAI_DEFAULT_MAX_TOKENS,
11
+ OPENAI_DEFAULT_TEMPERATURE,
12
+ OPENAI_DEFAULT_TOP_P,
13
+ CLAUDE_DEFAULT_MAX_TOKENS,
14
+ CLAUDE_DEFAULT_TEMPERATURE,
15
+ CLAUDE_DEFAULT_TOP_P
16
+ } from '../utils.js';
17
+ import { MODEL_PROTOCOL_PREFIX } from '../../utils/common.js';
18
+ import {
19
+ generateResponseCreated,
20
+ generateResponseInProgress,
21
+ generateOutputItemAdded,
22
+ generateContentPartAdded,
23
+ generateOutputTextDone,
24
+ generateContentPartDone,
25
+ generateOutputItemDone,
26
+ generateResponseCompleted
27
+ } from '../../providers/openai/openai-responses-core.mjs';
28
+
29
+ /**
30
+ * [FIX] 参考 ag/response.rs 和 ag/streaming.rs 的 remap_function_call_args 函数
31
+ * 修复 Gemini 返回的工具参数名称问题
32
+ * Gemini 有时会使用不同的参数名称,需要映射到 Claude Code 期望的格式
33
+ */
34
+ function remapFunctionCallArgs(toolName, args) {
35
+ if (!args || typeof args !== 'object') return args;
36
+
37
+ const remappedArgs = { ...args };
38
+ const toolNameLower = toolName.toLowerCase();
39
+
40
+ // [IMPORTANT] Claude Code CLI 的 EnterPlanMode 工具禁止携带任何参数
41
+ if (toolName === 'EnterPlanMode') {
42
+ return {};
43
+ }
44
+
45
+ switch (toolNameLower) {
46
+ case 'grep':
47
+ case 'search':
48
+ case 'search_code_definitions':
49
+ case 'search_code_snippets':
50
+ // [FIX] Gemini hallucination: maps parameter description to "description" field
51
+ if (remappedArgs.description && !remappedArgs.pattern) {
52
+ remappedArgs.pattern = remappedArgs.description;
53
+ delete remappedArgs.description;
54
+ }
55
+
56
+ // Gemini uses "query", Claude Code expects "pattern"
57
+ if (remappedArgs.query && !remappedArgs.pattern) {
58
+ remappedArgs.pattern = remappedArgs.query;
59
+ delete remappedArgs.query;
60
+ }
61
+
62
+ // [CRITICAL FIX] Claude Code uses "path" (string), NOT "paths" (array)!
63
+ if (!remappedArgs.path) {
64
+ if (remappedArgs.paths) {
65
+ if (Array.isArray(remappedArgs.paths)) {
66
+ remappedArgs.path = remappedArgs.paths[0] || '.';
67
+ } else if (typeof remappedArgs.paths === 'string') {
68
+ remappedArgs.path = remappedArgs.paths;
69
+ } else {
70
+ remappedArgs.path = '.';
71
+ }
72
+ delete remappedArgs.paths;
73
+ } else {
74
+ // Default to current directory if missing
75
+ remappedArgs.path = '.';
76
+ }
77
+ }
78
+ // Note: We keep "-n" and "output_mode" if present as they are valid in Grep schema
79
+ break;
80
+
81
+ case 'glob':
82
+ // [FIX] Gemini hallucination: maps parameter description to "description" field
83
+ if (remappedArgs.description && !remappedArgs.pattern) {
84
+ remappedArgs.pattern = remappedArgs.description;
85
+ delete remappedArgs.description;
86
+ }
87
+
88
+ // Gemini uses "query", Claude Code expects "pattern"
89
+ if (remappedArgs.query && !remappedArgs.pattern) {
90
+ remappedArgs.pattern = remappedArgs.query;
91
+ delete remappedArgs.query;
92
+ }
93
+
94
+ // [CRITICAL FIX] Claude Code uses "path" (string), NOT "paths" (array)!
95
+ // [NOTE] 与 grep 不同,glob 不添加默认 path(参考 Rust 代码)
96
+ if (!remappedArgs.path) {
97
+ if (remappedArgs.paths) {
98
+ if (Array.isArray(remappedArgs.paths)) {
99
+ remappedArgs.path = remappedArgs.paths[0] || '.';
100
+ } else if (typeof remappedArgs.paths === 'string') {
101
+ remappedArgs.path = remappedArgs.paths;
102
+ } else {
103
+ remappedArgs.path = '.';
104
+ }
105
+ delete remappedArgs.paths;
106
+ }
107
+ // [FIX] glob 不添加默认 path,与 Rust 代码保持一致
108
+ }
109
+ break;
110
+
111
+ case 'read':
112
+ // Gemini might use "path" vs "file_path"
113
+ if (remappedArgs.path && !remappedArgs.file_path) {
114
+ remappedArgs.file_path = remappedArgs.path;
115
+ delete remappedArgs.path;
116
+ }
117
+ break;
118
+
119
+ case 'ls':
120
+ // LS tool: ensure "path" parameter exists
121
+ if (!remappedArgs.path) {
122
+ remappedArgs.path = '.';
123
+ }
124
+ break;
125
+
126
+ default:
127
+ // [NEW] [Issue #785] Generic Property Mapping for all tools
128
+ // If a tool has "paths" (array of 1) but no "path", convert it.
129
+ // [FIX] 与 Rust 代码保持一致:只在 paths.length === 1 时转换,不删除原始 paths
130
+ if (!remappedArgs.path && remappedArgs.paths) {
131
+ if (Array.isArray(remappedArgs.paths) && remappedArgs.paths.length === 1) {
132
+ const pathValue = remappedArgs.paths[0];
133
+ if (typeof pathValue === 'string') {
134
+ remappedArgs.path = pathValue;
135
+ // [FIX] Rust 代码中不删除 paths,这里也不删除
136
+ }
137
+ }
138
+ }
139
+ break;
140
+ }
141
+
142
+ return remappedArgs;
143
+ }
144
+
145
+ /**
146
+ * [FIX] 规范化工具名称
147
+ * Gemini 有时会返回 "search" 而不是 "Grep"
148
+ */
149
+ function normalizeToolName(name) {
150
+ if (!name) return name;
151
+
152
+ const nameLower = name.toLowerCase();
153
+ if (nameLower === 'search') {
154
+ return 'Grep';
155
+ }
156
+ return name;
157
+ }
158
+
159
+ /**
160
+ * Gemini转换器类
161
+ * 实现Gemini协议到其他协议的转换
162
+ */
163
+ export class GeminiConverter extends BaseConverter {
164
+ constructor() {
165
+ super('gemini');
166
+ }
167
+
168
+ /**
169
+ * 转换请求
170
+ */
171
+ convertRequest(data, targetProtocol) {
172
+ switch (targetProtocol) {
173
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
174
+ return this.toOpenAIRequest(data);
175
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
176
+ return this.toClaudeRequest(data);
177
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
178
+ return this.toOpenAIResponsesRequest(data);
179
+ default:
180
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
181
+ }
182
+ }
183
+
184
+ /**
185
+ * 转换响应
186
+ */
187
+ convertResponse(data, targetProtocol, model) {
188
+ switch (targetProtocol) {
189
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
190
+ return this.toOpenAIResponse(data, model);
191
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
192
+ return this.toClaudeResponse(data, model);
193
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
194
+ return this.toOpenAIResponsesResponse(data, model);
195
+ default:
196
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
197
+ }
198
+ }
199
+
200
+ /**
201
+ * 转换流式响应块
202
+ */
203
+ convertStreamChunk(chunk, targetProtocol, model) {
204
+ switch (targetProtocol) {
205
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
206
+ return this.toOpenAIStreamChunk(chunk, model);
207
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
208
+ return this.toClaudeStreamChunk(chunk, model);
209
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
210
+ return this.toOpenAIResponsesStreamChunk(chunk, model);
211
+ default:
212
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
213
+ }
214
+ }
215
+
216
+ /**
217
+ * 转换模型列表
218
+ */
219
+ convertModelList(data, targetProtocol) {
220
+ switch (targetProtocol) {
221
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
222
+ return this.toOpenAIModelList(data);
223
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
224
+ return this.toClaudeModelList(data);
225
+ default:
226
+ return data;
227
+ }
228
+ }
229
+
230
+ // =========================================================================
231
+ // Gemini -> OpenAI 转换
232
+ // =========================================================================
233
+
234
+ /**
235
+ * Gemini请求 -> OpenAI请求
236
+ */
237
+ toOpenAIRequest(geminiRequest) {
238
+ const openaiRequest = {
239
+ messages: [],
240
+ model: geminiRequest.model,
241
+ max_tokens: checkAndAssignOrDefault(geminiRequest.max_tokens, OPENAI_DEFAULT_MAX_TOKENS),
242
+ temperature: checkAndAssignOrDefault(geminiRequest.temperature, OPENAI_DEFAULT_TEMPERATURE),
243
+ top_p: checkAndAssignOrDefault(geminiRequest.top_p, OPENAI_DEFAULT_TOP_P),
244
+ };
245
+
246
+ // 处理系统指令
247
+ if (geminiRequest.systemInstruction && Array.isArray(geminiRequest.systemInstruction.parts)) {
248
+ const systemContent = this.processGeminiPartsToOpenAIContent(geminiRequest.systemInstruction.parts);
249
+ if (systemContent) {
250
+ openaiRequest.messages.push({
251
+ role: 'system',
252
+ content: systemContent
253
+ });
254
+ }
255
+ }
256
+
257
+ // 处理内容
258
+ if (geminiRequest.contents && Array.isArray(geminiRequest.contents)) {
259
+ geminiRequest.contents.forEach(content => {
260
+ if (content && Array.isArray(content.parts)) {
261
+ const openaiContent = this.processGeminiPartsToOpenAIContent(content.parts);
262
+ if (openaiContent && openaiContent.length > 0) {
263
+ const openaiRole = content.role === 'model' ? 'assistant' : content.role;
264
+ openaiRequest.messages.push({
265
+ role: openaiRole,
266
+ content: openaiContent
267
+ });
268
+ }
269
+ }
270
+ });
271
+ }
272
+
273
+ return openaiRequest;
274
+ }
275
+
276
+ /**
277
+ * Gemini响应 -> OpenAI响应
278
+ */
279
+ toOpenAIResponse(geminiResponse, model) {
280
+ const content = this.processGeminiResponseContent(geminiResponse);
281
+
282
+ return {
283
+ id: `chatcmpl-${uuidv4()}`,
284
+ object: "chat.completion",
285
+ created: Math.floor(Date.now() / 1000),
286
+ model: model,
287
+ choices: [{
288
+ index: 0,
289
+ message: {
290
+ role: "assistant",
291
+ content: content
292
+ },
293
+ finish_reason: "stop",
294
+ }],
295
+ usage: geminiResponse.usageMetadata ? {
296
+ prompt_tokens: geminiResponse.usageMetadata.promptTokenCount || 0,
297
+ completion_tokens: geminiResponse.usageMetadata.candidatesTokenCount || 0,
298
+ total_tokens: geminiResponse.usageMetadata.totalTokenCount || 0,
299
+ cached_tokens: geminiResponse.usageMetadata.cachedContentTokenCount || 0,
300
+ prompt_tokens_details: {
301
+ cached_tokens: geminiResponse.usageMetadata.cachedContentTokenCount || 0
302
+ },
303
+ completion_tokens_details: {
304
+ reasoning_tokens: geminiResponse.usageMetadata.thoughtsTokenCount || 0
305
+ }
306
+ } : {
307
+ prompt_tokens: 0,
308
+ completion_tokens: 0,
309
+ total_tokens: 0,
310
+ cached_tokens: 0,
311
+ prompt_tokens_details: {
312
+ cached_tokens: 0
313
+ },
314
+ completion_tokens_details: {
315
+ reasoning_tokens: 0
316
+ }
317
+ },
318
+ };
319
+ }
320
+
321
+ /**
322
+ * Gemini流式响应 -> OpenAI流式响应
323
+ */
324
+ toOpenAIStreamChunk(geminiChunk, model) {
325
+ if (!geminiChunk) return null;
326
+
327
+ const candidate = geminiChunk.candidates?.[0];
328
+ if (!candidate) return null;
329
+
330
+ let content = '';
331
+ const toolCalls = [];
332
+
333
+ // 从parts中提取文本和tool calls
334
+ const parts = candidate.content?.parts;
335
+ if (parts && Array.isArray(parts)) {
336
+ for (const part of parts) {
337
+ if (part.text) {
338
+ content += part.text;
339
+ }
340
+ if (part.functionCall) {
341
+ toolCalls.push({
342
+ index: toolCalls.length,
343
+ id: part.functionCall.id || `call_${uuidv4()}`,
344
+ type: 'function',
345
+ function: {
346
+ name: part.functionCall.name,
347
+ arguments: typeof part.functionCall.args === 'string'
348
+ ? part.functionCall.args
349
+ : JSON.stringify(part.functionCall.args)
350
+ }
351
+ });
352
+ }
353
+ // thoughtSignature is ignored (internal Gemini data)
354
+ }
355
+ }
356
+
357
+ // 处理finishReason
358
+ let finishReason = null;
359
+ if (candidate.finishReason) {
360
+ finishReason = candidate.finishReason === 'STOP' ? 'stop' :
361
+ candidate.finishReason === 'MAX_TOKENS' ? 'length' :
362
+ candidate.finishReason.toLowerCase();
363
+ }
364
+
365
+ // 如果包含工具调用,且完成原因为 stop,则将完成原因修改为 tool_calls
366
+ if (toolCalls.length > 0 && finishReason === 'stop') {
367
+ finishReason = 'tool_calls';
368
+ }
369
+
370
+ // 构建delta对象
371
+ const delta = {};
372
+ if (content) delta.content = content;
373
+ if (toolCalls.length > 0) delta.tool_calls = toolCalls;
374
+
375
+ // Don't return empty delta chunks
376
+ if (Object.keys(delta).length === 0 && !finishReason) {
377
+ return null;
378
+ }
379
+
380
+ return {
381
+ id: `chatcmpl-${uuidv4()}`,
382
+ object: "chat.completion.chunk",
383
+ created: Math.floor(Date.now() / 1000),
384
+ model: model,
385
+ choices: [{
386
+ index: 0,
387
+ delta: delta,
388
+ finish_reason: finishReason,
389
+ }],
390
+ usage: geminiChunk.usageMetadata ? {
391
+ prompt_tokens: geminiChunk.usageMetadata.promptTokenCount || 0,
392
+ completion_tokens: geminiChunk.usageMetadata.candidatesTokenCount || 0,
393
+ total_tokens: geminiChunk.usageMetadata.totalTokenCount || 0,
394
+ cached_tokens: geminiChunk.usageMetadata.cachedContentTokenCount || 0,
395
+ prompt_tokens_details: {
396
+ cached_tokens: geminiChunk.usageMetadata.cachedContentTokenCount || 0
397
+ },
398
+ completion_tokens_details: {
399
+ reasoning_tokens: geminiChunk.usageMetadata.thoughtsTokenCount || 0
400
+ }
401
+ } : {
402
+ prompt_tokens: 0,
403
+ completion_tokens: 0,
404
+ total_tokens: 0,
405
+ cached_tokens: 0,
406
+ prompt_tokens_details: {
407
+ cached_tokens: 0
408
+ },
409
+ completion_tokens_details: {
410
+ reasoning_tokens: 0
411
+ }
412
+ },
413
+ };
414
+ }
415
+
416
+ /**
417
+ * Gemini模型列表 -> OpenAI模型列表
418
+ */
419
+ toOpenAIModelList(geminiModels) {
420
+ return {
421
+ object: "list",
422
+ data: geminiModels.models.map(m => {
423
+ const modelId = m.name.startsWith('models/') ? m.name.substring(7) : m.name;
424
+ return {
425
+ id: modelId,
426
+ object: "model",
427
+ created: Math.floor(Date.now() / 1000),
428
+ owned_by: "google",
429
+ display_name: m.displayName || modelId,
430
+ };
431
+ }),
432
+ };
433
+ }
434
+
435
+ /**
436
+ * 处理Gemini parts到OpenAI内容
437
+ */
438
+ processGeminiPartsToOpenAIContent(parts) {
439
+ if (!parts || !Array.isArray(parts)) return '';
440
+
441
+ const contentArray = [];
442
+
443
+ parts.forEach(part => {
444
+ if (!part) return;
445
+
446
+ if (typeof part.text === 'string') {
447
+ contentArray.push({
448
+ type: 'text',
449
+ text: part.text
450
+ });
451
+ }
452
+
453
+ if (part.inlineData) {
454
+ const { mimeType, data } = part.inlineData;
455
+ if (mimeType && data) {
456
+ contentArray.push({
457
+ type: 'image_url',
458
+ image_url: {
459
+ url: `data:${mimeType};base64,${data}`
460
+ }
461
+ });
462
+ }
463
+ }
464
+
465
+ if (part.fileData) {
466
+ const { mimeType, fileUri } = part.fileData;
467
+ if (mimeType && fileUri) {
468
+ if (mimeType.startsWith('image/')) {
469
+ contentArray.push({
470
+ type: 'image_url',
471
+ image_url: {
472
+ url: fileUri
473
+ }
474
+ });
475
+ } else if (mimeType.startsWith('audio/')) {
476
+ contentArray.push({
477
+ type: 'text',
478
+ text: `[Audio file: ${fileUri}]`
479
+ });
480
+ }
481
+ }
482
+ }
483
+ });
484
+
485
+ return contentArray.length === 1 && contentArray[0].type === 'text'
486
+ ? contentArray[0].text
487
+ : contentArray;
488
+ }
489
+
490
+ /**
491
+ * 处理Gemini响应内容
492
+ */
493
+ processGeminiResponseContent(geminiResponse) {
494
+ if (!geminiResponse || !geminiResponse.candidates) return '';
495
+
496
+ const contents = [];
497
+
498
+ geminiResponse.candidates.forEach(candidate => {
499
+ if (candidate.content && candidate.content.parts) {
500
+ candidate.content.parts.forEach(part => {
501
+ if (part.text) {
502
+ contents.push(part.text);
503
+ }
504
+ });
505
+ }
506
+ });
507
+
508
+ return contents.join('\n');
509
+ }
510
+
511
+ // =========================================================================
512
+ // Gemini -> Claude 转换
513
+ // =========================================================================
514
+
515
+ /**
516
+ * Gemini请求 -> Claude请求
517
+ */
518
+ toClaudeRequest(geminiRequest) {
519
+ const claudeRequest = {
520
+ model: geminiRequest.model || 'claude-3-opus',
521
+ messages: [],
522
+ max_tokens: checkAndAssignOrDefault(geminiRequest.generationConfig?.maxOutputTokens, CLAUDE_DEFAULT_MAX_TOKENS),
523
+ temperature: checkAndAssignOrDefault(geminiRequest.generationConfig?.temperature, CLAUDE_DEFAULT_TEMPERATURE),
524
+ top_p: checkAndAssignOrDefault(geminiRequest.generationConfig?.topP, CLAUDE_DEFAULT_TOP_P),
525
+ };
526
+
527
+ // 处理系统指令
528
+ if (geminiRequest.systemInstruction && geminiRequest.systemInstruction.parts) {
529
+ const systemText = geminiRequest.systemInstruction.parts
530
+ .filter(p => p.text)
531
+ .map(p => p.text)
532
+ .join('\n');
533
+ if (systemText) {
534
+ claudeRequest.system = systemText;
535
+ }
536
+ }
537
+
538
+ // 处理内容
539
+ if (geminiRequest.contents && Array.isArray(geminiRequest.contents)) {
540
+ geminiRequest.contents.forEach(content => {
541
+ if (!content || !content.parts) return;
542
+
543
+ const role = content.role === 'model' ? 'assistant' : 'user';
544
+ const claudeContent = this.processGeminiPartsToClaudeContent(content.parts);
545
+
546
+ if (claudeContent.length > 0) {
547
+ claudeRequest.messages.push({
548
+ role: role,
549
+ content: claudeContent
550
+ });
551
+ }
552
+ });
553
+ }
554
+
555
+ // 处理工具
556
+ if (geminiRequest.tools && geminiRequest.tools[0]?.functionDeclarations) {
557
+ claudeRequest.tools = geminiRequest.tools[0].functionDeclarations.map(func => ({
558
+ name: func.name,
559
+ description: func.description || '',
560
+ input_schema: func.parameters || { type: 'object', properties: {} }
561
+ }));
562
+ }
563
+
564
+ return claudeRequest;
565
+ }
566
+
567
+ /**
568
+ * Gemini响应 -> Claude响应
569
+ */
570
+ toClaudeResponse(geminiResponse, model) {
571
+ if (!geminiResponse || !geminiResponse.candidates || geminiResponse.candidates.length === 0) {
572
+ return {
573
+ id: `msg_${uuidv4()}`,
574
+ type: "message",
575
+ role: "assistant",
576
+ content: [],
577
+ model: model,
578
+ stop_reason: "end_turn",
579
+ stop_sequence: null,
580
+ usage: {
581
+ input_tokens: geminiResponse?.usageMetadata?.promptTokenCount || 0,
582
+ output_tokens: geminiResponse?.usageMetadata?.candidatesTokenCount || 0
583
+ }
584
+ };
585
+ }
586
+
587
+ const candidate = geminiResponse.candidates[0];
588
+ const { content, hasToolUse } = this.processGeminiResponseToClaudeContent(geminiResponse);
589
+ const finishReason = candidate.finishReason;
590
+ let stopReason = "end_turn";
591
+
592
+ // [FIX] 参考 ag/response.rs - 如果有工具调用,stop_reason 应该是 "tool_use"
593
+ if (hasToolUse) {
594
+ stopReason = 'tool_use';
595
+ } else if (finishReason) {
596
+ switch (finishReason) {
597
+ case 'STOP':
598
+ stopReason = 'end_turn';
599
+ break;
600
+ case 'MAX_TOKENS':
601
+ stopReason = 'max_tokens';
602
+ break;
603
+ case 'SAFETY':
604
+ stopReason = 'safety';
605
+ break;
606
+ case 'RECITATION':
607
+ stopReason = 'recitation';
608
+ break;
609
+ case 'OTHER':
610
+ stopReason = 'other';
611
+ break;
612
+ default:
613
+ stopReason = 'end_turn';
614
+ }
615
+ }
616
+
617
+ return {
618
+ id: `msg_${uuidv4()}`,
619
+ type: "message",
620
+ role: "assistant",
621
+ content: content,
622
+ model: model,
623
+ stop_reason: stopReason,
624
+ stop_sequence: null,
625
+ usage: {
626
+ input_tokens: geminiResponse.usageMetadata?.promptTokenCount || 0,
627
+ cache_creation_input_tokens: 0,
628
+ cache_read_input_tokens: geminiResponse.usageMetadata?.cachedContentTokenCount || 0,
629
+ output_tokens: geminiResponse.usageMetadata?.candidatesTokenCount || 0
630
+ }
631
+ };
632
+ }
633
+
634
+ /**
635
+ * Gemini流式响应 -> Claude流式响应
636
+ */
637
+ toClaudeStreamChunk(geminiChunk, model) {
638
+ if (!geminiChunk) return null;
639
+
640
+ // 处理完整的Gemini chunk对象
641
+ if (typeof geminiChunk === 'object' && !Array.isArray(geminiChunk)) {
642
+ const candidate = geminiChunk.candidates?.[0];
643
+
644
+ if (candidate) {
645
+ const parts = candidate.content?.parts;
646
+
647
+ // [FIX] 参考 ag/streaming.rs 处理 thinking 和 text 块
648
+ if (parts && Array.isArray(parts)) {
649
+ const results = [];
650
+ let hasToolUse = false;
651
+
652
+ for (const part of parts) {
653
+ if (!part) continue;
654
+
655
+ if (typeof part.text === 'string') {
656
+ if (part.thought === true) {
657
+ // [FIX] 这是一个 thinking 块
658
+ const thinkingResult = {
659
+ type: "content_block_delta",
660
+ index: 0,
661
+ delta: {
662
+ type: "thinking_delta",
663
+ thinking: part.text
664
+ }
665
+ };
666
+ results.push(thinkingResult);
667
+
668
+ // 如果有签名,发送 signature_delta
669
+ // [FIX] 同时检查 thoughtSignature 和 thought_signature
670
+ const rawSignature = part.thoughtSignature || part.thought_signature;
671
+ if (rawSignature) {
672
+ let signature = rawSignature;
673
+ try {
674
+ const decoded = Buffer.from(signature, 'base64').toString('utf-8');
675
+ if (decoded && decoded.length > 0 && !decoded.includes('\ufffd')) {
676
+ signature = decoded;
677
+ }
678
+ } catch (e) {
679
+ // 解码失败,保持原样
680
+ }
681
+ results.push({
682
+ type: "content_block_delta",
683
+ index: 0,
684
+ delta: {
685
+ type: "signature_delta",
686
+ signature: signature
687
+ }
688
+ });
689
+ }
690
+ } else {
691
+ // 普通文本
692
+ results.push({
693
+ type: "content_block_delta",
694
+ index: 0,
695
+ delta: {
696
+ type: "text_delta",
697
+ text: part.text
698
+ }
699
+ });
700
+ }
701
+ }
702
+
703
+ // [FIX] 处理 functionCall
704
+ if (part.functionCall) {
705
+ hasToolUse = true;
706
+ // [FIX] 规范化工具名称和参数映射
707
+ const toolName = normalizeToolName(part.functionCall.name);
708
+ const remappedArgs = remapFunctionCallArgs(toolName, part.functionCall.args || {});
709
+
710
+ // 发送 tool_use 开始
711
+ const toolId = part.functionCall.id || `${toolName}-${uuidv4().split('-')[0]}`;
712
+ results.push({
713
+ type: "content_block_start",
714
+ index: 0,
715
+ content_block: {
716
+ type: "tool_use",
717
+ id: toolId,
718
+ name: toolName,
719
+ input: {}
720
+ }
721
+ });
722
+ // 发送参数
723
+ results.push({
724
+ type: "content_block_delta",
725
+ index: 0,
726
+ delta: {
727
+ type: "input_json_delta",
728
+ partial_json: JSON.stringify(remappedArgs)
729
+ }
730
+ });
731
+ }
732
+ }
733
+
734
+ // [FIX] 如果有工具调用,添加 message_delta 事件设置 stop_reason 为 tool_use
735
+ if (hasToolUse && candidate.finishReason) {
736
+ const messageDelta = {
737
+ type: "message_delta",
738
+ delta: {
739
+ stop_reason: 'tool_use'
740
+ }
741
+ };
742
+ if (geminiChunk.usageMetadata) {
743
+ messageDelta.usage = {
744
+ input_tokens: geminiChunk.usageMetadata.promptTokenCount || 0,
745
+ cache_creation_input_tokens: 0,
746
+ cache_read_input_tokens: geminiChunk.usageMetadata.cachedContentTokenCount || 0,
747
+ output_tokens: geminiChunk.usageMetadata.candidatesTokenCount || 0
748
+ };
749
+ }
750
+ results.push(messageDelta);
751
+ }
752
+
753
+ // 如果有多个结果,返回数组;否则返回单个或 null
754
+ if (results.length > 1) {
755
+ return results;
756
+ } else if (results.length === 1) {
757
+ return results[0];
758
+ }
759
+ }
760
+
761
+ // 处理finishReason
762
+ if (candidate.finishReason) {
763
+ const result = {
764
+ type: "message_delta",
765
+ delta: {
766
+ stop_reason: candidate.finishReason === 'STOP' ? 'end_turn' :
767
+ candidate.finishReason === 'MAX_TOKENS' ? 'max_tokens' :
768
+ candidate.finishReason.toLowerCase()
769
+ }
770
+ };
771
+
772
+ // 添加 usage 信息
773
+ if (geminiChunk.usageMetadata) {
774
+ result.usage = {
775
+ input_tokens: geminiChunk.usageMetadata.promptTokenCount || 0,
776
+ cache_creation_input_tokens: 0,
777
+ cache_read_input_tokens: geminiChunk.usageMetadata.cachedContentTokenCount || 0,
778
+ output_tokens: geminiChunk.usageMetadata.candidatesTokenCount || 0,
779
+ prompt_tokens: geminiChunk.usageMetadata.promptTokenCount || 0,
780
+ completion_tokens: geminiChunk.usageMetadata.candidatesTokenCount || 0,
781
+ total_tokens: geminiChunk.usageMetadata.totalTokenCount || 0,
782
+ cached_tokens: geminiChunk.usageMetadata.cachedContentTokenCount || 0
783
+ };
784
+ }
785
+
786
+ return result;
787
+ }
788
+ }
789
+ }
790
+
791
+ // 向后兼容:处理字符串格式
792
+ if (typeof geminiChunk === 'string') {
793
+ return {
794
+ type: "content_block_delta",
795
+ index: 0,
796
+ delta: {
797
+ type: "text_delta",
798
+ text: geminiChunk
799
+ }
800
+ };
801
+ }
802
+
803
+ return null;
804
+ }
805
+
806
+ /**
807
+ * Gemini模型列表 -> Claude模型列表
808
+ */
809
+ toClaudeModelList(geminiModels) {
810
+ return {
811
+ models: geminiModels.models.map(m => ({
812
+ name: m.name.startsWith('models/') ? m.name.substring(7) : m.name,
813
+ description: "",
814
+ })),
815
+ };
816
+ }
817
+
818
+ /**
819
+ * 处理Gemini parts到Claude内容
820
+ */
821
+ processGeminiPartsToClaudeContent(parts) {
822
+ if (!parts || !Array.isArray(parts)) return [];
823
+
824
+ const content = [];
825
+
826
+ parts.forEach(part => {
827
+ if (!part) return;
828
+
829
+ // [FIX] 参考 ag/response.rs 处理 thinking 块
830
+ // Gemini 使用 thought: true 和 thoughtSignature 表示思考内容
831
+ // [FIX] 同时支持 thoughtSignature 和 thought_signature(Gemini CLI 可能使用下划线格式)
832
+ if (part.text) {
833
+ if (part.thought === true) {
834
+ // 这是一个 thinking 块
835
+ const thinkingBlock = {
836
+ type: 'thinking',
837
+ thinking: part.text
838
+ };
839
+ // 处理签名 - 可能是 Base64 编码的
840
+ // [FIX] 同时检查 thoughtSignature 和 thought_signature
841
+ const rawSignature = part.thoughtSignature || part.thought_signature;
842
+ if (rawSignature) {
843
+ let signature = rawSignature;
844
+ // 尝试 Base64 解码
845
+ try {
846
+ const decoded = Buffer.from(signature, 'base64').toString('utf-8');
847
+ // 检查解码后是否是有效的 UTF-8 字符串
848
+ if (decoded && decoded.length > 0 && !decoded.includes('\ufffd')) {
849
+ signature = decoded;
850
+ }
851
+ } catch (e) {
852
+ // 解码失败,保持原样
853
+ }
854
+ thinkingBlock.signature = signature;
855
+ }
856
+ content.push(thinkingBlock);
857
+ } else {
858
+ // 普通文本
859
+ content.push({
860
+ type: 'text',
861
+ text: part.text
862
+ });
863
+ }
864
+ }
865
+
866
+ if (part.inlineData) {
867
+ content.push({
868
+ type: 'image',
869
+ source: {
870
+ type: 'base64',
871
+ media_type: part.inlineData.mimeType,
872
+ data: part.inlineData.data
873
+ }
874
+ });
875
+ }
876
+
877
+ if (part.functionCall) {
878
+ // [FIX] 规范化工具名称和参数映射
879
+ const toolName = normalizeToolName(part.functionCall.name);
880
+ const remappedArgs = remapFunctionCallArgs(toolName, part.functionCall.args || {});
881
+
882
+ // [FIX] 使用 Gemini 提供的 id,如果没有则生成
883
+ const toolUseBlock = {
884
+ type: 'tool_use',
885
+ id: part.functionCall.id || `${toolName}-${uuidv4().split('-')[0]}`,
886
+ name: toolName,
887
+ input: remappedArgs
888
+ };
889
+ // [FIX] 如果有签名,添加到 tool_use 块
890
+ // [FIX] 同时检查 thoughtSignature 和 thought_signature
891
+ const rawSignature = part.thoughtSignature || part.thought_signature;
892
+ if (rawSignature) {
893
+ let signature = rawSignature;
894
+ try {
895
+ const decoded = Buffer.from(signature, 'base64').toString('utf-8');
896
+ if (decoded && decoded.length > 0 && !decoded.includes('\ufffd')) {
897
+ signature = decoded;
898
+ }
899
+ } catch (e) {
900
+ // 解码失败,保持原样
901
+ }
902
+ toolUseBlock.signature = signature;
903
+ }
904
+ content.push(toolUseBlock);
905
+ }
906
+
907
+ if (part.functionResponse) {
908
+ // [FIX] 正确处理 functionResponse
909
+ let responseContent = part.functionResponse.response;
910
+ // 如果 response 是对象且有 result 字段,提取它
911
+ if (responseContent && typeof responseContent === 'object' && responseContent.result !== undefined) {
912
+ responseContent = responseContent.result;
913
+ }
914
+ content.push({
915
+ type: 'tool_result',
916
+ tool_use_id: part.functionResponse.name,
917
+ content: typeof responseContent === 'string' ? responseContent : JSON.stringify(responseContent)
918
+ });
919
+ }
920
+ });
921
+
922
+ return content;
923
+ }
924
+
925
+ /**
926
+ * 处理Gemini响应到Claude内容
927
+ * @returns {{ content: Array, hasToolUse: boolean }}
928
+ */
929
+ processGeminiResponseToClaudeContent(geminiResponse) {
930
+ if (!geminiResponse || !geminiResponse.candidates || geminiResponse.candidates.length === 0) {
931
+ return { content: [], hasToolUse: false };
932
+ }
933
+
934
+ const content = [];
935
+ let hasToolUse = false;
936
+
937
+ for (const candidate of geminiResponse.candidates) {
938
+ if (candidate.finishReason && candidate.finishReason !== 'STOP') {
939
+ if (candidate.finishMessage) {
940
+ content.push({
941
+ type: 'text',
942
+ text: `Error: ${candidate.finishMessage}`
943
+ });
944
+ }
945
+ continue;
946
+ }
947
+
948
+ if (candidate.content && candidate.content.parts) {
949
+ for (const part of candidate.content.parts) {
950
+ // [FIX] 参考 ag/response.rs 处理 thinking 块
951
+ if (part.text) {
952
+ if (part.thought === true) {
953
+ // 这是一个 thinking 块
954
+ const thinkingBlock = {
955
+ type: 'thinking',
956
+ thinking: part.text
957
+ };
958
+ // 处理签名
959
+ // [FIX] 同时检查 thoughtSignature 和 thought_signature
960
+ const rawSignature = part.thoughtSignature || part.thought_signature;
961
+ if (rawSignature) {
962
+ let signature = rawSignature;
963
+ try {
964
+ const decoded = Buffer.from(signature, 'base64').toString('utf-8');
965
+ if (decoded && decoded.length > 0 && !decoded.includes('\ufffd')) {
966
+ signature = decoded;
967
+ }
968
+ } catch (e) {
969
+ // 解码失败,保持原样
970
+ }
971
+ thinkingBlock.signature = signature;
972
+ }
973
+ content.push(thinkingBlock);
974
+ } else {
975
+ // 普通文本
976
+ content.push({
977
+ type: 'text',
978
+ text: part.text
979
+ });
980
+ }
981
+ } else if (part.inlineData) {
982
+ content.push({
983
+ type: 'image',
984
+ source: {
985
+ type: 'base64',
986
+ media_type: part.inlineData.mimeType,
987
+ data: part.inlineData.data
988
+ }
989
+ });
990
+ } else if (part.functionCall) {
991
+ hasToolUse = true;
992
+ // [FIX] 规范化工具名称和参数映射
993
+ const toolName = normalizeToolName(part.functionCall.name);
994
+ const remappedArgs = remapFunctionCallArgs(toolName, part.functionCall.args || {});
995
+
996
+ // [FIX] 使用 Gemini 提供的 id
997
+ const toolUseBlock = {
998
+ type: 'tool_use',
999
+ id: part.functionCall.id || `${toolName}-${uuidv4().split('-')[0]}`,
1000
+ name: toolName,
1001
+ input: remappedArgs
1002
+ };
1003
+ // 添加签名(如果存在)
1004
+ // [FIX] 同时检查 thoughtSignature 和 thought_signature
1005
+ const rawSignature = part.thoughtSignature || part.thought_signature;
1006
+ if (rawSignature) {
1007
+ let signature = rawSignature;
1008
+ try {
1009
+ const decoded = Buffer.from(signature, 'base64').toString('utf-8');
1010
+ if (decoded && decoded.length > 0 && !decoded.includes('\ufffd')) {
1011
+ signature = decoded;
1012
+ }
1013
+ } catch (e) {
1014
+ // 解码失败,保持原样
1015
+ }
1016
+ toolUseBlock.signature = signature;
1017
+ }
1018
+ content.push(toolUseBlock);
1019
+ }
1020
+ }
1021
+ }
1022
+ }
1023
+
1024
+ return { content, hasToolUse };
1025
+ }
1026
+
1027
+ // =========================================================================
1028
+ // Gemini -> OpenAI Responses 转换
1029
+ // =========================================================================
1030
+
1031
+ /**
1032
+ * Gemini请求 -> OpenAI Responses请求
1033
+ */
1034
+ toOpenAIResponsesRequest(geminiRequest) {
1035
+ const responsesRequest = {
1036
+ model: geminiRequest.model,
1037
+ max_tokens: checkAndAssignOrDefault(geminiRequest.generationConfig?.maxOutputTokens, OPENAI_DEFAULT_MAX_TOKENS),
1038
+ temperature: checkAndAssignOrDefault(geminiRequest.generationConfig?.temperature, OPENAI_DEFAULT_TEMPERATURE),
1039
+ top_p: checkAndAssignOrDefault(geminiRequest.generationConfig?.topP, OPENAI_DEFAULT_TOP_P),
1040
+ };
1041
+
1042
+ // 处理系统指令
1043
+ if (geminiRequest.systemInstruction && geminiRequest.systemInstruction.parts) {
1044
+ const instructionsText = geminiRequest.systemInstruction.parts
1045
+ .filter(p => p.text)
1046
+ .map(p => p.text)
1047
+ .join('\n');
1048
+ if (instructionsText) {
1049
+ responsesRequest.instructions = instructionsText;
1050
+ }
1051
+ }
1052
+
1053
+ // 处理输入
1054
+ if (geminiRequest.contents && Array.isArray(geminiRequest.contents)) {
1055
+ const lastContent = geminiRequest.contents[geminiRequest.contents.length - 1];
1056
+ if (lastContent && lastContent.parts) {
1057
+ const inputText = lastContent.parts
1058
+ .filter(p => p.text)
1059
+ .map(p => p.text)
1060
+ .join(' ');
1061
+ if (inputText) {
1062
+ responsesRequest.input = inputText;
1063
+ }
1064
+ }
1065
+ }
1066
+
1067
+ return responsesRequest;
1068
+ }
1069
+
1070
+ /**
1071
+ * Gemini响应 -> OpenAI Responses响应
1072
+ */
1073
+ toOpenAIResponsesResponse(geminiResponse, model) {
1074
+ const content = this.processGeminiResponseContent(geminiResponse);
1075
+ const textContent = typeof content === 'string' ? content : JSON.stringify(content);
1076
+
1077
+ let output = [];
1078
+ output.push({
1079
+ id: `msg_${uuidv4().replace(/-/g, '')}`,
1080
+ summary: [],
1081
+ type: "message",
1082
+ role: "assistant",
1083
+ status: "completed",
1084
+ content: [{
1085
+ annotations: [],
1086
+ logprobs: [],
1087
+ text: textContent,
1088
+ type: "output_text"
1089
+ }]
1090
+ });
1091
+
1092
+ return {
1093
+ background: false,
1094
+ created_at: Math.floor(Date.now() / 1000),
1095
+ error: null,
1096
+ id: `resp_${uuidv4().replace(/-/g, '')}`,
1097
+ incomplete_details: null,
1098
+ max_output_tokens: null,
1099
+ max_tool_calls: null,
1100
+ metadata: {},
1101
+ model: model,
1102
+ object: "response",
1103
+ output: output,
1104
+ parallel_tool_calls: true,
1105
+ previous_response_id: null,
1106
+ prompt_cache_key: null,
1107
+ reasoning: {},
1108
+ safety_identifier: "user-" + uuidv4().replace(/-/g, ''),
1109
+ service_tier: "default",
1110
+ status: "completed",
1111
+ store: false,
1112
+ temperature: 1,
1113
+ text: {
1114
+ format: { type: "text" },
1115
+ },
1116
+ tool_choice: "auto",
1117
+ tools: [],
1118
+ top_logprobs: 0,
1119
+ top_p: 1,
1120
+ truncation: "disabled",
1121
+ usage: {
1122
+ input_tokens: geminiResponse.usageMetadata?.promptTokenCount || 0,
1123
+ input_tokens_details: {
1124
+ cached_tokens: geminiResponse.usageMetadata?.cachedContentTokenCount || 0
1125
+ },
1126
+ output_tokens: geminiResponse.usageMetadata?.candidatesTokenCount || 0,
1127
+ output_tokens_details: {
1128
+ reasoning_tokens: geminiResponse.usageMetadata?.thoughtsTokenCount || 0
1129
+ },
1130
+ total_tokens: geminiResponse.usageMetadata?.totalTokenCount || 0
1131
+ },
1132
+ user: null
1133
+ };
1134
+ }
1135
+
1136
+ /**
1137
+ * Gemini流式响应 -> OpenAI Responses流式响应
1138
+ */
1139
+ toOpenAIResponsesStreamChunk(geminiChunk, model, requestId = null) {
1140
+ if (!geminiChunk) return [];
1141
+
1142
+ const responseId = requestId || `resp_${uuidv4().replace(/-/g, '')}`;
1143
+ const events = [];
1144
+
1145
+ // 处理完整的Gemini chunk对象
1146
+ if (typeof geminiChunk === 'object' && !Array.isArray(geminiChunk)) {
1147
+ const candidate = geminiChunk.candidates?.[0];
1148
+
1149
+ if (candidate) {
1150
+ const parts = candidate.content?.parts;
1151
+
1152
+ // 第一个chunk - 检测是否是开始(有role)
1153
+ if (candidate.content?.role === 'model' && parts && parts.length > 0) {
1154
+ // 只在第一次有内容时发送开始事件
1155
+ const hasContent = parts.some(part => part && typeof part.text === 'string' && part.text.length > 0);
1156
+ if (hasContent) {
1157
+ events.push(
1158
+ generateResponseCreated(responseId, model || 'unknown'),
1159
+ generateResponseInProgress(responseId),
1160
+ generateOutputItemAdded(responseId),
1161
+ generateContentPartAdded(responseId)
1162
+ );
1163
+ }
1164
+ }
1165
+
1166
+ // 提取文本内容
1167
+ if (parts && Array.isArray(parts)) {
1168
+ const textParts = parts.filter(part => part && typeof part.text === 'string');
1169
+ if (textParts.length > 0) {
1170
+ const text = textParts.map(part => part.text).join('');
1171
+ events.push({
1172
+ delta: text,
1173
+ item_id: `msg_${uuidv4().replace(/-/g, '')}`,
1174
+ output_index: 0,
1175
+ sequence_number: 3,
1176
+ type: "response.output_text.delta"
1177
+ });
1178
+ }
1179
+ }
1180
+
1181
+ // 处理finishReason
1182
+ if (candidate.finishReason) {
1183
+ events.push(
1184
+ generateOutputTextDone(responseId),
1185
+ generateContentPartDone(responseId),
1186
+ generateOutputItemDone(responseId),
1187
+ generateResponseCompleted(responseId)
1188
+ );
1189
+
1190
+ // 如果有 usage 信息,更新最后一个事件
1191
+ if (geminiChunk.usageMetadata && events.length > 0) {
1192
+ const lastEvent = events[events.length - 1];
1193
+ if (lastEvent.response) {
1194
+ lastEvent.response.usage = {
1195
+ input_tokens: geminiChunk.usageMetadata.promptTokenCount || 0,
1196
+ input_tokens_details: {
1197
+ cached_tokens: geminiChunk.usageMetadata.cachedContentTokenCount || 0
1198
+ },
1199
+ output_tokens: geminiChunk.usageMetadata.candidatesTokenCount || 0,
1200
+ output_tokens_details: {
1201
+ reasoning_tokens: geminiChunk.usageMetadata.thoughtsTokenCount || 0
1202
+ },
1203
+ total_tokens: geminiChunk.usageMetadata.totalTokenCount || 0
1204
+ };
1205
+ }
1206
+ }
1207
+ }
1208
+ }
1209
+ }
1210
+
1211
+ // 向后兼容:处理字符串格式
1212
+ if (typeof geminiChunk === 'string') {
1213
+ events.push({
1214
+ delta: geminiChunk,
1215
+ item_id: `msg_${uuidv4().replace(/-/g, '')}`,
1216
+ output_index: 0,
1217
+ sequence_number: 3,
1218
+ type: "response.output_text.delta"
1219
+ });
1220
+ }
1221
+
1222
+ return events;
1223
+ }
1224
+ }
1225
+
1226
+ export default GeminiConverter;
src/converters/strategies/OllamaConverter.js ADDED
@@ -0,0 +1,690 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * Ollama转换器
3
+ * 处理Ollama协议与其他协议之间的转换
4
+ */
5
+
6
+ import { v4 as uuidv4 } from 'uuid';
7
+ import { createHash } from 'crypto';
8
+ import { BaseConverter } from '../BaseConverter.js';
9
+ import { MODEL_PROTOCOL_PREFIX } from '../../utils/common.js';
10
+ import {
11
+ OLLAMA_DEFAULT_CONTEXT_LENGTH,
12
+ OLLAMA_DEFAULT_MAX_OUTPUT_TOKENS,
13
+ OLLAMA_CLAUDE_DEFAULT_CONTEXT_LENGTH,
14
+ OLLAMA_CLAUDE_SONNET_45_CONTEXT_LENGTH,
15
+ OLLAMA_CLAUDE_SONNET_45_MAX_OUTPUT_TOKENS,
16
+ OLLAMA_CLAUDE_HAIKU_45_CONTEXT_LENGTH,
17
+ OLLAMA_CLAUDE_HAIKU_45_MAX_OUTPUT_TOKENS,
18
+ OLLAMA_CLAUDE_OPUS_41_CONTEXT_LENGTH,
19
+ OLLAMA_CLAUDE_OPUS_41_MAX_OUTPUT_TOKENS,
20
+ OLLAMA_CLAUDE_SONNET_40_CONTEXT_LENGTH,
21
+ OLLAMA_CLAUDE_SONNET_40_MAX_OUTPUT_TOKENS,
22
+ OLLAMA_CLAUDE_SONNET_37_CONTEXT_LENGTH,
23
+ OLLAMA_CLAUDE_SONNET_37_MAX_OUTPUT_TOKENS,
24
+ OLLAMA_CLAUDE_OPUS_40_CONTEXT_LENGTH,
25
+ OLLAMA_CLAUDE_OPUS_40_MAX_OUTPUT_TOKENS,
26
+ OLLAMA_CLAUDE_HAIKU_35_CONTEXT_LENGTH,
27
+ OLLAMA_CLAUDE_HAIKU_35_MAX_OUTPUT_TOKENS,
28
+ OLLAMA_CLAUDE_HAIKU_30_CONTEXT_LENGTH,
29
+ OLLAMA_CLAUDE_HAIKU_30_MAX_OUTPUT_TOKENS,
30
+ OLLAMA_CLAUDE_SONNET_35_CONTEXT_LENGTH,
31
+ OLLAMA_CLAUDE_SONNET_35_MAX_OUTPUT_TOKENS,
32
+ OLLAMA_CLAUDE_OPUS_30_CONTEXT_LENGTH,
33
+ OLLAMA_CLAUDE_OPUS_30_MAX_OUTPUT_TOKENS,
34
+ OLLAMA_GEMINI_25_PRO_CONTEXT_LENGTH,
35
+ OLLAMA_GEMINI_25_PRO_MAX_OUTPUT_TOKENS,
36
+ OLLAMA_GEMINI_25_FLASH_CONTEXT_LENGTH,
37
+ OLLAMA_GEMINI_25_FLASH_MAX_OUTPUT_TOKENS,
38
+ OLLAMA_GEMINI_25_IMAGE_CONTEXT_LENGTH,
39
+ OLLAMA_GEMINI_25_IMAGE_MAX_OUTPUT_TOKENS,
40
+ OLLAMA_GEMINI_25_LIVE_CONTEXT_LENGTH,
41
+ OLLAMA_GEMINI_25_LIVE_MAX_OUTPUT_TOKENS,
42
+ OLLAMA_GEMINI_25_TTS_CONTEXT_LENGTH,
43
+ OLLAMA_GEMINI_25_TTS_MAX_OUTPUT_TOKENS,
44
+ OLLAMA_GEMINI_20_FLASH_CONTEXT_LENGTH,
45
+ OLLAMA_GEMINI_20_FLASH_MAX_OUTPUT_TOKENS,
46
+ OLLAMA_GEMINI_20_IMAGE_CONTEXT_LENGTH,
47
+ OLLAMA_GEMINI_20_IMAGE_MAX_OUTPUT_TOKENS,
48
+ OLLAMA_GEMINI_15_PRO_CONTEXT_LENGTH,
49
+ OLLAMA_GEMINI_15_PRO_MAX_OUTPUT_TOKENS,
50
+ OLLAMA_GEMINI_15_FLASH_CONTEXT_LENGTH,
51
+ OLLAMA_GEMINI_15_FLASH_MAX_OUTPUT_TOKENS,
52
+ OLLAMA_GEMINI_DEFAULT_CONTEXT_LENGTH,
53
+ OLLAMA_GEMINI_DEFAULT_MAX_OUTPUT_TOKENS,
54
+ OLLAMA_GPT4_TURBO_CONTEXT_LENGTH,
55
+ OLLAMA_GPT4_TURBO_MAX_OUTPUT_TOKENS,
56
+ OLLAMA_GPT4_32K_CONTEXT_LENGTH,
57
+ OLLAMA_GPT4_32K_MAX_OUTPUT_TOKENS,
58
+ OLLAMA_GPT4_BASE_CONTEXT_LENGTH,
59
+ OLLAMA_GPT4_BASE_MAX_OUTPUT_TOKENS,
60
+ OLLAMA_GPT35_16K_CONTEXT_LENGTH,
61
+ OLLAMA_GPT35_16K_MAX_OUTPUT_TOKENS,
62
+ OLLAMA_GPT35_BASE_CONTEXT_LENGTH,
63
+ OLLAMA_GPT35_BASE_MAX_OUTPUT_TOKENS,
64
+ OLLAMA_QWEN_CODER_PLUS_CONTEXT_LENGTH,
65
+ OLLAMA_QWEN_CODER_PLUS_MAX_OUTPUT_TOKENS,
66
+ OLLAMA_QWEN_VL_PLUS_CONTEXT_LENGTH,
67
+ OLLAMA_QWEN_VL_PLUS_MAX_OUTPUT_TOKENS,
68
+ OLLAMA_QWEN_CODER_FLASH_CONTEXT_LENGTH,
69
+ OLLAMA_QWEN_CODER_FLASH_MAX_OUTPUT_TOKENS,
70
+ OLLAMA_QWEN_DEFAULT_CONTEXT_LENGTH,
71
+ OLLAMA_QWEN_DEFAULT_MAX_OUTPUT_TOKENS,
72
+ OLLAMA_DEFAULT_FILE_TYPE,
73
+ OLLAMA_DEFAULT_QUANTIZATION_VERSION,
74
+ OLLAMA_DEFAULT_ROPE_FREQ_BASE,
75
+ OLLAMA_DEFAULT_TEMPERATURE,
76
+ OLLAMA_DEFAULT_TOP_P,
77
+ OLLAMA_DEFAULT_QUANTIZATION_LEVEL,
78
+ OLLAMA_SHOW_QUANTIZATION_LEVEL
79
+ } from '../utils.js';
80
+
81
+
82
+
83
+ /**
84
+ * Ollama转换器类
85
+ * 实现Ollama协议到其他协议的转换
86
+ */
87
+ export class OllamaConverter extends BaseConverter {
88
+ constructor() {
89
+ super('ollama');
90
+ }
91
+
92
+ /**
93
+ * 转换请求 - Ollama -> 其他协议
94
+ */
95
+ convertRequest(data, targetProtocol) {
96
+ switch (targetProtocol) {
97
+ case MODEL_PROTOCOL_PREFIX.OPENAI:
98
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
99
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
100
+ return this.toOpenAIRequest(data);
101
+ default:
102
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
103
+ }
104
+ }
105
+
106
+ /**
107
+ * 转换响应 - 其他协议 -> Ollama
108
+ */
109
+ convertResponse(data, sourceProtocol, model) {
110
+ return this.toOllamaChatResponse(data, model);
111
+ }
112
+
113
+ /**
114
+ * 转换流式响应块 - 其他协议 -> Ollama
115
+ */
116
+ convertStreamChunk(chunk, sourceProtocol, model, isDone = false) {
117
+ return this.toOllamaStreamChunk(chunk, model, isDone);
118
+ }
119
+
120
+ /**
121
+ * 转换模型列表 - 其他协议 -> Ollama
122
+ */
123
+ convertModelList(data, sourceProtocol) {
124
+ return this.toOllamaTags(data, sourceProtocol);
125
+ }
126
+
127
+ // =========================================================================
128
+ // Ollama -> OpenAI 转换
129
+ // =========================================================================
130
+
131
+ /**
132
+ * Ollama请求 -> OpenAI请求
133
+ */
134
+ toOpenAIRequest(ollamaRequest) {
135
+ const openaiRequest = {
136
+ model: ollamaRequest.model || 'default',
137
+ messages: [],
138
+ stream: ollamaRequest.stream !== undefined ? ollamaRequest.stream : false
139
+ };
140
+
141
+ // Map Ollama messages to OpenAI format
142
+ if (ollamaRequest.messages && Array.isArray(ollamaRequest.messages)) {
143
+ openaiRequest.messages = ollamaRequest.messages.map(msg => ({
144
+ role: msg.role || 'user',
145
+ content: msg.content || ''
146
+ }));
147
+ }
148
+
149
+ // Map Ollama options to OpenAI parameters
150
+ if (ollamaRequest.options) {
151
+ const opts = ollamaRequest.options;
152
+ if (opts.temperature !== undefined) openaiRequest.temperature = opts.temperature;
153
+ if (opts.top_p !== undefined) openaiRequest.top_p = opts.top_p;
154
+ if (opts.top_k !== undefined) openaiRequest.top_k = opts.top_k;
155
+ if (opts.num_predict !== undefined) openaiRequest.max_tokens = opts.num_predict;
156
+ if (opts.stop !== undefined) openaiRequest.stop = opts.stop;
157
+ }
158
+
159
+ // Handle system prompt
160
+ if (ollamaRequest.system) {
161
+ openaiRequest.messages.unshift({
162
+ role: 'system',
163
+ content: ollamaRequest.system
164
+ });
165
+ }
166
+
167
+ // Handle template/prompt for generate endpoint
168
+ if (ollamaRequest.prompt) {
169
+ openaiRequest.messages = [{
170
+ role: 'user',
171
+ content: ollamaRequest.prompt
172
+ }];
173
+
174
+ // Add system prompt if provided
175
+ if (ollamaRequest.system) {
176
+ openaiRequest.messages.unshift({
177
+ role: 'system',
178
+ content: ollamaRequest.system
179
+ });
180
+ }
181
+ }
182
+
183
+ return openaiRequest;
184
+ }
185
+
186
+ // =========================================================================
187
+ // OpenAI/Claude/Gemini -> Ollama 转换
188
+ // =========================================================================
189
+
190
+ /**
191
+ * OpenAI/Claude/Gemini响应 -> Ollama chat响应
192
+ */
193
+ toOllamaChatResponse(response, model) {
194
+ const ollamaResponse = {
195
+ model: model || response.model || 'unknown',
196
+ created_at: new Date().toISOString(),
197
+ done: true
198
+ };
199
+
200
+ // Handle OpenAI format (choices array)
201
+ if (response.choices && response.choices.length > 0) {
202
+ const choice = response.choices[0];
203
+ ollamaResponse.message = {
204
+ role: choice.message?.role || 'assistant',
205
+ content: choice.message?.content || ''
206
+ };
207
+
208
+ // Map finish reason
209
+ if (choice.finish_reason) {
210
+ ollamaResponse.done_reason = choice.finish_reason === 'stop' ? 'stop' : choice.finish_reason;
211
+ }
212
+ }
213
+ // Handle Claude format (content array)
214
+ else if (response.content && Array.isArray(response.content)) {
215
+ let textContent = '';
216
+ response.content.forEach(block => {
217
+ if (block.type === 'text' && block.text) {
218
+ textContent += block.text;
219
+ }
220
+ });
221
+
222
+ ollamaResponse.message = {
223
+ role: response.role || 'assistant',
224
+ content: textContent
225
+ };
226
+
227
+ if (response.stop_reason) {
228
+ ollamaResponse.done_reason = response.stop_reason === 'end_turn' ? 'stop' : response.stop_reason;
229
+ }
230
+ }
231
+ // Handle Gemini format (candidates array)
232
+ else if (response.candidates && response.candidates.length > 0) {
233
+ const candidate = response.candidates[0];
234
+ let textContent = '';
235
+ if (candidate.content && candidate.content.parts) {
236
+ textContent = candidate.content.parts
237
+ .filter(part => part.text)
238
+ .map(part => part.text)
239
+ .join('');
240
+ }
241
+
242
+ ollamaResponse.message = {
243
+ role: candidate.content?.role || 'assistant',
244
+ content: textContent
245
+ };
246
+
247
+ if (candidate.finishReason) {
248
+ ollamaResponse.done_reason = candidate.finishReason.toLowerCase();
249
+ }
250
+ }
251
+
252
+ // Add usage statistics if available
253
+ const usage = response.usage || response.usageMetadata;
254
+ if (usage) {
255
+ ollamaResponse.prompt_eval_count = usage.prompt_tokens || usage.input_tokens || usage.promptTokenCount || 0;
256
+ ollamaResponse.eval_count = usage.completion_tokens || usage.output_tokens || usage.candidatesTokenCount || 0;
257
+ ollamaResponse.total_duration = 0;
258
+ ollamaResponse.load_duration = 0;
259
+ ollamaResponse.prompt_eval_duration = 0;
260
+ ollamaResponse.eval_duration = 0;
261
+ }
262
+
263
+ return ollamaResponse;
264
+ }
265
+
266
+ /**
267
+ * OpenAI/Claude/Gemini generate响应 -> Ollama generate响应
268
+ */
269
+ toOllamaGenerateResponse(response, model) {
270
+ const ollamaResponse = {
271
+ model: model || response.model || 'unknown',
272
+ created_at: new Date().toISOString(),
273
+ done: true
274
+ };
275
+
276
+ // Handle OpenAI format
277
+ if (response.choices && response.choices.length > 0) {
278
+ const choice = response.choices[0];
279
+ ollamaResponse.response = choice.message?.content || choice.text || '';
280
+
281
+ if (choice.finish_reason) {
282
+ ollamaResponse.done_reason = choice.finish_reason === 'stop' ? 'stop' : choice.finish_reason;
283
+ }
284
+ }
285
+ // Handle Claude format
286
+ else if (response.content && Array.isArray(response.content)) {
287
+ let textContent = '';
288
+ response.content.forEach(block => {
289
+ if (block.type === 'text' && block.text) {
290
+ textContent += block.text;
291
+ }
292
+ });
293
+ ollamaResponse.response = textContent;
294
+
295
+ if (response.stop_reason) {
296
+ ollamaResponse.done_reason = response.stop_reason === 'end_turn' ? 'stop' : response.stop_reason;
297
+ }
298
+ }
299
+ // Handle Gemini format
300
+ else if (response.candidates && response.candidates.length > 0) {
301
+ const candidate = response.candidates[0];
302
+ let textContent = '';
303
+ if (candidate.content && candidate.content.parts) {
304
+ textContent = candidate.content.parts
305
+ .filter(part => part.text)
306
+ .map(part => part.text)
307
+ .join('');
308
+ }
309
+ ollamaResponse.response = textContent;
310
+
311
+ if (candidate.finishReason) {
312
+ ollamaResponse.done_reason = candidate.finishReason.toLowerCase();
313
+ }
314
+ }
315
+
316
+ // Add usage statistics
317
+ const genUsage = response.usage || response.usageMetadata;
318
+ if (genUsage) {
319
+ ollamaResponse.prompt_eval_count = genUsage.prompt_tokens || genUsage.input_tokens || genUsage.promptTokenCount || 0;
320
+ ollamaResponse.eval_count = genUsage.completion_tokens || genUsage.output_tokens || genUsage.candidatesTokenCount || 0;
321
+ ollamaResponse.total_duration = 0;
322
+ ollamaResponse.load_duration = 0;
323
+ ollamaResponse.prompt_eval_duration = 0;
324
+ ollamaResponse.eval_duration = 0;
325
+ }
326
+
327
+ return ollamaResponse;
328
+ }
329
+
330
+ /**
331
+ * OpenAI/Claude/Gemini流式块 -> Ollama流式块
332
+ */
333
+ toOllamaStreamChunk(chunk, model, isDone = false) {
334
+ const ollamaChunk = {
335
+ model: model || 'unknown',
336
+ created_at: new Date().toISOString(),
337
+ done: isDone
338
+ };
339
+
340
+ // Handle Claude SSE format
341
+ if (chunk.type) {
342
+ if (chunk.type === 'content_block_delta' && chunk.delta) {
343
+ ollamaChunk.message = {
344
+ role: 'assistant',
345
+ content: chunk.delta.text || ''
346
+ };
347
+ } else if (chunk.type === 'message_delta' && chunk.usage) {
348
+ ollamaChunk.message = {
349
+ role: 'assistant',
350
+ content: ''
351
+ };
352
+ ollamaChunk.prompt_eval_count = 0;
353
+ ollamaChunk.eval_count = chunk.usage.output_tokens || 0;
354
+ } else {
355
+ ollamaChunk.message = {
356
+ role: 'assistant',
357
+ content: ''
358
+ };
359
+ }
360
+ }
361
+ // Handle Gemini format
362
+ else if (!isDone && chunk.candidates && chunk.candidates.length > 0) {
363
+ const candidate = chunk.candidates[0];
364
+ let content = '';
365
+ if (candidate.content && candidate.content.parts) {
366
+ content = candidate.content.parts
367
+ .filter(part => part.text)
368
+ .map(part => part.text)
369
+ .join('');
370
+ }
371
+ ollamaChunk.message = {
372
+ role: 'assistant',
373
+ content: content
374
+ };
375
+ }
376
+ // Handle OpenAI format
377
+ else if (!isDone && chunk.choices && chunk.choices.length > 0) {
378
+ const delta = chunk.choices[0].delta;
379
+ ollamaChunk.message = {
380
+ role: delta.role || 'assistant',
381
+ content: delta.content || ''
382
+ };
383
+ }
384
+ // Handle final chunk
385
+ else if (isDone) {
386
+ ollamaChunk.message = {
387
+ role: 'assistant',
388
+ content: ''
389
+ };
390
+ ollamaChunk.done_reason = 'stop';
391
+ }
392
+
393
+ return ollamaChunk;
394
+ }
395
+
396
+ /**
397
+ * OpenAI/Claude/Gemini流式块 -> Ollama generate流式块
398
+ */
399
+ toOllamaGenerateStreamChunk(chunk, model, isDone = false) {
400
+ const ollamaChunk = {
401
+ model: model || 'unknown',
402
+ created_at: new Date().toISOString(),
403
+ done: isDone
404
+ };
405
+
406
+ // Handle Claude SSE format
407
+ if (chunk.type) {
408
+ if (chunk.type === 'content_block_delta' && chunk.delta) {
409
+ ollamaChunk.response = chunk.delta.text || '';
410
+ } else if (chunk.type === 'message_delta' && chunk.usage) {
411
+ ollamaChunk.response = '';
412
+ ollamaChunk.prompt_eval_count = 0;
413
+ ollamaChunk.eval_count = chunk.usage.output_tokens || 0;
414
+ } else {
415
+ ollamaChunk.response = '';
416
+ }
417
+ }
418
+ // Handle OpenAI format
419
+ else if (!isDone && chunk.choices && chunk.choices.length > 0) {
420
+ const delta = chunk.choices[0].delta;
421
+ ollamaChunk.response = delta.content || '';
422
+ }
423
+ // Handle final chunk
424
+ else if (isDone) {
425
+ ollamaChunk.response = '';
426
+ ollamaChunk.done_reason = 'stop';
427
+ }
428
+
429
+ return ollamaChunk;
430
+ }
431
+
432
+ /**
433
+ * OpenAI/Claude/Gemini模型列表 -> Ollama tags
434
+ */
435
+ toOllamaTags(modelList, sourceProtocol = null) {
436
+ const models = [];
437
+
438
+ // Handle both OpenAI format (data array) and Gemini format (models array)
439
+ const sourceModels = modelList.data || modelList.models || [];
440
+
441
+ if (Array.isArray(sourceModels)) {
442
+ sourceModels.forEach(model => {
443
+ // Get model name
444
+ let modelName = model.id || model.name || model.displayName || 'unknown';
445
+
446
+ // Remove "models/" prefix if present (for Gemini)
447
+ if (modelName.startsWith('models/')) {
448
+ modelName = modelName.substring(7); // Remove "models/"
449
+ }
450
+
451
+ // Skip models with invalid names
452
+ if (modelName === 'unknown' || !modelName) {
453
+ return;
454
+ }
455
+
456
+ // IMPORTANT: Copilot expects family: "Ollama" with capital O!
457
+ const modelOwner = 'Ollama';
458
+
459
+ models.push({
460
+ name: modelName,
461
+ model: modelName,
462
+ modified_at: new Date().toISOString(),
463
+ size: 0, // As in the old patch
464
+ digest: '', // Empty string, as in the old patch
465
+ details: {
466
+ parent_model: '',
467
+ format: 'gguf',
468
+ family: modelOwner, // "Ollama" with capital O
469
+ families: [modelOwner],
470
+ parameter_size: '0B', // As in the old patch
471
+ quantization_level: OLLAMA_DEFAULT_QUANTIZATION_LEVEL
472
+ }
473
+ });
474
+ });
475
+ }
476
+
477
+ return { models };
478
+ }
479
+
480
+ /**
481
+ * Generate Ollama show response
482
+ */
483
+ toOllamaShowResponse(modelName) {
484
+ // Minimal implementation, as in the old patch
485
+ let contextLength = OLLAMA_DEFAULT_CONTEXT_LENGTH;
486
+ let maxOutputTokens = OLLAMA_DEFAULT_MAX_OUTPUT_TOKENS;
487
+ let family = 'Ollama'; // ВАЖНО: С большой буквы, как ожидает Copilot!
488
+ let architecture = 'transformer';
489
+
490
+ const lowerName = modelName.toLowerCase();
491
+
492
+ // Determine contextLength by model name
493
+ // Claude models
494
+ if (lowerName.includes('claude')) {
495
+ architecture = 'claude';
496
+ contextLength = OLLAMA_CLAUDE_DEFAULT_CONTEXT_LENGTH; // Default 200K
497
+
498
+ // Claude Sonnet 4.5
499
+ if (lowerName.includes('sonnet-4-5') || lowerName.includes('sonnet-4.5')) {
500
+ contextLength = OLLAMA_CLAUDE_SONNET_45_CONTEXT_LENGTH; // 200K (1M beta available)
501
+ maxOutputTokens = OLLAMA_CLAUDE_SONNET_45_MAX_OUTPUT_TOKENS; // 64K output
502
+ }
503
+ // Claude Haiku 4.5
504
+ else if (lowerName.includes('haiku-4-5') || lowerName.includes('haiku-4.5')) {
505
+ contextLength = OLLAMA_CLAUDE_HAIKU_45_CONTEXT_LENGTH; // 200K
506
+ maxOutputTokens = OLLAMA_CLAUDE_HAIKU_45_MAX_OUTPUT_TOKENS; // 64K output
507
+ }
508
+ // Claude Opus 4.1
509
+ else if (lowerName.includes('opus-4-1') || lowerName.includes('opus-4.1')) {
510
+ contextLength = OLLAMA_CLAUDE_OPUS_41_CONTEXT_LENGTH; // 200K
511
+ maxOutputTokens = OLLAMA_CLAUDE_OPUS_41_MAX_OUTPUT_TOKENS; // 32K output
512
+ }
513
+ // Claude Sonnet 4.0 (legacy)
514
+ else if (lowerName.includes('sonnet-4-0') || lowerName.includes('sonnet-4.0') || lowerName.includes('sonnet-4-20')) {
515
+ contextLength = OLLAMA_CLAUDE_SONNET_40_CONTEXT_LENGTH; // 200K (1M beta available)
516
+ maxOutputTokens = OLLAMA_CLAUDE_SONNET_40_MAX_OUTPUT_TOKENS; // 64K output
517
+ }
518
+ // Claude Sonnet 3.7 (legacy)
519
+ else if (lowerName.includes('3-7') || lowerName.includes('3.7')) {
520
+ contextLength = OLLAMA_CLAUDE_SONNET_37_CONTEXT_LENGTH; // 200K
521
+ maxOutputTokens = OLLAMA_CLAUDE_SONNET_37_MAX_OUTPUT_TOKENS; // 64K output (128K beta available)
522
+ }
523
+ // Claude Opus 4.0 (legacy)
524
+ else if (lowerName.includes('opus-4-0') || lowerName.includes('opus-4.0') || lowerName.includes('opus-4-20')) {
525
+ contextLength = OLLAMA_CLAUDE_OPUS_40_CONTEXT_LENGTH; // 200K
526
+ maxOutputTokens = OLLAMA_CLAUDE_OPUS_40_MAX_OUTPUT_TOKENS; // 32K output
527
+ }
528
+ // Claude Haiku 3.5 (legacy)
529
+ else if (lowerName.includes('haiku-3-5') || lowerName.includes('haiku-3.5')) {
530
+ contextLength = OLLAMA_CLAUDE_HAIKU_35_CONTEXT_LENGTH; // 200K
531
+ maxOutputTokens = OLLAMA_CLAUDE_HAIKU_35_MAX_OUTPUT_TOKENS; // 8K output
532
+ }
533
+ // Claude Haiku 3.0 (legacy)
534
+ else if (lowerName.includes('haiku-3-0') || lowerName.includes('haiku-3.0') || lowerName.includes('haiku-20240307')) {
535
+ contextLength = OLLAMA_CLAUDE_HAIKU_30_CONTEXT_LENGTH; // 200K
536
+ maxOutputTokens = OLLAMA_CLAUDE_HAIKU_30_MAX_OUTPUT_TOKENS; // 4K output
537
+ }
538
+ // Claude Sonnet 3.5 (legacy)
539
+ else if (lowerName.includes('sonnet-3-5') || lowerName.includes('sonnet-3.5')) {
540
+ contextLength = OLLAMA_CLAUDE_SONNET_35_CONTEXT_LENGTH; // 200K
541
+ maxOutputTokens = OLLAMA_CLAUDE_SONNET_35_MAX_OUTPUT_TOKENS; // 8K output
542
+ }
543
+ // Claude Opus 3.0 (legacy)
544
+ else if (lowerName.includes('opus-3-0') || lowerName.includes('opus-3.0') || lowerName.includes('opus') && lowerName.includes('20240229')) {
545
+ contextLength = OLLAMA_CLAUDE_OPUS_30_CONTEXT_LENGTH; // 200K
546
+ maxOutputTokens = OLLAMA_CLAUDE_OPUS_30_MAX_OUTPUT_TOKENS; // 4K output
547
+ }
548
+ // Default for Claude
549
+ else {
550
+ contextLength = OLLAMA_CLAUDE_DEFAULT_CONTEXT_LENGTH; // 200K
551
+ maxOutputTokens = OLLAMA_CLAUDE_HAIKU_35_MAX_OUTPUT_TOKENS; // 8K output
552
+ }
553
+ }
554
+ // Gemini models
555
+ else if (lowerName.includes('gemini')) {
556
+ architecture = 'gemini';
557
+
558
+ // Gemini 2.5 Pro
559
+ if (lowerName.includes('2.5') && lowerName.includes('pro')) {
560
+ contextLength = OLLAMA_GEMINI_25_PRO_CONTEXT_LENGTH; // 1M input tokens
561
+ maxOutputTokens = OLLAMA_GEMINI_25_PRO_MAX_OUTPUT_TOKENS; // 65K output tokens
562
+ }
563
+ // Gemini 2.5 Flash / Flash-Lite
564
+ else if (lowerName.includes('2.5') && (lowerName.includes('flash') || lowerName.includes('lite'))) {
565
+ contextLength = OLLAMA_GEMINI_25_FLASH_CONTEXT_LENGTH; // 1M input tokens
566
+ maxOutputTokens = OLLAMA_GEMINI_25_FLASH_MAX_OUTPUT_TOKENS; // 65K output tokens
567
+ }
568
+ // Gemini 2.5 Flash Image
569
+ else if (lowerName.includes('2.5') && lowerName.includes('image')) {
570
+ contextLength = OLLAMA_GEMINI_25_IMAGE_CONTEXT_LENGTH; // 65K input tokens
571
+ maxOutputTokens = OLLAMA_GEMINI_25_IMAGE_MAX_OUTPUT_TOKENS; // 32K output tokens
572
+ }
573
+ // Gemini 2.5 Flash Live / Native Audio
574
+ else if (lowerName.includes('2.5') && (lowerName.includes('live') || lowerName.includes('native-audio'))) {
575
+ contextLength = OLLAMA_GEMINI_25_LIVE_CONTEXT_LENGTH; // 131K input tokens
576
+ maxOutputTokens = OLLAMA_GEMINI_25_LIVE_MAX_OUTPUT_TOKENS; // 8K output tokens
577
+ }
578
+ // Gemini 2.5 TTS
579
+ else if (lowerName.includes('2.5') && lowerName.includes('tts')) {
580
+ contextLength = OLLAMA_GEMINI_25_TTS_CONTEXT_LENGTH; // 8K input tokens
581
+ maxOutputTokens = OLLAMA_GEMINI_25_TTS_MAX_OUTPUT_TOKENS; // 16K output tokens
582
+ }
583
+ // Gemini 2.0 Flash
584
+ else if (lowerName.includes('2.0') && lowerName.includes('flash')) {
585
+ contextLength = OLLAMA_GEMINI_20_FLASH_CONTEXT_LENGTH; // 1M input tokens
586
+ maxOutputTokens = OLLAMA_GEMINI_20_FLASH_MAX_OUTPUT_TOKENS; // 8K output tokens
587
+ }
588
+ // Gemini 2.0 Flash Image
589
+ else if (lowerName.includes('2.0') && lowerName.includes('image')) {
590
+ contextLength = OLLAMA_GEMINI_20_IMAGE_CONTEXT_LENGTH; // 32K input tokens
591
+ maxOutputTokens = OLLAMA_GEMINI_20_IMAGE_MAX_OUTPUT_TOKENS; // 8K output tokens
592
+ }
593
+ // Gemini 1.5 Pro (legacy)
594
+ else if (lowerName.includes('1.5') && lowerName.includes('pro')) {
595
+ contextLength = OLLAMA_GEMINI_15_PRO_CONTEXT_LENGTH; // 2M tokens
596
+ maxOutputTokens = OLLAMA_GEMINI_15_PRO_MAX_OUTPUT_TOKENS;
597
+ }
598
+ // Gemini 1.5 Flash (legacy)
599
+ else if (lowerName.includes('1.5') && lowerName.includes('flash')) {
600
+ contextLength = OLLAMA_GEMINI_15_FLASH_CONTEXT_LENGTH; // 1M tokens
601
+ maxOutputTokens = OLLAMA_GEMINI_15_FLASH_MAX_OUTPUT_TOKENS;
602
+ }
603
+ // Default for Gemini
604
+ else {
605
+ contextLength = OLLAMA_GEMINI_DEFAULT_CONTEXT_LENGTH; // 1M tokens
606
+ maxOutputTokens = OLLAMA_GEMINI_DEFAULT_MAX_OUTPUT_TOKENS;
607
+ }
608
+ }
609
+ // GPT-4 models
610
+ else if (lowerName.includes('gpt-4')) {
611
+ architecture = 'gpt';
612
+
613
+ if (lowerName.includes('turbo') || lowerName.includes('preview')) {
614
+ contextLength = OLLAMA_GPT4_TURBO_CONTEXT_LENGTH; // GPT-4 Turbo
615
+ maxOutputTokens = OLLAMA_GPT4_TURBO_MAX_OUTPUT_TOKENS;
616
+ } else if (lowerName.includes('32k')) {
617
+ contextLength = OLLAMA_GPT4_32K_CONTEXT_LENGTH;
618
+ maxOutputTokens = OLLAMA_GPT4_32K_MAX_OUTPUT_TOKENS;
619
+ } else {
620
+ contextLength = OLLAMA_GPT4_BASE_CONTEXT_LENGTH; // GPT-4 base
621
+ maxOutputTokens = OLLAMA_GPT4_BASE_MAX_OUTPUT_TOKENS;
622
+ }
623
+ }
624
+ // GPT-3.5 models
625
+ else if (lowerName.includes('gpt-3.5')) {
626
+ architecture = 'gpt';
627
+
628
+ if (lowerName.includes('16k')) {
629
+ contextLength = OLLAMA_GPT35_16K_CONTEXT_LENGTH;
630
+ maxOutputTokens = OLLAMA_GPT35_16K_MAX_OUTPUT_TOKENS;
631
+ } else {
632
+ contextLength = OLLAMA_GPT35_BASE_CONTEXT_LENGTH;
633
+ maxOutputTokens = OLLAMA_GPT35_BASE_MAX_OUTPUT_TOKENS;
634
+ }
635
+ }
636
+ // Qwen models
637
+ else if (lowerName.includes('qwen')) {
638
+ architecture = 'qwen';
639
+
640
+ // Qwen3 Coder Plus (coder-model)
641
+ if (lowerName.includes('coder-plus') || lowerName.includes('coder_plus') || lowerName.includes('coder-model')) {
642
+ contextLength = OLLAMA_QWEN_CODER_PLUS_CONTEXT_LENGTH; // 128K tokens
643
+ maxOutputTokens = OLLAMA_QWEN_CODER_PLUS_MAX_OUTPUT_TOKENS; // 65K output
644
+ }
645
+ // Qwen3 VL Plus (vision-model)
646
+ else if (lowerName.includes('vl-plus') || lowerName.includes('vl_plus') || lowerName.includes('vision-model')) {
647
+ contextLength = OLLAMA_QWEN_VL_PLUS_CONTEXT_LENGTH; // 256K tokens
648
+ maxOutputTokens = OLLAMA_QWEN_VL_PLUS_MAX_OUTPUT_TOKENS; // 32K output
649
+ }
650
+ // Qwen3 Coder Flash
651
+ else if (lowerName.includes('coder-flash') || lowerName.includes('coder_flash')) {
652
+ contextLength = OLLAMA_QWEN_CODER_FLASH_CONTEXT_LENGTH; // 128K tokens
653
+ maxOutputTokens = OLLAMA_QWEN_CODER_FLASH_MAX_OUTPUT_TOKENS; // 65K output
654
+ }
655
+ // Default for Qwen
656
+ else {
657
+ contextLength = OLLAMA_QWEN_DEFAULT_CONTEXT_LENGTH; // 32K tokens
658
+ maxOutputTokens = OLLAMA_QWEN_DEFAULT_MAX_OUTPUT_TOKENS;
659
+ }
660
+ }
661
+
662
+ // Minimal parameter_size, as in the old patch
663
+ let parameterSize = '0B';
664
+
665
+ return {
666
+ license: '',
667
+ modelfile: `# Modelfile for ${modelName}\nFROM ${modelName}`,
668
+ parameters: `num_ctx ${contextLength}\nnum_predict ${maxOutputTokens}\ntemperature ${OLLAMA_DEFAULT_TEMPERATURE}\ntop_p ${OLLAMA_DEFAULT_TOP_P}`,
669
+ template: '{{ if .System }}{{ .System }}\n{{ end }}{{ .Prompt }}',
670
+ details: {
671
+ parent_model: '',
672
+ format: 'gguf',
673
+ family: family,
674
+ families: [family],
675
+ parameter_size: parameterSize,
676
+ quantization_level: OLLAMA_SHOW_QUANTIZATION_LEVEL
677
+ },
678
+ model_info: {
679
+ 'general.architecture': architecture,
680
+ 'general.file_type': OLLAMA_DEFAULT_FILE_TYPE,
681
+ 'general.parameter_count': 0,
682
+ 'general.quantization_version': OLLAMA_DEFAULT_QUANTIZATION_VERSION,
683
+ 'general.context_length': contextLength,
684
+ 'llama.context_length': contextLength,
685
+ 'llama.rope.freq_base': OLLAMA_DEFAULT_ROPE_FREQ_BASE
686
+ },
687
+ capabilities: ['tools', 'vision', 'completion'] // Indicate that the model supports tool calling
688
+ };
689
+ }
690
+ }
src/converters/strategies/OpenAIConverter.js ADDED
@@ -0,0 +1,1565 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * OpenAI转换器
3
+ * 处理OpenAI协议与其他协议之间的转换
4
+ */
5
+
6
+ import { v4 as uuidv4 } from 'uuid';
7
+ import { BaseConverter } from '../BaseConverter.js';
8
+ import { CodexConverter } from './CodexConverter.js';
9
+ import {
10
+ extractAndProcessSystemMessages as extractSystemMessages,
11
+ extractTextFromMessageContent as extractText,
12
+ safeParseJSON,
13
+ checkAndAssignOrDefault,
14
+ extractThinkingFromOpenAIText,
15
+ mapFinishReason,
16
+ cleanJsonSchemaProperties as cleanJsonSchema,
17
+ CLAUDE_DEFAULT_MAX_TOKENS,
18
+ CLAUDE_DEFAULT_TEMPERATURE,
19
+ CLAUDE_DEFAULT_TOP_P,
20
+ GEMINI_DEFAULT_MAX_TOKENS,
21
+ GEMINI_DEFAULT_TEMPERATURE,
22
+ GEMINI_DEFAULT_TOP_P,
23
+ OPENAI_DEFAULT_INPUT_TOKEN_LIMIT,
24
+ OPENAI_DEFAULT_OUTPUT_TOKEN_LIMIT
25
+ } from '../utils.js';
26
+ import { MODEL_PROTOCOL_PREFIX } from '../../utils/common.js';
27
+ import {
28
+ generateResponseCreated,
29
+ generateResponseInProgress,
30
+ generateOutputItemAdded,
31
+ generateContentPartAdded,
32
+ generateOutputTextDone,
33
+ generateContentPartDone,
34
+ generateOutputItemDone,
35
+ generateResponseCompleted
36
+ } from '../../providers/openai/openai-responses-core.mjs';
37
+
38
+ /**
39
+ * OpenAI转换器类
40
+ * 实现OpenAI协议到其他协议的转换
41
+ */
42
+ export class OpenAIConverter extends BaseConverter {
43
+ constructor() {
44
+ super('openai');
45
+ // 创建 CodexConverter 实例用于委托
46
+ this.codexConverter = new CodexConverter();
47
+ }
48
+
49
+ /**
50
+ * 转换请求
51
+ */
52
+ convertRequest(data, targetProtocol) {
53
+ switch (targetProtocol) {
54
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
55
+ return this.toClaudeRequest(data);
56
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
57
+ return this.toGeminiRequest(data);
58
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
59
+ return this.toOpenAIResponsesRequest(data);
60
+ case MODEL_PROTOCOL_PREFIX.CODEX:
61
+ return this.toCodexRequest(data);
62
+ default:
63
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
64
+ }
65
+ }
66
+
67
+ /**
68
+ * 转换响应
69
+ */
70
+ convertResponse(data, targetProtocol, model) {
71
+ // OpenAI作为源格式时,通常不需要转换响应
72
+ // 因为其他协议会转换到OpenAI格式
73
+ switch (targetProtocol) {
74
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
75
+ return this.toClaudeResponse(data, model);
76
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
77
+ return this.toGeminiResponse(data, model);
78
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
79
+ return this.toOpenAIResponsesResponse(data, model);
80
+ default:
81
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
82
+ }
83
+ }
84
+
85
+ /**
86
+ * 转换流式响应块
87
+ */
88
+ convertStreamChunk(chunk, targetProtocol, model) {
89
+ switch (targetProtocol) {
90
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
91
+ return this.toClaudeStreamChunk(chunk, model);
92
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
93
+ return this.toGeminiStreamChunk(chunk, model);
94
+ case MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES:
95
+ return this.toOpenAIResponsesStreamChunk(chunk, model);
96
+ default:
97
+ throw new Error(`Unsupported target protocol: ${targetProtocol}`);
98
+ }
99
+ }
100
+
101
+ /**
102
+ * 转换模型列表
103
+ */
104
+ convertModelList(data, targetProtocol) {
105
+ switch (targetProtocol) {
106
+ case MODEL_PROTOCOL_PREFIX.CLAUDE:
107
+ return this.toClaudeModelList(data);
108
+ case MODEL_PROTOCOL_PREFIX.GEMINI:
109
+ return this.toGeminiModelList(data);
110
+ default:
111
+ return this.ensureDisplayName(data);
112
+ }
113
+ }
114
+
115
+ /**
116
+ * Ensure display_name field exists in OpenAI model list
117
+ */
118
+ ensureDisplayName(openaiModels) {
119
+ if (!openaiModels || !openaiModels.data) {
120
+ return openaiModels;
121
+ }
122
+
123
+ return {
124
+ ...openaiModels,
125
+ data: openaiModels.data.map(model => ({
126
+ ...model,
127
+ display_name: model.display_name || model.id,
128
+ })),
129
+ };
130
+ }
131
+
132
+ // =========================================================================
133
+ // OpenAI -> Claude 转换
134
+ // =========================================================================
135
+
136
  /**
   * Convert an OpenAI chat-completions request into a Claude Messages request.
   *
   * Handles: system-message extraction, tool results (`role: "tool"` →
   * tool_result blocks on a user turn), assistant tool calls (→ tool_use
   * blocks), text/image/audio content parts, merging of adjacent same-role
   * messages, trailing-whitespace trim on the final assistant text block,
   * and tool/tool_choice mapping.
   *
   * @param {object} openaiRequest - OpenAI-format request body.
   * @returns {object} Claude Messages API request body.
   */
  toClaudeRequest(openaiRequest) {
    const messages = openaiRequest.messages || [];
    const { systemInstruction, nonSystemMessages } = extractSystemMessages(messages);

    const claudeMessages = [];

    for (const message of nonSystemMessages) {
      const role = message.role === 'assistant' ? 'assistant' : 'user';
      let content = [];

      if (message.role === 'tool') {
        // Tool result message: Claude expects tool results on a *user* turn.
        content.push({
          type: 'tool_result',
          tool_use_id: message.tool_call_id,
          content: safeParseJSON(message.content)
        });
        claudeMessages.push({ role: 'user', content: content });
      } else if (message.role === 'assistant' && (message.tool_calls?.length || message.function_calls?.length)) {
        // Assistant tool-call message — supports both tool_calls and function_calls.
        const calls = message.tool_calls || message.function_calls || [];
        const toolUseBlocks = calls.map(tc => ({
          type: 'tool_use',
          id: tc.id,
          name: tc.function.name,
          input: safeParseJSON(tc.function.arguments)
        }));
        claudeMessages.push({ role: 'assistant', content: toolUseBlocks });
      } else {
        // Plain message: string content or an array of content parts.
        if (typeof message.content === 'string') {
          if (message.content) {
            content.push({ type: 'text', text: message.content.trim() });
          }
        } else if (Array.isArray(message.content)) {
          message.content.forEach(item => {
            if (!item) return;
            switch (item.type) {
              case 'text':
                if (item.text) {
                  content.push({ type: 'text', text: item.text.trim() });
                }
                break;
              case 'image_url':
                if (item.image_url) {
                  const imageUrl = typeof item.image_url === 'string'
                    ? item.image_url
                    : item.image_url.url;
                  if (imageUrl.startsWith('data:')) {
                    // data: URL → base64 image block.
                    const [header, data] = imageUrl.split(',');
                    const mediaType = header.match(/data:([^;]+)/)?.[1] || 'image/jpeg';
                    content.push({
                      type: 'image',
                      source: {
                        type: 'base64',
                        media_type: mediaType,
                        data: data
                      }
                    });
                  } else {
                    // Remote URL: degraded to a textual placeholder.
                    content.push({ type: 'text', text: `[Image: ${imageUrl}]` });
                  }
                }
                break;
              case 'audio':
                // Audio has no Claude block type here; degraded to text.
                if (item.audio_url) {
                  const audioUrl = typeof item.audio_url === 'string'
                    ? item.audio_url
                    : item.audio_url.url;
                  content.push({ type: 'text', text: `[Audio: ${audioUrl}]` });
                }
                break;
            }
          });
        }
        if (content.length > 0) {
          claudeMessages.push({ role: role, content: content });
        }
      }
    }
    // Merge adjacent messages with the same role (Claude requires alternation).
    const mergedClaudeMessages = [];
    for (let i = 0; i < claudeMessages.length; i++) {
      const currentMessage = claudeMessages[i];

      if (mergedClaudeMessages.length === 0) {
        mergedClaudeMessages.push(currentMessage);
      } else {
        const lastMessage = mergedClaudeMessages[mergedClaudeMessages.length - 1];

        // Same role as previous message: concatenate the content arrays.
        if (lastMessage.role === currentMessage.role) {
          lastMessage.content = lastMessage.content.concat(currentMessage.content);
        } else {
          mergedClaudeMessages.push(currentMessage);
        }
      }
    }

    // Trim trailing whitespace on the last assistant message (Claude rejects
    // assistant prefill text ending in whitespace).
    if (mergedClaudeMessages.length > 0) {
      const lastMessage = mergedClaudeMessages[mergedClaudeMessages.length - 1];
      if (lastMessage.role === 'assistant' && Array.isArray(lastMessage.content)) {
        // Walk backwards to the last text block.
        for (let i = lastMessage.content.length - 1; i >= 0; i--) {
          const contentBlock = lastMessage.content[i];
          if (contentBlock.type === 'text' && contentBlock.text) {
            // Remove trailing whitespace characters.
            contentBlock.text = contentBlock.text.trimEnd();
            break;
          }
        }
      }
    }


    const claudeRequest = {
      model: openaiRequest.model,
      messages: mergedClaudeMessages,
      max_tokens: checkAndAssignOrDefault(openaiRequest.max_tokens, CLAUDE_DEFAULT_MAX_TOKENS),
      temperature: checkAndAssignOrDefault(openaiRequest.temperature, CLAUDE_DEFAULT_TEMPERATURE),
      top_p: checkAndAssignOrDefault(openaiRequest.top_p, CLAUDE_DEFAULT_TOP_P),
    };

    if (systemInstruction) {
      // NOTE(review): only parts[0] of the extracted system instruction is
      // used; additional system parts would be dropped — confirm intended.
      claudeRequest.system = extractText(systemInstruction.parts[0].text);
    }

    if (openaiRequest.tools?.length) {
      claudeRequest.tools = openaiRequest.tools.map(t => ({
        name: t.function.name,
        description: t.function.description || '',
        input_schema: t.function.parameters || { type: 'object', properties: {} }
      }));
      claudeRequest.tool_choice = this.buildClaudeToolChoice(openaiRequest.tool_choice);
    }

    return claudeRequest;
  }
278
+
279
  /**
   * Convert a non-streaming OpenAI chat-completions response into a Claude
   * message response. Only the first choice is considered. Content blocks are
   * emitted in the order: tool_use, thinking, then text.
   *
   * @param {object} openaiResponse - OpenAI-format response body.
   * @param {string} model - Model name echoed into the Claude response.
   * @returns {object} Claude Messages API response body.
   */
  toClaudeResponse(openaiResponse, model) {
    // No choices at all: synthesize an empty assistant message, preserving
    // any usage figures that are present.
    if (!openaiResponse || !openaiResponse.choices || openaiResponse.choices.length === 0) {
      return {
        id: `msg_${uuidv4()}`,
        type: "message",
        role: "assistant",
        content: [],
        model: model,
        stop_reason: "end_turn",
        stop_sequence: null,
        usage: {
          input_tokens: openaiResponse?.usage?.prompt_tokens || 0,
          output_tokens: openaiResponse?.usage?.completion_tokens || 0
        }
      };
    }

    const choice = openaiResponse.choices[0];
    const contentList = [];

    // Tool calls — supports both tool_calls and function_calls fields.
    const toolCalls = choice.message?.tool_calls || choice.message?.function_calls || [];
    for (const toolCall of toolCalls.filter(tc => tc && typeof tc === 'object')) {
      if (toolCall.function) {
        const func = toolCall.function;
        const argStr = func.arguments || "{}";
        let argObj;
        try {
          argObj = typeof argStr === 'string' ? JSON.parse(argStr) : argStr;
        } catch (e) {
          // Unparseable arguments degrade to an empty object.
          argObj = {};
        }
        contentList.push({
          type: "tool_use",
          id: toolCall.id || "",
          name: func.name || "",
          input: argObj,
        });
      }
    }

    // reasoning_content (model reasoning) → Claude "thinking" block.
    const reasoningContent = choice.message?.reasoning_content || "";
    if (reasoningContent) {
      contentList.push({
        type: "thinking",
        thinking: reasoningContent
      });
    }

    // Text content; extractThinkingFromOpenAIText may split inline thinking
    // markup into multiple blocks (returns an array in that case).
    const contentText = choice.message?.content || "";
    if (contentText) {
      const extractedContent = extractThinkingFromOpenAIText(contentText);
      if (Array.isArray(extractedContent)) {
        contentList.push(...extractedContent);
      } else {
        contentList.push({ type: "text", text: extractedContent });
      }
    }

    // Map the OpenAI finish reason to Anthropic's stop_reason vocabulary.
    const stopReason = mapFinishReason(
      choice.finish_reason || "stop",
      "openai",
      "anthropic"
    );

    return {
      id: `msg_${uuidv4()}`,
      type: "message",
      role: "assistant",
      content: contentList,
      model: model,
      stop_reason: stopReason,
      stop_sequence: null,
      usage: {
        input_tokens: openaiResponse.usage?.prompt_tokens || 0,
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: openaiResponse.usage?.prompt_tokens_details?.cached_tokens || 0,
        output_tokens: openaiResponse.usage?.completion_tokens || 0
      }
    };
  }
366
+
367
  /**
   * OpenAI streaming chunk → Claude streaming events.
   *
   * Implements the inverse of ClaudeConverter.toOpenAIStreamChunk: turns one
   * OpenAI delta chunk into an array of Claude SSE event objects (or a single
   * event for the legacy string input), or null when there is nothing to emit.
   *
   * NOTE(review): all deltas are emitted at content-block index 0 and no
   * message_start/content_block_start is produced here — presumably the
   * caller manages block lifecycle; confirm against the streaming pipeline.
   *
   * @param {object|string} openaiChunk - OpenAI stream chunk (or raw text).
   * @param {string} model - Model name (only used by the commented-out path).
   * @returns {Array<object>|object|null} Claude stream event(s).
   */
  toClaudeStreamChunk(openaiChunk, model) {
    if (!openaiChunk) return null;

    // Handle a structured OpenAI chunk object.
    if (typeof openaiChunk === 'object' && !Array.isArray(openaiChunk)) {
      const choice = openaiChunk.choices?.[0];
      if (!choice) {
        return null;
      }

      const delta = choice.delta;
      const finishReason = choice.finish_reason;
      const events = [];

      // The commented-out sections below were for Claude Code compatibility,
      // but they are incompatible with Cherry Studio.
      // 1. Handle role (would map to message_start)
      // if (delta?.role === "assistant") {
      //   events.push({
      //     type: "message_start",
      //     message: {
      //       id: openaiChunk.id || `msg_${uuidv4()}`,
      //       type: "message",
      //       role: "assistant",
      //       content: [],
      //       model: model || openaiChunk.model || "unknown",
      //       stop_reason: null,
      //       stop_sequence: null,
      //       usage: {
      //         input_tokens: openaiChunk.usage?.prompt_tokens || 0,
      //         output_tokens: 0
      //       }
      //     }
      //   });
      //   events.push({
      //     type: "content_block_start",
      //     index: 0,
      //     content_block: {
      //       type: "text",
      //       text: ""
      //     }
      //   });
      // }

      // 2. Handle tool_calls (would map to content_block_start / content_block_delta)
      // if (delta?.tool_calls) {
      //   const toolCalls = delta.tool_calls;
      //   for (const toolCall of toolCalls) {
      //     // A function.name marks the start of a tool call
      //     if (toolCall.function?.name) {
      //       events.push({
      //         type: "content_block_start",
      //         index: toolCall.index || 0,
      //         content_block: {
      //           type: "tool_use",
      //           id: toolCall.id || `tool_${uuidv4()}`,
      //           name: toolCall.function.name,
      //           input: {}
      //         }
      //       });
      //     }

      //     // function.arguments carries an arguments increment
      //     if (toolCall.function?.arguments) {
      //       events.push({
      //         type: "content_block_delta",
      //         index: toolCall.index || 0,
      //         delta: {
      //           type: "input_json_delta",
      //           partial_json: toolCall.function.arguments
      //         }
      //       });
      //     }
      //   }
      // }

      // 3. reasoning_content → thinking_delta on a "thinking" content block.
      if (delta?.reasoning_content) {
        // A content_block_start may be required before this, but because the
        // state tracking is complex we assume the caller handles that logic.
        events.push({
          type: "content_block_delta",
          index: 0,
          delta: {
            type: "thinking_delta",
            thinking: delta.reasoning_content
          }
        });
      }

      // 4. Plain text content → text_delta on a "text" content block.
      if (delta?.content) {
        events.push({
          type: "content_block_delta",
          index: 0,
          delta: {
            type: "text_delta",
            text: delta.content
          }
        });
      }

      // 5. finish_reason → content_block_stop + message_delta + message_stop.
      if (finishReason) {
        // Map the finish_reason to Claude's stop_reason vocabulary
        // (anything other than "length" collapses to "end_turn").
        const stopReason = finishReason === "stop" ? "end_turn" :
          finishReason === "length" ? "max_tokens" :
          "end_turn";

        events.push({
          type: "content_block_stop",
          index: 0
        });
        // Emit message_delta with final usage figures.
        events.push({
          type: "message_delta",
          delta: {
            stop_reason: stopReason,
            stop_sequence: null
          },
          usage: {
            input_tokens: openaiChunk.usage?.prompt_tokens || 0,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: openaiChunk.usage?.prompt_tokens_details?.cached_tokens || 0,
            output_tokens: openaiChunk.usage?.completion_tokens || 0
          }
        });

        // Emit message_stop.
        events.push({
          type: "message_stop"
        });
      }

      return events.length > 0 ? events : null;
    }

    // Backwards compatibility: a bare string is treated as a text delta.
    if (typeof openaiChunk === 'string') {
      return {
        type: "content_block_delta",
        index: 0,
        delta: {
          type: "text_delta",
          text: openaiChunk
        }
      };
    }

    return null;
  }
523
+
524
+ /**
525
+ * OpenAI模型列表 -> Claude模型列表
526
+ */
527
+ toClaudeModelList(openaiModels) {
528
+ return {
529
+ models: openaiModels.data.map(m => ({
530
+ name: m.id,
531
+ description: "",
532
+ })),
533
+ };
534
+ }
535
+
536
+ /**
537
+ * 将 OpenAI 模型列表转换为 Gemini 模型列表
538
+ */
539
+ toGeminiModelList(openaiModels) {
540
+ const models = openaiModels.data || [];
541
+ return {
542
+ models: models.map(m => ({
543
+ name: `models/${m.id}`,
544
+ version: m.version || "1.0.0",
545
+ displayName: m.displayName || m.id,
546
+ description: m.description || `A generative model for text and chat generation. ID: ${m.id}`,
547
+ inputTokenLimit: m.inputTokenLimit || OPENAI_DEFAULT_INPUT_TOKEN_LIMIT,
548
+ outputTokenLimit: m.outputTokenLimit || OPENAI_DEFAULT_OUTPUT_TOKEN_LIMIT,
549
+ supportedGenerationMethods: m.supportedGenerationMethods || ["generateContent", "streamGenerateContent"]
550
+ }))
551
+ };
552
+ }
553
+
554
+ /**
555
+ * 构建Claude工具选择
556
+ */
557
+ buildClaudeToolChoice(toolChoice) {
558
+ if (typeof toolChoice === 'string') {
559
+ const mapping = { auto: 'auto', none: 'none', required: 'any' };
560
+ return { type: mapping[toolChoice] };
561
+ }
562
+ if (typeof toolChoice === 'object' && toolChoice.function) {
563
+ return { type: 'tool', name: toolChoice.function.name };
564
+ }
565
+ return undefined;
566
+ }
567
+
568
+ // =========================================================================
569
+ // OpenAI -> Gemini 转换
570
+ // =========================================================================
571
+
572
  // Sentinel thoughtSignature attached to parts synthesized from OpenAI
  // input (inlineData images and functionCall parts in toGeminiRequest).
  // Judging by its value, it presumably tells the Gemini backend to skip
  // thought-signature validation for these parts — TODO confirm against the
  // Gemini provider implementation.
  static GEMINI_OPENAI_THOUGHT_SIGNATURE = "skip_thought_signature_validator";
574
  /**
   * Convert an OpenAI chat-completions request into a Gemini
   * generateContent request.
   *
   * Covers: system → system_instruction (or a user message when it is the
   * only message), user text/image/file parts, assistant text and tool_calls
   * (→ functionCall parts, paired with functionResponse parts built from
   * `role: "tool"` messages), reasoning_effort and the Cherry Studio
   * extra_body thinking_config extension, modalities, OpenRouter-style
   * image_config, tools/tool_choice, generation config, and default safety
   * settings.
   *
   * @param {object} openaiRequest - OpenAI-format request body.
   * @returns {object} Gemini generateContent request body.
   */
  toGeminiRequest(openaiRequest) {
    const messages = openaiRequest.messages || [];
    const model = openaiRequest.model || '';

    // Build a tool_call_id → function_name map from assistant tool calls.
    const tcID2Name = {};
    for (const message of messages) {
      if (message.role === 'assistant' && message.tool_calls) {
        for (const tc of message.tool_calls) {
          if (tc.type === 'function' && tc.id && tc.function?.name) {
            tcID2Name[tc.id] = tc.function.name;
          }
        }
      }
    }

    // Build a tool_call_id → tool response content map.
    const toolResponses = {};
    for (const message of messages) {
      if (message.role === 'tool' && message.tool_call_id) {
        toolResponses[message.tool_call_id] = message.content;
      }
    }

    const processedMessages = [];
    let systemInstruction = null;

    for (let i = 0; i < messages.length; i++) {
      const message = messages[i];
      const role = message.role;
      const content = message.content;

      if (role === 'system') {
        // system → system_instruction (when other messages exist).
        if (messages.length > 1) {
          if (typeof content === 'string') {
            systemInstruction = {
              role: 'user',
              parts: [{ text: content }]
            };
          } else if (Array.isArray(content)) {
            const parts = content
              .filter(item => item.type === 'text' && item.text)
              .map(item => ({ text: item.text }));
            if (parts.length > 0) {
              systemInstruction = {
                role: 'user',
                parts: parts
              };
            }
          } else if (typeof content === 'object' && content.type === 'text') {
            // NOTE(review): a null content passes `typeof === 'object'` and
            // would throw on `.type`; content?.type would be safer — confirm
            // upstream never sends a null system content.
            systemInstruction = {
              role: 'user',
              parts: [{ text: content.text }]
            };
          }
        } else {
          // A lone system message is sent as a user message instead.
          const node = { role: 'user', parts: [] };
          if (typeof content === 'string') {
            node.parts.push({ text: content });
          } else if (Array.isArray(content)) {
            for (const item of content) {
              if (item.type === 'text' && item.text) {
                node.parts.push({ text: item.text });
              }
            }
          }
          if (node.parts.length > 0) {
            processedMessages.push(node);
          }
        }
      } else if (role === 'user') {
        // user → user content node.
        const node = { role: 'user', parts: [] };
        if (typeof content === 'string') {
          node.parts.push({ text: content });
        } else if (Array.isArray(content)) {
          for (const item of content) {
            if (!item) continue;
            switch (item.type) {
              case 'text':
                if (item.text) {
                  node.parts.push({ text: item.text });
                }
                break;
              case 'image_url':
                if (item.image_url) {
                  const imageUrl = typeof item.image_url === 'string'
                    ? item.image_url
                    : item.image_url.url;
                  if (imageUrl && imageUrl.startsWith('data:')) {
                    // Parse "data:<mime>;<enc>,<payload>" manually.
                    const commaIndex = imageUrl.indexOf(',');
                    if (commaIndex > 5) {
                      const header = imageUrl.substring(5, commaIndex);
                      const semicolonIndex = header.indexOf(';');
                      if (semicolonIndex > 0) {
                        const mimeType = header.substring(0, semicolonIndex);
                        const data = imageUrl.substring(commaIndex + 1);
                        node.parts.push({
                          inlineData: {
                            mimeType: mimeType,
                            data: data
                          },
                          thoughtSignature: OpenAIConverter.GEMINI_OPENAI_THOUGHT_SIGNATURE
                        });
                      }
                    }
                  } else if (imageUrl) {
                    // Remote URL → fileData reference.
                    node.parts.push({
                      fileData: {
                        mimeType: 'image/jpeg',
                        fileUri: imageUrl
                      }
                    });
                  }
                }
                break;
              case 'file':
                if (item.file) {
                  const filename = item.file.filename || '';
                  const fileData = item.file.file_data || '';
                  const ext = filename.includes('.')
                    ? filename.split('.').pop().toLowerCase()
                    : '';
                  // File-extension → MIME type lookup; files with an unknown
                  // extension (or no data) are silently dropped below.
                  const mimeTypes = {
                    'pdf': 'application/pdf',
                    'txt': 'text/plain',
                    'html': 'text/html',
                    'css': 'text/css',
                    'js': 'application/javascript',
                    'json': 'application/json',
                    'xml': 'application/xml',
                    'csv': 'text/csv',
                    'md': 'text/markdown',
                    'py': 'text/x-python',
                    'java': 'text/x-java',
                    'c': 'text/x-c',
                    'cpp': 'text/x-c++',
                    'h': 'text/x-c',
                    'hpp': 'text/x-c++',
                    'go': 'text/x-go',
                    'rs': 'text/x-rust',
                    'ts': 'text/typescript',
                    'tsx': 'text/typescript',
                    'jsx': 'text/javascript',
                    'png': 'image/png',
                    'jpg': 'image/jpeg',
                    'jpeg': 'image/jpeg',
                    'gif': 'image/gif',
                    'webp': 'image/webp',
                    'svg': 'image/svg+xml',
                    'mp3': 'audio/mpeg',
                    'wav': 'audio/wav',
                    'mp4': 'video/mp4',
                    'webm': 'video/webm'
                  };
                  const mimeType = mimeTypes[ext];
                  if (mimeType && fileData) {
                    node.parts.push({
                      inlineData: {
                        mimeType: mimeType,
                        data: fileData
                      }
                    });
                  }
                }
                break;
            }
          }
        }
        if (node.parts.length > 0) {
          processedMessages.push(node);
        }
      } else if (role === 'assistant') {
        // assistant → model content node.
        const node = { role: 'model', parts: [] };

        // Text (and inline image) content.
        if (typeof content === 'string' && content) {
          node.parts.push({ text: content });
        } else if (Array.isArray(content)) {
          for (const item of content) {
            if (!item) continue;
            if (item.type === 'text' && item.text) {
              node.parts.push({ text: item.text });
            } else if (item.type === 'image_url' && item.image_url) {
              const imageUrl = typeof item.image_url === 'string'
                ? item.image_url
                : item.image_url.url;
              if (imageUrl && imageUrl.startsWith('data:')) {
                const commaIndex = imageUrl.indexOf(',');
                if (commaIndex > 5) {
                  const header = imageUrl.substring(5, commaIndex);
                  const semicolonIndex = header.indexOf(';');
                  if (semicolonIndex > 0) {
                    const mimeType = header.substring(0, semicolonIndex);
                    const data = imageUrl.substring(commaIndex + 1);
                    node.parts.push({
                      inlineData: {
                        mimeType: mimeType,
                        data: data
                      },
                      thoughtSignature: OpenAIConverter.GEMINI_OPENAI_THOUGHT_SIGNATURE
                    });
                  }
                }
              }
            }
          }
        }

        // tool_calls → functionCall parts.
        if (message.tool_calls && Array.isArray(message.tool_calls)) {
          const functionCallIds = [];
          for (const tc of message.tool_calls) {
            if (tc.type !== 'function') continue;
            const fid = tc.id || '';
            const fname = tc.function?.name || '';
            const fargs = tc.function?.arguments || '{}';

            let argsObj;
            try {
              argsObj = typeof fargs === 'string' ? JSON.parse(fargs) : fargs;
            } catch (e) {
              // Unparseable arguments degrade to an empty object.
              argsObj = {};
            }

            node.parts.push({
              functionCall: {
                name: fname,
                args: argsObj
              },
              thoughtSignature: OpenAIConverter.GEMINI_OPENAI_THOUGHT_SIGNATURE
            });

            if (fid) {
              functionCallIds.push(fid);
            }
          }

          // Append the model message.
          if (node.parts.length > 0) {
            processedMessages.push(node);
          }

          // Append the matching functionResponse parts as a user message,
          // pairing each call id with its recorded tool response.
          if (functionCallIds.length > 0) {
            const toolNode = { role: 'user', parts: [] };
            for (const fid of functionCallIds) {
              const name = tcID2Name[fid];
              if (name) {
                let resp = toolResponses[fid] || '{}';
                // Ensure resp is a string.
                if (typeof resp !== 'string') {
                  resp = JSON.stringify(resp);
                }
                toolNode.parts.push({
                  functionResponse: {
                    name: name,
                    response: {
                      result: resp
                    }
                  }
                });
              }
            }
            if (toolNode.parts.length > 0) {
              processedMessages.push(toolNode);
            }
          }
        } else {
          // No tool_calls: push the node directly.
          if (node.parts.length > 0) {
            processedMessages.push(node);
          }
        }
      }
      // "tool" messages were already merged during the assistant
      // tool_calls handling above, so they are skipped here.
    }

    // Assemble the Gemini request, dropping any empty content nodes.
    const geminiRequest = {
      contents: processedMessages.filter(item => item.parts && item.parts.length > 0)
    };

    // Attach the model name.
    if (model) {
      geminiRequest.model = model;
    }

    // Attach the system instruction.
    if (systemInstruction) {
      geminiRequest.system_instruction = systemInstruction;
    }

    // reasoning_effort → thinkingConfig.
    if (openaiRequest.reasoning_effort) {
      const effort = String(openaiRequest.reasoning_effort).toLowerCase().trim();
      if (this.modelSupportsThinking(model)) {
        if (this.isGemini3Model(model)) {
          // Gemini 3 models use thinkingLevel.
          if (effort === 'none') {
            // "none": no thinkingConfig is added at all.
          } else if (effort === 'auto') {
            geminiRequest.generationConfig = geminiRequest.generationConfig || {};
            geminiRequest.generationConfig.thinkingConfig = {
              includeThoughts: true
            };
          } else {
            const level = this.validateGemini3ThinkingLevel(model, effort);
            if (level) {
              geminiRequest.generationConfig = geminiRequest.generationConfig || {};
              geminiRequest.generationConfig.thinkingConfig = {
                thinkingLevel: level
              };
            }
          }
        } else if (!this.modelUsesThinkingLevels(model)) {
          // Budget-based thinking models.
          geminiRequest.generationConfig = geminiRequest.generationConfig || {};
          geminiRequest.generationConfig.thinkingConfig = this.applyReasoningEffortToGemini(effort);
        }
      }
    }

    // extra_body.google.thinking_config (Cherry Studio extension), only when
    // reasoning_effort did not already decide the thinking configuration.
    if (!openaiRequest.reasoning_effort && openaiRequest.extra_body?.google?.thinking_config) {
      const tc = openaiRequest.extra_body.google.thinking_config;
      if (this.modelSupportsThinking(model) && !this.modelUsesThinkingLevels(model)) {
        geminiRequest.generationConfig = geminiRequest.generationConfig || {};
        geminiRequest.generationConfig.thinkingConfig = geminiRequest.generationConfig.thinkingConfig || {};

        let setBudget = false;
        let budget = 0;

        // Accept both camelCase and snake_case budget keys.
        if (tc.thinkingBudget !== undefined) {
          budget = parseInt(tc.thinkingBudget, 10);
          geminiRequest.generationConfig.thinkingConfig.thinkingBudget = budget;
          setBudget = true;
        } else if (tc.thinking_budget !== undefined) {
          budget = parseInt(tc.thinking_budget, 10);
          geminiRequest.generationConfig.thinkingConfig.thinkingBudget = budget;
          setBudget = true;
        }

        // includeThoughts: explicit value wins; otherwise a non-zero budget
        // implies thoughts should be included.
        if (tc.includeThoughts !== undefined) {
          geminiRequest.generationConfig.thinkingConfig.includeThoughts = tc.includeThoughts;
        } else if (tc.include_thoughts !== undefined) {
          geminiRequest.generationConfig.thinkingConfig.includeThoughts = tc.include_thoughts;
        } else if (setBudget && budget !== 0) {
          geminiRequest.generationConfig.thinkingConfig.includeThoughts = true;
        }
      }
    }

    // modalities → responseModalities (only "text" and "image" recognized).
    if (openaiRequest.modalities && Array.isArray(openaiRequest.modalities)) {
      const responseMods = [];
      for (const m of openaiRequest.modalities) {
        const mod = String(m).toLowerCase();
        if (mod === 'text') {
          responseMods.push('TEXT');
        } else if (mod === 'image') {
          responseMods.push('IMAGE');
        }
      }
      if (responseMods.length > 0) {
        geminiRequest.generationConfig = geminiRequest.generationConfig || {};
        geminiRequest.generationConfig.responseModalities = responseMods;
      }
    }

    // image_config (OpenRouter style) → generationConfig.imageConfig.
    if (openaiRequest.image_config) {
      const imgCfg = openaiRequest.image_config;
      if (imgCfg.aspect_ratio) {
        geminiRequest.generationConfig = geminiRequest.generationConfig || {};
        geminiRequest.generationConfig.imageConfig = geminiRequest.generationConfig.imageConfig || {};
        geminiRequest.generationConfig.imageConfig.aspectRatio = imgCfg.aspect_ratio;
      }
      if (imgCfg.image_size) {
        geminiRequest.generationConfig = geminiRequest.generationConfig || {};
        geminiRequest.generationConfig.imageConfig = geminiRequest.generationConfig.imageConfig || {};
        geminiRequest.generationConfig.imageConfig.imageSize = imgCfg.image_size;
      }
    }

    // tools → functionDeclarations (plus google_search passthrough).
    if (openaiRequest.tools?.length) {
      const functionDeclarations = [];
      let hasGoogleSearch = false;

      for (const t of openaiRequest.tools) {
        if (!t || typeof t !== 'object') continue;

        if (t.type === 'function' && t.function) {
          const func = t.function;
          let fnDecl = {
            name: String(func.name || ''),
            description: String(func.description || '')
          };

          // parameters → parametersJsonSchema (cleaned of unsupported keys).
          if (func.parameters) {
            fnDecl.parametersJsonSchema = cleanJsonSchema(func.parameters);
          } else {
            fnDecl.parametersJsonSchema = {
              type: 'object',
              properties: {}
            };
          }

          functionDeclarations.push(fnDecl);
        }

        // google_search tool flag.
        if (t.google_search) {
          hasGoogleSearch = true;
        }
      }

      if (functionDeclarations.length > 0 || hasGoogleSearch) {
        geminiRequest.tools = [{}];
        if (functionDeclarations.length > 0) {
          geminiRequest.tools[0].functionDeclarations = functionDeclarations;
        }
        if (hasGoogleSearch) {
          // Only the first google_search tool is forwarded.
          const googleSearchTool = openaiRequest.tools.find(t => t.google_search);
          geminiRequest.tools[0].googleSearch = googleSearchTool.google_search;
        }
      }
    }

    // tool_choice → toolConfig.
    if (openaiRequest.tool_choice) {
      geminiRequest.toolConfig = this.buildGeminiToolConfig(openaiRequest.tool_choice);
    }

    // Base generationConfig; values already set above (thinking, modalities,
    // image config) take precedence over the base config.
    const config = this.buildGeminiGenerationConfig(openaiRequest, model);
    if (Object.keys(config).length) {
      geminiRequest.generationConfig = {
        ...config,
        ...(geminiRequest.generationConfig || {})
      };
    }

    // Attach default safety settings.
    geminiRequest.safetySettings = this.getDefaultSafetySettings();

    return geminiRequest;
  }
1030
+
1031
+ /**
1032
+ * 检查模型是否支持 thinking
1033
+ */
1034
+ modelSupportsThinking(model) {
1035
+ if (!model) return false;
1036
+ const m = model.toLowerCase();
1037
+ return m.includes('2.5') || m.includes('thinking') || m.includes('2.0-flash-thinking');
1038
+ }
1039
+
1040
+ /**
1041
+ * 检查是否是 Gemini 3 模型
1042
+ */
1043
+ isGemini3Model(model) {
1044
+ if (!model) return false;
1045
+ const m = model.toLowerCase();
1046
+ return m.includes('gemini-3') || m.includes('gemini3');
1047
+ }
1048
+
1049
+ /**
1050
+ * 检查模型是否使用 thinking levels(而不是 budget)
1051
+ */
1052
+ modelUsesThinkingLevels(model) {
1053
+ if (!model) return false;
1054
+ // Gemini 3 模型使用 levels,其他使用 budget
1055
+ return this.isGemini3Model(model);
1056
+ }
1057
+
1058
+ /**
1059
+ * 验证 Gemini 3 thinking level
1060
+ */
1061
+ validateGemini3ThinkingLevel(model, effort) {
1062
+ const validLevels = ['low', 'medium', 'high'];
1063
+ if (validLevels.includes(effort)) {
1064
+ return effort.toUpperCase();
1065
+ }
1066
+ return null;
1067
+ }
1068
+
1069
+ /**
1070
+ * 将 reasoning_effort 转换为 Gemini thinkingConfig
1071
+ */
1072
+ applyReasoningEffortToGemini(effort) {
1073
+ const effortToBudget = {
1074
+ 'low': 1024,
1075
+ 'medium': 8192,
1076
+ 'high': 24576
1077
+ };
1078
+ const budget = effortToBudget[effort] || effortToBudget['medium'];
1079
+ return {
1080
+ thinkingBudget: budget,
1081
+ includeThoughts: true
1082
+ };
1083
+ }
1084
+
1085
+ /**
1086
+ * 获取默认安全设置
1087
+ */
1088
+ getDefaultSafetySettings() {
1089
+ return [
1090
+ { category: "HARM_CATEGORY_HARASSMENT", threshold: "OFF" },
1091
+ { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "OFF" },
1092
+ { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "OFF" },
1093
+ { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "OFF" },
1094
+ { category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "OFF" }
1095
+ ];
1096
+ }
1097
+
1098
+ /**
1099
+ * 处理OpenAI内容到Gemini parts
1100
+ */
1101
+ processOpenAIContentToGeminiParts(content) {
1102
+ if (!content) return [];
1103
+ if (typeof content === 'string') return [{ text: content }];
1104
+
1105
+ if (Array.isArray(content)) {
1106
+ const parts = [];
1107
+
1108
+ for (const item of content) {
1109
+ if (!item) continue;
1110
+
1111
+ if (item.type === 'text' && item.text) {
1112
+ parts.push({ text: item.text });
1113
+ } else if (item.type === 'image_url' && item.image_url) {
1114
+ const imageUrl = typeof item.image_url === 'string'
1115
+ ? item.image_url
1116
+ : item.image_url.url;
1117
+
1118
+ if (imageUrl.startsWith('data:')) {
1119
+ const [header, data] = imageUrl.split(',');
1120
+ const mimeType = header.match(/data:([^;]+)/)?.[1] || 'image/jpeg';
1121
+ parts.push({ inlineData: { mimeType, data } });
1122
+ } else {
1123
+ parts.push({
1124
+ fileData: { mimeType: 'image/jpeg', fileUri: imageUrl }
1125
+ });
1126
+ }
1127
+ }
1128
+ }
1129
+
1130
+ return parts;
1131
+ }
1132
+
1133
+ return [];
1134
+ }
1135
+
1136
+ /**
1137
+ * 构建Gemini工具配置
1138
+ */
1139
+ buildGeminiToolConfig(toolChoice) {
1140
+ if (typeof toolChoice === 'string' && ['none', 'auto'].includes(toolChoice)) {
1141
+ return { functionCallingConfig: { mode: toolChoice.toUpperCase() } };
1142
+ }
1143
+ if (typeof toolChoice === 'object' && toolChoice.function) {
1144
+ return { functionCallingConfig: { mode: 'ANY', allowedFunctionNames: [toolChoice.function.name] } };
1145
+ }
1146
+ return null;
1147
+ }
1148
+
1149
+ /**
1150
+ * 构建Gemini生成配置
1151
+ */
1152
+ buildGeminiGenerationConfig({ temperature, max_tokens, top_p, stop, tools, response_format }, model) {
1153
+ const config = {};
1154
+ config.temperature = checkAndAssignOrDefault(temperature, GEMINI_DEFAULT_TEMPERATURE);
1155
+ config.maxOutputTokens = checkAndAssignOrDefault(max_tokens, GEMINI_DEFAULT_MAX_TOKENS);
1156
+ config.topP = checkAndAssignOrDefault(top_p, GEMINI_DEFAULT_TOP_P);
1157
+ if (stop !== undefined) config.stopSequences = Array.isArray(stop) ? stop : [stop];
1158
+
1159
+ // Handle response_format
1160
+ if (response_format) {
1161
+ if (response_format.type === 'json_object') {
1162
+ config.responseMimeType = 'application/json';
1163
+ } else if (response_format.type === 'json_schema' && response_format.json_schema) {
1164
+ config.responseMimeType = 'application/json';
1165
+ if (response_format.json_schema.schema) {
1166
+ config.responseSchema = response_format.json_schema.schema;
1167
+ }
1168
+ }
1169
+ }
1170
+
1171
+ // Gemini 2.5 and thinking models require responseModalities: ["TEXT"]
1172
+ // But this parameter cannot be added when using tools (causes 400 error)
1173
+ const hasTools = tools && Array.isArray(tools) && tools.length > 0;
1174
+ if (!hasTools && model && (model.includes('2.5') || model.includes('thinking') || model.includes('2.0-flash-thinking'))) {
1175
+ console.log(`[OpenAI->Gemini] Adding responseModalities: ["TEXT"] for model: ${model}`);
1176
+ config.responseModalities = ["TEXT"];
1177
+ } else if (hasTools && model && (model.includes('2.5') || model.includes('thinking') || model.includes('2.0-flash-thinking'))) {
1178
+ console.log(`[OpenAI->Gemini] Skipping responseModalities for model ${model} because tools are present`);
1179
+ }
1180
+
1181
+ return config;
1182
+ }
1183
+ /**
1184
+ * 将OpenAI响应转换为Gemini响应格式
1185
+ */
1186
+ toGeminiResponse(openaiResponse, model) {
1187
+ if (!openaiResponse || !openaiResponse.choices || !openaiResponse.choices[0]) {
1188
+ return { candidates: [], usageMetadata: {} };
1189
+ }
1190
+
1191
+ const choice = openaiResponse.choices[0];
1192
+ const message = choice.message || {};
1193
+ const parts = [];
1194
+
1195
+ // 处理文本内容
1196
+ if (message.content) {
1197
+ parts.push({ text: message.content });
1198
+ }
1199
+
1200
+ // 处理工具调用
1201
+ if (message.tool_calls && message.tool_calls.length > 0) {
1202
+ for (const toolCall of message.tool_calls) {
1203
+ if (toolCall.type === 'function') {
1204
+ parts.push({
1205
+ functionCall: {
1206
+ name: toolCall.function.name,
1207
+ args: typeof toolCall.function.arguments === 'string'
1208
+ ? JSON.parse(toolCall.function.arguments)
1209
+ : toolCall.function.arguments
1210
+ }
1211
+ });
1212
+ }
1213
+ }
1214
+ }
1215
+
1216
+ // 映射finish_reason
1217
+ const finishReasonMap = {
1218
+ 'stop': 'STOP',
1219
+ 'length': 'MAX_TOKENS',
1220
+ 'tool_calls': 'STOP',
1221
+ 'content_filter': 'SAFETY'
1222
+ };
1223
+
1224
+ return {
1225
+ candidates: [{
1226
+ content: {
1227
+ role: 'model',
1228
+ parts: parts
1229
+ },
1230
+ finishReason: finishReasonMap[choice.finish_reason] || 'STOP'
1231
+ }],
1232
+ usageMetadata: openaiResponse.usage ? {
1233
+ promptTokenCount: openaiResponse.usage.prompt_tokens || 0,
1234
+ candidatesTokenCount: openaiResponse.usage.completion_tokens || 0,
1235
+ totalTokenCount: openaiResponse.usage.total_tokens || 0,
1236
+ cachedContentTokenCount: openaiResponse.usage.prompt_tokens_details?.cached_tokens || 0,
1237
+ promptTokensDetails: [{
1238
+ modality: "TEXT",
1239
+ tokenCount: openaiResponse.usage.prompt_tokens || 0
1240
+ }],
1241
+ candidatesTokensDetails: [{
1242
+ modality: "TEXT",
1243
+ tokenCount: openaiResponse.usage.completion_tokens || 0
1244
+ }],
1245
+ thoughtsTokenCount: openaiResponse.usage.completion_tokens_details?.reasoning_tokens || 0
1246
+ } : {}
1247
+ };
1248
+ }
1249
+
1250
+ /**
1251
+ * 将OpenAI流式响应块转换为Gemini流式响应格式
1252
+ */
1253
+ toGeminiStreamChunk(openaiChunk, model) {
1254
+ if (!openaiChunk || !openaiChunk.choices || !openaiChunk.choices[0]) {
1255
+ return null;
1256
+ }
1257
+
1258
+ const choice = openaiChunk.choices[0];
1259
+ const delta = choice.delta || {};
1260
+ const parts = [];
1261
+
1262
+ // 处理文本内容
1263
+ if (delta.content) {
1264
+ parts.push({ text: delta.content });
1265
+ }
1266
+
1267
+ // 处理工具调用
1268
+ if (delta.tool_calls && delta.tool_calls.length > 0) {
1269
+ for (const toolCall of delta.tool_calls) {
1270
+ if (toolCall.function) {
1271
+ const functionCall = {
1272
+ name: toolCall.function.name || '',
1273
+ args: {}
1274
+ };
1275
+
1276
+ if (toolCall.function.arguments) {
1277
+ try {
1278
+ functionCall.args = typeof toolCall.function.arguments === 'string'
1279
+ ? JSON.parse(toolCall.function.arguments)
1280
+ : toolCall.function.arguments;
1281
+ } catch (e) {
1282
+ // 部分参数,保持为字符串
1283
+ functionCall.args = { partial: toolCall.function.arguments };
1284
+ }
1285
+ }
1286
+
1287
+ parts.push({ functionCall });
1288
+ }
1289
+ }
1290
+ }
1291
+
1292
+ const result = {
1293
+ candidates: [{
1294
+ content: {
1295
+ role: 'model',
1296
+ parts: parts
1297
+ }
1298
+ }]
1299
+ };
1300
+
1301
+ // 添加finish_reason(如果存在)
1302
+ if (choice.finish_reason) {
1303
+ const finishReasonMap = {
1304
+ 'stop': 'STOP',
1305
+ 'length': 'MAX_TOKENS',
1306
+ 'tool_calls': 'STOP',
1307
+ 'content_filter': 'SAFETY'
1308
+ };
1309
+ result.candidates[0].finishReason = finishReasonMap[choice.finish_reason] || 'STOP';
1310
+ }
1311
+
1312
+ // 添加usage信息(如果存在)
1313
+ if (openaiChunk.usage) {
1314
+ result.usageMetadata = {
1315
+ promptTokenCount: openaiChunk.usage.prompt_tokens || 0,
1316
+ candidatesTokenCount: openaiChunk.usage.completion_tokens || 0,
1317
+ totalTokenCount: openaiChunk.usage.total_tokens || 0,
1318
+ cachedContentTokenCount: openaiChunk.usage.prompt_tokens_details?.cached_tokens || 0,
1319
+ promptTokensDetails: [{
1320
+ modality: "TEXT",
1321
+ tokenCount: openaiChunk.usage.prompt_tokens || 0
1322
+ }],
1323
+ candidatesTokensDetails: [{
1324
+ modality: "TEXT",
1325
+ tokenCount: openaiChunk.usage.completion_tokens || 0
1326
+ }],
1327
+ thoughtsTokenCount: openaiChunk.usage.completion_tokens_details?.reasoning_tokens || 0
1328
+ };
1329
+ }
1330
+
1331
+ return result;
1332
+ }
1333
+
1334
+ /**
1335
+ * OpenAI请求 -> Codex请求(委托给 CodexConverter)
1336
+ */
1337
+ toCodexRequest(openaiRequest) {
1338
+ return this.codexConverter.toCodexRequest(openaiRequest);
1339
+ }
1340
+
1341
+ /**
1342
+ * 将OpenAI请求转换为OpenAI Responses格式
1343
+ */
1344
+ toOpenAIResponsesRequest(openaiRequest) {
1345
+ const responsesRequest = {
1346
+ model: openaiRequest.model,
1347
+ messages: []
1348
+ };
1349
+
1350
+ // 转换messages
1351
+ if (openaiRequest.messages && openaiRequest.messages.length > 0) {
1352
+ responsesRequest.messages = openaiRequest.messages.map(msg => ({
1353
+ role: msg.role,
1354
+ content: typeof msg.content === 'string'
1355
+ ? [{ type: 'input_text', text: msg.content }]
1356
+ : msg.content
1357
+ }));
1358
+ }
1359
+
1360
+ // 转换其他参数
1361
+ if (openaiRequest.temperature !== undefined) {
1362
+ responsesRequest.temperature = openaiRequest.temperature;
1363
+ }
1364
+ if (openaiRequest.max_tokens !== undefined) {
1365
+ responsesRequest.max_output_tokens = openaiRequest.max_tokens;
1366
+ }
1367
+ if (openaiRequest.top_p !== undefined) {
1368
+ responsesRequest.top_p = openaiRequest.top_p;
1369
+ }
1370
+ if (openaiRequest.tools) {
1371
+ responsesRequest.tools = openaiRequest.tools;
1372
+ }
1373
+ if (openaiRequest.tool_choice) {
1374
+ responsesRequest.tool_choice = openaiRequest.tool_choice;
1375
+ }
1376
+
1377
+ return responsesRequest;
1378
+ }
1379
+
1380
+ /**
1381
+ * 将OpenAI响应转换为OpenAI Responses格式
1382
+ */
1383
+ toOpenAIResponsesResponse(openaiResponse, model) {
1384
+ if (!openaiResponse || !openaiResponse.choices || !openaiResponse.choices[0]) {
1385
+ return {
1386
+ id: `resp_${Date.now()}`,
1387
+ object: 'response',
1388
+ created_at: Math.floor(Date.now() / 1000),
1389
+ status: 'completed',
1390
+ model: model || 'unknown',
1391
+ output: [],
1392
+ usage: {
1393
+ input_tokens: 0,
1394
+ output_tokens: 0,
1395
+ total_tokens: 0
1396
+ }
1397
+ };
1398
+ }
1399
+
1400
+ const choice = openaiResponse.choices[0];
1401
+ const message = choice.message || {};
1402
+ const output = [];
1403
+
1404
+ // 构建message输出
1405
+ const messageContent = [];
1406
+ if (message.content) {
1407
+ messageContent.push({
1408
+ type: 'output_text',
1409
+ text: message.content
1410
+ });
1411
+ }
1412
+
1413
+ output.push({
1414
+ type: 'message',
1415
+ id: `msg_${Date.now()}`,
1416
+ status: 'completed',
1417
+ role: 'assistant',
1418
+ content: messageContent
1419
+ });
1420
+
1421
+ return {
1422
+ id: openaiResponse.id || `resp_${Date.now()}`,
1423
+ object: 'response',
1424
+ created_at: openaiResponse.created || Math.floor(Date.now() / 1000),
1425
+ status: choice.finish_reason === 'stop' ? 'completed' : 'in_progress',
1426
+ model: model || openaiResponse.model || 'unknown',
1427
+ output: output,
1428
+ usage: openaiResponse.usage ? {
1429
+ input_tokens: openaiResponse.usage.prompt_tokens || 0,
1430
+ input_tokens_details: {
1431
+ cached_tokens: openaiResponse.usage.prompt_tokens_details?.cached_tokens || 0
1432
+ },
1433
+ output_tokens: openaiResponse.usage.completion_tokens || 0,
1434
+ output_tokens_details: {
1435
+ reasoning_tokens: openaiResponse.usage.completion_tokens_details?.reasoning_tokens || 0
1436
+ },
1437
+ total_tokens: openaiResponse.usage.total_tokens || 0
1438
+ } : {
1439
+ input_tokens: 0,
1440
+ input_tokens_details: {
1441
+ cached_tokens: 0
1442
+ },
1443
+ output_tokens: 0,
1444
+ output_tokens_details: {
1445
+ reasoning_tokens: 0
1446
+ },
1447
+ total_tokens: 0
1448
+ }
1449
+ };
1450
+ }
1451
+
1452
+ /**
1453
+ * 将OpenAI流式响应转换为OpenAI Responses流式格式
1454
+ * 参考 ClaudeConverter.toOpenAIResponsesStreamChunk 的实现逻辑
1455
+ */
1456
+ toOpenAIResponsesStreamChunk(openaiChunk, model, requestId = null) {
1457
+ if (!openaiChunk || !openaiChunk.choices || !openaiChunk.choices[0]) {
1458
+ return [];
1459
+ }
1460
+
1461
+ const responseId = requestId || `resp_${uuidv4().replace(/-/g, '')}`;
1462
+ const choice = openaiChunk.choices[0];
1463
+ const delta = choice.delta || {};
1464
+ const events = [];
1465
+
1466
+ // 第一个chunk - role为assistant时调用 getOpenAIResponsesStreamChunkBegin
1467
+ if (delta.role === 'assistant') {
1468
+ events.push(
1469
+ generateResponseCreated(responseId, model || openaiChunk.model || 'unknown'),
1470
+ generateResponseInProgress(responseId),
1471
+ generateOutputItemAdded(responseId),
1472
+ generateContentPartAdded(responseId)
1473
+ );
1474
+ }
1475
+
1476
+ // 处理 reasoning_content(推理内容)
1477
+ if (delta.reasoning_content) {
1478
+ events.push({
1479
+ delta: delta.reasoning_content,
1480
+ item_id: `thinking_${uuidv4().replace(/-/g, '')}`,
1481
+ output_index: 0,
1482
+ sequence_number: 3,
1483
+ type: "response.reasoning_summary_text.delta"
1484
+ });
1485
+ }
1486
+
1487
+ // 处理 tool_calls(工具调用)
1488
+ if (delta.tool_calls && delta.tool_calls.length > 0) {
1489
+ for (const toolCall of delta.tool_calls) {
1490
+ const outputIndex = toolCall.index || 0;
1491
+
1492
+ // 如果有 function.name,说明是工具调用开始
1493
+ if (toolCall.function && toolCall.function.name) {
1494
+ events.push({
1495
+ item: {
1496
+ id: toolCall.id || `call_${uuidv4().replace(/-/g, '')}`,
1497
+ type: "function_call",
1498
+ name: toolCall.function.name,
1499
+ arguments: "",
1500
+ status: "in_progress"
1501
+ },
1502
+ output_index: outputIndex,
1503
+ sequence_number: 2,
1504
+ type: "response.output_item.added"
1505
+ });
1506
+ }
1507
+
1508
+ // 如果有 function.arguments,说明是参数增量
1509
+ if (toolCall.function && toolCall.function.arguments) {
1510
+ events.push({
1511
+ delta: toolCall.function.arguments,
1512
+ item_id: toolCall.id || `call_${uuidv4().replace(/-/g, '')}`,
1513
+ output_index: outputIndex,
1514
+ sequence_number: 3,
1515
+ type: "response.custom_tool_call_input.delta"
1516
+ });
1517
+ }
1518
+ }
1519
+ }
1520
+
1521
+ // 处理普通文本内容
1522
+ if (delta.content) {
1523
+ events.push({
1524
+ delta: delta.content,
1525
+ item_id: `msg_${uuidv4().replace(/-/g, '')}`,
1526
+ output_index: 0,
1527
+ sequence_number: 3,
1528
+ type: "response.output_text.delta"
1529
+ });
1530
+ }
1531
+
1532
+ // 处理完成状态 - 调用 getOpenAIResponsesStreamChunkEnd
1533
+ if (choice.finish_reason) {
1534
+ events.push(
1535
+ generateOutputTextDone(responseId),
1536
+ generateContentPartDone(responseId),
1537
+ generateOutputItemDone(responseId),
1538
+ generateResponseCompleted(responseId)
1539
+ );
1540
+
1541
+ // 如果有 usage 信息,更新最后一个事件
1542
+ if (openaiChunk.usage && events.length > 0) {
1543
+ const lastEvent = events[events.length - 1];
1544
+ if (lastEvent.response) {
1545
+ lastEvent.response.usage = {
1546
+ input_tokens: openaiChunk.usage.prompt_tokens || 0,
1547
+ input_tokens_details: {
1548
+ cached_tokens: openaiChunk.usage.prompt_tokens_details?.cached_tokens || 0
1549
+ },
1550
+ output_tokens: openaiChunk.usage.completion_tokens || 0,
1551
+ output_tokens_details: {
1552
+ reasoning_tokens: openaiChunk.usage.completion_tokens_details?.reasoning_tokens || 0
1553
+ },
1554
+ total_tokens: openaiChunk.usage.total_tokens || 0
1555
+ };
1556
+ }
1557
+ }
1558
+ }
1559
+
1560
+ return events;
1561
+ }
1562
+
1563
+ }
1564
+
1565
+ export default OpenAIConverter;
src/converters/strategies/OpenAIResponsesConverter.js ADDED
@@ -0,0 +1,582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * OpenAI Responses API 转换器
3
+ * 处理 OpenAI Responses API 格式与其他协议之间的转换
4
+ */
5
+
6
+ import { BaseConverter } from '../BaseConverter.js';
7
+ import { MODEL_PROTOCOL_PREFIX } from '../../utils/common.js';
8
+ import {
9
+ extractAndProcessSystemMessages as extractSystemMessages,
10
+ extractTextFromMessageContent as extractText,
11
+ CLAUDE_DEFAULT_MAX_TOKENS,
12
+ GEMINI_DEFAULT_INPUT_TOKEN_LIMIT,
13
+ GEMINI_DEFAULT_OUTPUT_TOKEN_LIMIT
14
+ } from '../utils.js';
15
+
16
/**
 * OpenAI Responses API converter.
 * Translates OpenAI Responses requests, responses and stream chunks to and
 * from the OpenAI Chat Completions, Claude Messages and Gemini protocols.
 *
 * Fix: toClaudeResponse previously leaked raw OpenAI finish_reason values
 * ('stop', 'length', ...) into Claude's stop_reason field; Claude clients
 * only understand end_turn / max_tokens / stop_sequence / tool_use, so the
 * value is now mapped via mapClaudeStopReason().
 */
export class OpenAIResponsesConverter extends BaseConverter {
  constructor() {
    super(MODEL_PROTOCOL_PREFIX.OPENAI_RESPONSES);
  }

  // ===========================================================================
  // Protocol dispatch
  // ===========================================================================

  /**
   * Converts a Responses-format request to the target protocol.
   * @throws {Error} when the target protocol is unsupported
   */
  convertRequest(data, toProtocol) {
    switch (toProtocol) {
      case MODEL_PROTOCOL_PREFIX.OPENAI:
        return this.toOpenAIRequest(data);
      case MODEL_PROTOCOL_PREFIX.CLAUDE:
        return this.toClaudeRequest(data);
      case MODEL_PROTOCOL_PREFIX.GEMINI:
        return this.toGeminiRequest(data);
      default:
        throw new Error(`Unsupported target protocol: ${toProtocol}`);
    }
  }

  /**
   * Converts a Responses-format response to the target protocol.
   * @throws {Error} when the target protocol is unsupported
   */
  convertResponse(data, toProtocol, model) {
    switch (toProtocol) {
      case MODEL_PROTOCOL_PREFIX.OPENAI:
        return this.toOpenAIResponse(data, model);
      case MODEL_PROTOCOL_PREFIX.CLAUDE:
        return this.toClaudeResponse(data, model);
      case MODEL_PROTOCOL_PREFIX.GEMINI:
        return this.toGeminiResponse(data, model);
      default:
        throw new Error(`Unsupported target protocol: ${toProtocol}`);
    }
  }

  /**
   * Converts a Responses-format stream chunk to the target protocol.
   * @throws {Error} when the target protocol is unsupported
   */
  convertStreamChunk(chunk, toProtocol, model) {
    switch (toProtocol) {
      case MODEL_PROTOCOL_PREFIX.OPENAI:
        return this.toOpenAIStreamChunk(chunk, model);
      case MODEL_PROTOCOL_PREFIX.CLAUDE:
        return this.toClaudeStreamChunk(chunk, model);
      case MODEL_PROTOCOL_PREFIX.GEMINI:
        return this.toGeminiStreamChunk(chunk, model);
      default:
        throw new Error(`Unsupported target protocol: ${toProtocol}`);
    }
  }

  /**
   * Converts a model list to the target protocol.
   * Unknown targets are passed through unchanged.
   */
  convertModelList(data, targetProtocol) {
    switch (targetProtocol) {
      case MODEL_PROTOCOL_PREFIX.OPENAI:
        return this.toOpenAIModelList(data);
      case MODEL_PROTOCOL_PREFIX.CLAUDE:
        return this.toClaudeModelList(data);
      case MODEL_PROTOCOL_PREFIX.GEMINI:
        return this.toGeminiModelList(data);
      default:
        return data;
    }
  }

  // ===========================================================================
  // → OpenAI format
  // ===========================================================================

  /**
   * Converts an OpenAI Responses request into a standard Chat Completions
   * request. `instructions` becomes a system message; `input` items of type
   * 'message' contribute their input_text parts; a standard `messages` array,
   * when present, is appended as-is.
   */
  toOpenAIRequest(responsesRequest) {
    const openaiRequest = {
      model: responsesRequest.model,
      messages: [],
      stream: responsesRequest.stream || false
    };

    // instructions → system message.
    if (responsesRequest.instructions) {
      openaiRequest.messages.push({
        role: 'system',
        content: responsesRequest.instructions
      });
    }

    // input → user/assistant history (only input_text parts are kept).
    if (responsesRequest.input && Array.isArray(responsesRequest.input)) {
      responsesRequest.input.forEach(item => {
        if (item.type === 'message') {
          const content = item.content
            .filter(c => c.type === 'input_text')
            .map(c => c.text)
            .join('\n');

          if (content) {
            openaiRequest.messages.push({
              role: item.role,
              content: content
            });
          }
        }
      });
    }

    // A standard messages array is also supported and appended verbatim.
    if (responsesRequest.messages && Array.isArray(responsesRequest.messages)) {
      responsesRequest.messages.forEach(msg => {
        openaiRequest.messages.push({
          role: msg.role,
          content: msg.content
        });
      });
    }

    // Scalar passthroughs.
    if (responsesRequest.temperature !== undefined) {
      openaiRequest.temperature = responsesRequest.temperature;
    }
    if (responsesRequest.max_tokens !== undefined) {
      openaiRequest.max_tokens = responsesRequest.max_tokens;
    }
    if (responsesRequest.top_p !== undefined) {
      openaiRequest.top_p = responsesRequest.top_p;
    }

    return openaiRequest;
  }

  /**
   * Converts an OpenAI Responses response into a standard Chat Completions
   * response. Existing `choices` pass through; otherwise a single assistant
   * choice is synthesized from `content`.
   */
  toOpenAIResponse(responsesResponse, model) {
    return {
      id: responsesResponse.id || `chatcmpl-${Date.now()}`,
      object: 'chat.completion',
      created: responsesResponse.created || Math.floor(Date.now() / 1000),
      model: model || responsesResponse.model,
      choices: responsesResponse.choices || [{
        index: 0,
        message: {
          role: 'assistant',
          content: responsesResponse.content || ''
        },
        finish_reason: responsesResponse.finish_reason || 'stop'
      }],
      usage: responsesResponse.usage ? {
        prompt_tokens: responsesResponse.usage.input_tokens || 0,
        completion_tokens: responsesResponse.usage.output_tokens || 0,
        total_tokens: responsesResponse.usage.total_tokens || 0,
        prompt_tokens_details: {
          cached_tokens: responsesResponse.usage.input_tokens_details?.cached_tokens || 0
        },
        completion_tokens_details: {
          reasoning_tokens: responsesResponse.usage.output_tokens_details?.reasoning_tokens || 0
        }
      } : {
        prompt_tokens: 0,
        completion_tokens: 0,
        total_tokens: 0,
        prompt_tokens_details: {
          cached_tokens: 0
        },
        completion_tokens_details: {
          reasoning_tokens: 0
        }
      }
    };
  }

  /**
   * Converts an OpenAI Responses stream chunk into a standard Chat
   * Completions stream chunk.
   */
  toOpenAIStreamChunk(responsesChunk, model) {
    return {
      id: responsesChunk.id || `chatcmpl-${Date.now()}`,
      object: 'chat.completion.chunk',
      created: responsesChunk.created || Math.floor(Date.now() / 1000),
      model: model || responsesChunk.model,
      choices: responsesChunk.choices || [{
        index: 0,
        delta: {
          content: responsesChunk.delta?.content || ''
        },
        finish_reason: responsesChunk.finish_reason || null
      }]
    };
  }

  // ===========================================================================
  // → Claude format
  // ===========================================================================

  /**
   * Converts an OpenAI Responses request into a Claude Messages request.
   * `instructions` maps to `system`; non-assistant roles collapse to 'user'.
   * NOTE(review): CLAUDE_DEFAULT_MAX_TOKENS (200000) looks like a context
   * limit rather than an output limit — confirm against Claude's max_tokens
   * bounds for the target models.
   */
  toClaudeRequest(responsesRequest) {
    const claudeRequest = {
      model: responsesRequest.model,
      messages: [],
      max_tokens: responsesRequest.max_tokens || CLAUDE_DEFAULT_MAX_TOKENS,
      stream: responsesRequest.stream || false
    };

    // instructions → system prompt.
    if (responsesRequest.instructions) {
      claudeRequest.system = responsesRequest.instructions;
    }

    // input → message history (input_text parts only).
    if (responsesRequest.input && Array.isArray(responsesRequest.input)) {
      responsesRequest.input.forEach(item => {
        if (item.type === 'message') {
          const content = item.content
            .filter(c => c.type === 'input_text')
            .map(c => c.text)
            .join('\n');

          if (content) {
            claudeRequest.messages.push({
              role: item.role === 'assistant' ? 'assistant' : 'user',
              content: content
            });
          }
        }
      });
    }

    // A standard messages array is also supported; system messages fill
    // `system` only when instructions did not already.
    if (responsesRequest.messages && Array.isArray(responsesRequest.messages)) {
      const { systemMessages, otherMessages } = extractSystemMessages(
        responsesRequest.messages
      );

      if (!claudeRequest.system && systemMessages.length > 0) {
        const systemTexts = systemMessages.map(msg => extractText(msg.content));
        claudeRequest.system = systemTexts.join('\n');
      }

      otherMessages.forEach(msg => {
        claudeRequest.messages.push({
          role: msg.role === 'assistant' ? 'assistant' : 'user',
          content: typeof msg.content === 'string' ? msg.content : extractText(msg.content)
        });
      });
    }

    // Scalar passthroughs.
    if (responsesRequest.temperature !== undefined) {
      claudeRequest.temperature = responsesRequest.temperature;
    }
    if (responsesRequest.top_p !== undefined) {
      claudeRequest.top_p = responsesRequest.top_p;
    }

    return claudeRequest;
  }

  /**
   * Converts an OpenAI Responses response into a Claude Messages response.
   * Usage fields accept both Responses (input_tokens/output_tokens) and
   * Chat Completions (prompt_tokens/completion_tokens) naming.
   */
  toClaudeResponse(responsesResponse, model) {
    const content = responsesResponse.choices?.[0]?.message?.content ||
      responsesResponse.content || '';

    const usage = responsesResponse.usage;
    const inputTokens = usage?.input_tokens || usage?.prompt_tokens || 0;
    const outputTokens = usage?.output_tokens || usage?.completion_tokens || 0;

    return {
      id: responsesResponse.id || `msg_${Date.now()}`,
      type: 'message',
      role: 'assistant',
      content: [{
        type: 'text',
        text: content
      }],
      model: model || responsesResponse.model,
      // Fix: map OpenAI finish_reason values onto Claude stop_reason values
      // instead of leaking 'stop'/'length' through unchanged.
      stop_reason: this.mapClaudeStopReason(responsesResponse.choices?.[0]?.finish_reason),
      usage: {
        input_tokens: inputTokens,
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: usage?.input_tokens_details?.cached_tokens || 0,
        output_tokens: outputTokens,
        prompt_tokens: inputTokens,
        completion_tokens: outputTokens,
        total_tokens: usage?.total_tokens || (inputTokens + outputTokens),
        cached_tokens: usage?.input_tokens_details?.cached_tokens || 0
      }
    };
  }

  /**
   * Maps an OpenAI finish_reason onto a Claude stop_reason.
   * Unknown/missing values default to 'end_turn'.
   */
  mapClaudeStopReason(reason) {
    const reasonMap = {
      'stop': 'end_turn',
      'length': 'max_tokens',
      'tool_calls': 'tool_use',
      'content_filter': 'end_turn',
      'end_turn': 'end_turn'
    };
    return reasonMap[reason] || 'end_turn';
  }

  /**
   * Converts an OpenAI Responses stream chunk into a Claude stream event.
   * NOTE(review): any chunk with neither content nor finish_reason emits a
   * message_start event, which may repeat mid-stream — confirm consumers
   * tolerate duplicate message_start events.
   */
  toClaudeStreamChunk(responsesChunk, model) {
    const delta = responsesChunk.choices?.[0]?.delta || responsesChunk.delta || {};
    const finishReason = responsesChunk.choices?.[0]?.finish_reason ||
      responsesChunk.finish_reason;

    if (finishReason) {
      return {
        type: 'message_stop'
      };
    }

    if (delta.content) {
      return {
        type: 'content_block_delta',
        index: 0,
        delta: {
          type: 'text_delta',
          text: delta.content
        }
      };
    }

    return {
      type: 'message_start',
      message: {
        id: responsesChunk.id || `msg_${Date.now()}`,
        type: 'message',
        role: 'assistant',
        content: [],
        model: model || responsesChunk.model
      }
    };
  }

  // ===========================================================================
  // → Gemini format
  // ===========================================================================

  /**
   * Converts an OpenAI Responses request into a Gemini generateContent
   * request. Items in `input` without a `type` are treated as messages;
   * assistant maps to role 'model', everything else to 'user'.
   */
  toGeminiRequest(responsesRequest) {
    const geminiRequest = {
      contents: [],
      generationConfig: {}
    };

    // instructions → systemInstruction.
    if (responsesRequest.instructions) {
      geminiRequest.systemInstruction = {
        parts: [{
          text: responsesRequest.instructions
        }]
      };
    }

    // input → contents (missing type defaults to 'message').
    if (responsesRequest.input && Array.isArray(responsesRequest.input)) {
      responsesRequest.input.forEach(item => {
        if (!item.type || item.type === 'message') {
          let content = '';
          if (Array.isArray(item.content)) {
            content = item.content
              .filter(c => c.type === 'input_text')
              .map(c => c.text)
              .join('\n');
          } else if (typeof item.content === 'string') {
            content = item.content;
          }

          if (content) {
            geminiRequest.contents.push({
              role: item.role === 'assistant' ? 'model' : 'user',
              parts: [{
                text: content
              }]
            });
          }
        }
      });
    }

    // A standard messages array is also supported.
    if (responsesRequest.messages && Array.isArray(responsesRequest.messages)) {
      const { systemMessages, otherMessages } = extractSystemMessages(
        responsesRequest.messages
      );

      if (!geminiRequest.systemInstruction && systemMessages.length > 0) {
        const systemTexts = systemMessages.map(msg => extractText(msg.content));
        geminiRequest.systemInstruction = {
          parts: [{
            text: systemTexts.join('\n')
          }]
        };
      }

      otherMessages.forEach(msg => {
        geminiRequest.contents.push({
          role: msg.role === 'assistant' ? 'model' : 'user',
          parts: [{
            text: typeof msg.content === 'string' ? msg.content : extractText(msg.content)
          }]
        });
      });
    }

    // Generation config.
    if (responsesRequest.temperature !== undefined) {
      geminiRequest.generationConfig.temperature = responsesRequest.temperature;
    }
    if (responsesRequest.max_tokens !== undefined) {
      geminiRequest.generationConfig.maxOutputTokens = responsesRequest.max_tokens;
    }
    if (responsesRequest.top_p !== undefined) {
      geminiRequest.generationConfig.topP = responsesRequest.top_p;
    }

    return geminiRequest;
  }

  /**
   * Converts an OpenAI Responses response into a Gemini response.
   */
  toGeminiResponse(responsesResponse, model) {
    const content = responsesResponse.choices?.[0]?.message?.content ||
      responsesResponse.content || '';

    const usage = responsesResponse.usage;
    const inputTokens = usage?.input_tokens || usage?.prompt_tokens || 0;
    const outputTokens = usage?.output_tokens || usage?.completion_tokens || 0;

    return {
      candidates: [{
        content: {
          parts: [{
            text: content
          }],
          role: 'model'
        },
        finishReason: this.mapFinishReason(
          responsesResponse.choices?.[0]?.finish_reason || 'STOP'
        ),
        index: 0
      }],
      usageMetadata: {
        promptTokenCount: inputTokens,
        candidatesTokenCount: outputTokens,
        totalTokenCount: usage?.total_tokens || (inputTokens + outputTokens),
        cachedContentTokenCount: usage?.input_tokens_details?.cached_tokens || 0,
        promptTokensDetails: [{
          modality: "TEXT",
          tokenCount: inputTokens
        }],
        candidatesTokensDetails: [{
          modality: "TEXT",
          tokenCount: outputTokens
        }],
        thoughtsTokenCount: usage?.output_tokens_details?.reasoning_tokens || 0
      }
    };
  }

  /**
   * Converts an OpenAI Responses stream chunk into a Gemini stream chunk.
   */
  toGeminiStreamChunk(responsesChunk, model) {
    const delta = responsesChunk.choices?.[0]?.delta || responsesChunk.delta || {};
    const finishReason = responsesChunk.choices?.[0]?.finish_reason ||
      responsesChunk.finish_reason;

    return {
      candidates: [{
        content: {
          parts: delta.content ? [{
            text: delta.content
          }] : [],
          role: 'model'
        },
        finishReason: finishReason ? this.mapFinishReason(finishReason) : null,
        index: 0
      }]
    };
  }

  // ===========================================================================
  // Helpers
  // ===========================================================================

  /**
   * Maps an OpenAI/Claude finish reason onto a Gemini finishReason.
   */
  mapFinishReason(reason) {
    const reasonMap = {
      'stop': 'STOP',
      'length': 'MAX_TOKENS',
      'content_filter': 'SAFETY',
      'end_turn': 'STOP'
    };
    return reasonMap[reason] || 'STOP';
  }

  /**
   * Converts a Responses model list to the standard OpenAI model list.
   * Standard-format input passes through unchanged.
   */
  toOpenAIModelList(responsesModels) {
    if (responsesModels.object === 'list' && responsesModels.data) {
      return responsesModels;
    }

    return {
      object: "list",
      data: (responsesModels.models || responsesModels.data || []).map(m => ({
        id: m.id || m.name,
        object: "model",
        created: m.created || Math.floor(Date.now() / 1000),
        owned_by: m.owned_by || "openai",
      })),
    };
  }

  /**
   * Converts a Responses model list to a Claude model list.
   */
  toClaudeModelList(responsesModels) {
    const models = responsesModels.data || responsesModels.models || [];
    return {
      models: models.map(m => ({
        name: m.id || m.name,
        description: m.description || "",
      })),
    };
  }

  /**
   * Converts a Responses model list to a Gemini model list, filling in
   * default token limits and generation methods where absent.
   */
  toGeminiModelList(responsesModels) {
    const models = responsesModels.data || responsesModels.models || [];
    return {
      models: models.map(m => ({
        name: `models/${m.id || m.name}`,
        version: m.version || "1.0.0",
        displayName: m.displayName || m.id || m.name,
        description: m.description || `A generative model for text and chat generation. ID: ${m.id || m.name}`,
        inputTokenLimit: m.inputTokenLimit || GEMINI_DEFAULT_INPUT_TOKEN_LIMIT,
        outputTokenLimit: m.outputTokenLimit || GEMINI_DEFAULT_OUTPUT_TOKEN_LIMIT,
        supportedGenerationMethods: m.supportedGenerationMethods || ["generateContent", "streamGenerateContent"]
      }))
    };
  }

}
582
+
src/converters/utils.js ADDED
@@ -0,0 +1,433 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * 转换器公共工具函数模块
3
+ * 提供各种协议转换所需的通用辅助函数
4
+ */
5
+
6
+ import { v4 as uuidv4 } from 'uuid';
7
+
8
// =============================================================================
// Constants
// =============================================================================

// Generic defaults shared across protocols
export const DEFAULT_MAX_TOKENS = 8192;
export const DEFAULT_TEMPERATURE = 1;
export const DEFAULT_TOP_P = 0.95;

// =============================================================================
// OpenAI-related constants
// =============================================================================
export const OPENAI_DEFAULT_MAX_TOKENS = 128000;
export const OPENAI_DEFAULT_TEMPERATURE = 1;
export const OPENAI_DEFAULT_TOP_P = 0.95;
export const OPENAI_DEFAULT_INPUT_TOKEN_LIMIT = 32768;
export const OPENAI_DEFAULT_OUTPUT_TOKEN_LIMIT = 128000;

// =============================================================================
// Claude-related constants
// =============================================================================
export const CLAUDE_DEFAULT_MAX_TOKENS = 200000;
export const CLAUDE_DEFAULT_TEMPERATURE = 1;
export const CLAUDE_DEFAULT_TOP_P = 0.95;

// =============================================================================
// Gemini-related constants
// =============================================================================
export const GEMINI_DEFAULT_MAX_TOKENS = 65534;
export const GEMINI_DEFAULT_TEMPERATURE = 1;
export const GEMINI_DEFAULT_TOP_P = 0.95;
export const GEMINI_DEFAULT_INPUT_TOKEN_LIMIT = 32768;
export const GEMINI_DEFAULT_OUTPUT_TOKEN_LIMIT = 65534;

// =============================================================================
// OpenAI Responses-related constants
// =============================================================================
export const OPENAI_RESPONSES_DEFAULT_MAX_TOKENS = 128000;
export const OPENAI_RESPONSES_DEFAULT_TEMPERATURE = 1;
export const OPENAI_RESPONSES_DEFAULT_TOP_P = 0.95;
export const OPENAI_RESPONSES_DEFAULT_INPUT_TOKEN_LIMIT = 32768;
export const OPENAI_RESPONSES_DEFAULT_OUTPUT_TOKEN_LIMIT = 128000;

// =============================================================================
// Ollama-related constants
// =============================================================================
export const OLLAMA_DEFAULT_CONTEXT_LENGTH = 65534;
export const OLLAMA_DEFAULT_MAX_OUTPUT_TOKENS = 8192;

// Claude model context lengths (tokens), as reported through the Ollama facade
export const OLLAMA_CLAUDE_DEFAULT_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_SONNET_45_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_SONNET_45_MAX_OUTPUT_TOKENS = 200000;
export const OLLAMA_CLAUDE_HAIKU_45_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_HAIKU_45_MAX_OUTPUT_TOKENS = 200000;
export const OLLAMA_CLAUDE_OPUS_41_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_OPUS_41_MAX_OUTPUT_TOKENS = 32000;
export const OLLAMA_CLAUDE_SONNET_40_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_SONNET_40_MAX_OUTPUT_TOKENS = 200000;
export const OLLAMA_CLAUDE_SONNET_37_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_SONNET_37_MAX_OUTPUT_TOKENS = 200000;
export const OLLAMA_CLAUDE_OPUS_40_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_OPUS_40_MAX_OUTPUT_TOKENS = 32000;
export const OLLAMA_CLAUDE_HAIKU_35_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_HAIKU_35_MAX_OUTPUT_TOKENS = 200000;
export const OLLAMA_CLAUDE_HAIKU_30_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_HAIKU_30_MAX_OUTPUT_TOKENS = 8192;
export const OLLAMA_CLAUDE_SONNET_35_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_SONNET_35_MAX_OUTPUT_TOKENS = 200000;
export const OLLAMA_CLAUDE_OPUS_30_CONTEXT_LENGTH = 200000;
export const OLLAMA_CLAUDE_OPUS_30_MAX_OUTPUT_TOKENS = 8192;

// Gemini model context lengths (tokens)
export const OLLAMA_GEMINI_25_PRO_CONTEXT_LENGTH = 1048576;
export const OLLAMA_GEMINI_25_PRO_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_GEMINI_25_FLASH_CONTEXT_LENGTH = 1048576;
export const OLLAMA_GEMINI_25_FLASH_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_GEMINI_25_IMAGE_CONTEXT_LENGTH = 65534;
export const OLLAMA_GEMINI_25_IMAGE_MAX_OUTPUT_TOKENS = 32768;
export const OLLAMA_GEMINI_25_LIVE_CONTEXT_LENGTH = 131072;
export const OLLAMA_GEMINI_25_LIVE_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_GEMINI_25_TTS_CONTEXT_LENGTH = 65534;
export const OLLAMA_GEMINI_25_TTS_MAX_OUTPUT_TOKENS = 16384;
export const OLLAMA_GEMINI_20_FLASH_CONTEXT_LENGTH = 1048576;
export const OLLAMA_GEMINI_20_FLASH_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_GEMINI_20_IMAGE_CONTEXT_LENGTH = 32768;
export const OLLAMA_GEMINI_20_IMAGE_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_GEMINI_15_PRO_CONTEXT_LENGTH = 2097152;
export const OLLAMA_GEMINI_15_PRO_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_GEMINI_15_FLASH_CONTEXT_LENGTH = 1048576;
export const OLLAMA_GEMINI_15_FLASH_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_GEMINI_DEFAULT_CONTEXT_LENGTH = 1048576;
export const OLLAMA_GEMINI_DEFAULT_MAX_OUTPUT_TOKENS = 65534;

// GPT model context lengths (tokens)
export const OLLAMA_GPT4_TURBO_CONTEXT_LENGTH = 128000;
export const OLLAMA_GPT4_TURBO_MAX_OUTPUT_TOKENS = 8192;
export const OLLAMA_GPT4_32K_CONTEXT_LENGTH = 32768;
export const OLLAMA_GPT4_32K_MAX_OUTPUT_TOKENS = 8192;
export const OLLAMA_GPT4_BASE_CONTEXT_LENGTH = 200000;
export const OLLAMA_GPT4_BASE_MAX_OUTPUT_TOKENS = 8192;
export const OLLAMA_GPT35_16K_CONTEXT_LENGTH = 16385;
export const OLLAMA_GPT35_16K_MAX_OUTPUT_TOKENS = 8192;
export const OLLAMA_GPT35_BASE_CONTEXT_LENGTH = 8192;
export const OLLAMA_GPT35_BASE_MAX_OUTPUT_TOKENS = 8192;

// Qwen model context lengths (tokens)
export const OLLAMA_QWEN_CODER_PLUS_CONTEXT_LENGTH = 128000;
export const OLLAMA_QWEN_CODER_PLUS_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_QWEN_VL_PLUS_CONTEXT_LENGTH = 262144;
export const OLLAMA_QWEN_VL_PLUS_MAX_OUTPUT_TOKENS = 32768;
export const OLLAMA_QWEN_CODER_FLASH_CONTEXT_LENGTH = 128000;
export const OLLAMA_QWEN_CODER_FLASH_MAX_OUTPUT_TOKENS = 65534;
export const OLLAMA_QWEN_DEFAULT_CONTEXT_LENGTH = 32768;
export const OLLAMA_QWEN_DEFAULT_MAX_OUTPUT_TOKENS = 200000;

// Miscellaneous Ollama metadata defaults (reported in show/tag responses)
export const OLLAMA_DEFAULT_FILE_TYPE = 2;
export const OLLAMA_DEFAULT_QUANTIZATION_VERSION = 2;
export const OLLAMA_DEFAULT_ROPE_FREQ_BASE = 10000.0;
export const OLLAMA_DEFAULT_TEMPERATURE = 0.7;
export const OLLAMA_DEFAULT_TOP_P = 0.9;
export const OLLAMA_DEFAULT_QUANTIZATION_LEVEL = 'Q4_0';
export const OLLAMA_SHOW_QUANTIZATION_LEVEL = 'Q4_K_M';
131
+
132
+ // =============================================================================
133
+ // 通用辅助函数
134
+ // =============================================================================
135
+
136
/**
 * Return `value` unless it is undefined or 0, otherwise return `defaultValue`.
 * Note: other falsy values (null, '', false) are kept as-is by design.
 * @param {*} value - Candidate value.
 * @param {*} defaultValue - Fallback used when the candidate is undefined or 0.
 * @returns {*} The chosen value.
 */
export function checkAndAssignOrDefault(value, defaultValue) {
  const useDefault = value === undefined || value === 0;
  return useDefault ? defaultValue : value;
}
148
+
149
/**
 * Generate a unique identifier, optionally namespaced with a prefix.
 * @param {string} prefix - Optional prefix; joined to the UUID with '_' when present.
 * @returns {string} A v4 UUID, or `${prefix}_${uuid}` when a prefix is given.
 */
export function generateId(prefix = '') {
  const id = uuidv4();
  return prefix ? [prefix, id].join('_') : id;
}
157
+
158
/**
 * Parse a JSON string defensively, tolerating a truncated trailing escape
 * sequence (a dangling '\' or an incomplete '\uXXXX' escape), as can happen
 * with streamed/truncated tool arguments.
 * @param {string} str - JSON text, possibly cut off mid-escape.
 * @returns {*} The parsed value, or the original input when it is falsy or
 *   still not valid JSON after cleanup.
 */
export function safeParseJSON(str) {
  if (!str) {
    return str;
  }

  let candidate = str;
  const hasDanglingBackslash = candidate.endsWith('\\') && !candidate.endsWith('\\\\');
  if (hasDanglingBackslash) {
    // Drop the lone trailing escape character.
    candidate = candidate.slice(0, -1);
  } else if (['\\u', '\\u0', '\\u00'].some((suffix) => candidate.endsWith(suffix))) {
    // Cut off an incomplete \uXXXX unicode escape.
    candidate = candidate.slice(0, candidate.lastIndexOf('\\u'));
  }

  try {
    return JSON.parse(candidate || '{}');
  } catch (e) {
    return str;
  }
}
183
+
184
/**
 * Extract the plain text from a message `content` field.
 * Strings pass through; arrays of content parts contribute their non-empty
 * `text` parts joined with newlines; anything else yields ''.
 * @param {string|Array} content - Message content.
 * @returns {string} Extracted text.
 */
export function extractTextFromMessageContent(content) {
  if (typeof content === 'string') {
    return content;
  }
  if (!Array.isArray(content)) {
    return '';
  }
  const texts = [];
  for (const part of content) {
    if (part.type === 'text' && part.text) {
      texts.push(part.text);
    }
  }
  return texts.join('\n');
}
201
+
202
/**
 * Split system messages out of a message list and fold their text into a
 * single Gemini-style systemInstruction.
 * @param {Array} messages - Full message array.
 * @returns {{systemInstruction: Object|null, nonSystemMessages: Array}}
 *   systemInstruction is null when there are no system messages.
 */
export function extractAndProcessSystemMessages(messages) {
  const systemTexts = [];
  const nonSystemMessages = [];

  messages.forEach((message) => {
    if (message.role === 'system') {
      systemTexts.push(extractTextFromMessageContent(message.content));
    } else {
      nonSystemMessages.push(message);
    }
  });

  const systemInstruction = systemTexts.length > 0
    ? { parts: [{ text: systemTexts.join('\n') }] }
    : null;

  return { systemInstruction, nonSystemMessages };
}
229
+
230
/**
 * Strip JSON Schema keywords that the Google Gemini API does not support.
 * Gemini accepts only a small whitelist; keywords such as exclusiveMinimum,
 * minimum/maximum, minLength/maxLength, pattern, format, default, const,
 * additionalProperties, $schema/$ref/$id and allOf/anyOf/oneOf/not are dropped.
 * Recurses through `properties`, `items`, and arrays of schemas.
 * @param {Object} schema - JSON Schema (or fragment) to sanitize.
 * @returns {Object} A sanitized copy; non-object inputs are returned as-is.
 */
export function cleanJsonSchemaProperties(schema) {
  if (!schema || typeof schema !== 'object') {
    return schema;
  }

  // Arrays of schemas: sanitize each element.
  if (Array.isArray(schema)) {
    return schema.map((item) => cleanJsonSchemaProperties(item));
  }

  // Whitelist of JSON Schema keywords Gemini understands.
  const GEMINI_ALLOWED_KEYS = new Set([
    'type',
    'description',
    'properties',
    'required',
    'enum',
    'items',
    'nullable',
  ]);

  const sanitized = {};
  for (const key of Object.keys(schema)) {
    if (!GEMINI_ALLOWED_KEYS.has(key)) {
      continue; // unsupported keyword — silently dropped
    }
    const value = schema[key];
    if (key === 'properties' && value !== null && typeof value === 'object') {
      const cleanProperties = {};
      for (const propName of Object.keys(value)) {
        cleanProperties[propName] = cleanJsonSchemaProperties(value[propName]);
      }
      sanitized.properties = cleanProperties;
    } else if (key === 'items') {
      sanitized.items = cleanJsonSchemaProperties(value);
    } else {
      sanitized[key] = value;
    }
  }

  return sanitized;
}
283
+
284
/**
 * Map a finish/stop reason between provider protocol vocabularies.
 * Unknown source/target formats, or reasons with no mapping entry, fall back
 * to "end_turn".
 * @param {string} reason - Finish reason from the source response.
 * @param {string} sourceFormat - Source protocol key ('openai' | 'gemini').
 * @param {string} targetFormat - Target protocol key (currently 'anthropic').
 * @returns {string} Equivalent finish reason in the target protocol.
 */
export function mapFinishReason(reason, sourceFormat, targetFormat) {
  const reasonMappings = {
    openai: {
      anthropic: {
        stop: "end_turn",
        length: "max_tokens",
        content_filter: "stop_sequence",
        tool_calls: "tool_use"
      }
    },
    gemini: {
      anthropic: {
        STOP: "end_turn",
        MAX_TOKENS: "max_tokens",
        SAFETY: "stop_sequence",
        RECITATION: "stop_sequence",
        stop: "end_turn",
        length: "max_tokens",
        safety: "stop_sequence",
        recitation: "stop_sequence",
        other: "end_turn"
      }
    }
  };

  // Defaulted lookups replace the previous try/catch, which used an exception
  // for the ordinary "no mapping table" case.
  const targetTables = reasonMappings[sourceFormat] || {};
  const reasonTable = targetTables[targetFormat] || {};
  return reasonTable[reason] || "end_turn";
}
322
+
323
/**
 * Derive an OpenAI reasoning_effort level from an Anthropic thinking
 * budget_tokens value: <=50 → 'low', <=200 → 'medium', otherwise 'high'.
 * A missing budget defaults to 'high'.
 * @param {number|null} budgetTokens - Anthropic thinking budget_tokens.
 * @returns {string} OpenAI reasoning_effort level ('low' | 'medium' | 'high').
 */
export function determineReasoningEffortFromBudget(budgetTokens) {
  if (budgetTokens === null || budgetTokens === undefined) {
    console.info("No budget_tokens provided, defaulting to reasoning_effort='high'");
    return "high";
  }

  const LOW_THRESHOLD = 50;
  const HIGH_THRESHOLD = 200;

  console.debug(`Threshold configuration: low <= ${LOW_THRESHOLD}, medium <= ${HIGH_THRESHOLD}, high > ${HIGH_THRESHOLD}`);

  const effort = budgetTokens <= LOW_THRESHOLD
    ? "low"
    : budgetTokens <= HIGH_THRESHOLD
      ? "medium"
      : "high";

  console.info(`🎯 Budget tokens ${budgetTokens} -> reasoning_effort '${effort}' (thresholds: low<=${LOW_THRESHOLD}, high<=${HIGH_THRESHOLD})`);
  return effort;
}
351
+
352
/**
 * Split `<thinking>...</thinking>` sections out of OpenAI text content.
 * Returns the original string when it is empty/whitespace-only, a single
 * trimmed string when no thinking blocks remain, or an array of
 * `{type:'text'}` / `{type:'thinking'}` content blocks otherwise.
 * @param {string} text - Raw text content.
 * @returns {string|Array} Extracted content.
 */
export function extractThinkingFromOpenAIText(text) {
  const pattern = /<thinking>\s*(.*?)\s*<\/thinking>/gs;
  const blocks = [];
  let cursor = 0;

  for (const match of text.matchAll(pattern)) {
    // Text preceding this thinking section.
    const leading = text.slice(cursor, match.index).trim();
    if (leading) {
      blocks.push({ type: "text", text: leading });
    }
    // The thinking payload itself.
    const inner = match[1].trim();
    if (inner) {
      blocks.push({ type: "thinking", thinking: inner });
    }
    cursor = match.index + match[0].length;
  }

  // Remaining text after the last thinking section (or the whole text when
  // there were no matches).
  const trailing = text.slice(cursor).trim();
  if (trailing) {
    blocks.push({ type: "text", text: trailing });
  }

  if (blocks.length === 0) {
    return text;
  }
  if (blocks.length === 1 && blocks[0].type === "text") {
    return blocks[0].text;
  }
  return blocks;
}
402
+
403
// =============================================================================
// Tool state manager (singleton)
// =============================================================================

/**
 * Process-wide registry mapping tool/function names to their tool-call ids.
 * Singleton: every `new ToolStateManager()` yields the same instance, and the
 * module exports one shared `toolStateManager`.
 */
class ToolStateManager {
  constructor() {
    // Return the existing instance when one was already created.
    if (ToolStateManager.instance) {
      return ToolStateManager.instance;
    }
    this._toolMappings = {};
    ToolStateManager.instance = this;
  }

  /** Remember the tool-call id issued for a given function name. */
  storeToolMapping(funcName, toolId) {
    this._toolMappings[funcName] = toolId;
  }

  /** Look up the tool-call id for a function name; null when unknown. */
  getToolId(funcName) {
    return this._toolMappings[funcName] || null;
  }

  /** Forget all stored name → id mappings. */
  clearMappings() {
    this._toolMappings = {};
  }
}

export const toolStateManager = new ToolStateManager();
src/core/config-manager.js ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import * as fs from 'fs';
import { promises as pfs } from 'fs';
import { INPUT_SYSTEM_PROMPT_FILE, MODEL_PROVIDER } from '../utils/common.js';

// Mutable module-level config, populated by initializeConfig() via Object.assign.
export let CONFIG = {}; // Make CONFIG exportable
// Log file name used when PROMPT_LOG_MODE === 'file'; '' otherwise.
export let PROMPT_LOG_FILENAME = ''; // Make PROMPT_LOG_FILENAME exportable

// All known provider identifiers, derived from the MODEL_PROVIDER enum.
const ALL_MODEL_PROVIDERS = Object.values(MODEL_PROVIDER);
9
+
10
/**
 * Normalize config.MODEL_PROVIDER — which may be a string, a comma-separated
 * list, or an array — into a deduplicated list of known provider identifiers
 * (matched case-insensitively against ALL_MODEL_PROVIDERS). Unknown entries
 * are warned about and dropped; an empty result falls back to GEMINI_CLI.
 * Writes DEFAULT_MODEL_PROVIDERS (the list) and MODEL_PROVIDER (its first
 * entry) back onto config.
 * @param {Object} config - Configuration object, mutated in place.
 */
function normalizeConfiguredProviders(config) {
  const selected = [];

  const tryAdd = (candidate) => {
    if (typeof candidate !== 'string') {
      return;
    }
    const name = candidate.trim();
    if (!name) {
      return;
    }
    const known = ALL_MODEL_PROVIDERS.find(
      (provider) => provider.toLowerCase() === name.toLowerCase()
    );
    if (!known) {
      console.warn(`[Config Warning] Unknown model provider '${name}'. This entry will be ignored.`);
      return;
    }
    if (!selected.includes(known)) {
      selected.push(known);
    }
  };

  const raw = config.MODEL_PROVIDER;
  if (Array.isArray(raw)) {
    for (const entry of raw) {
      tryAdd(typeof entry === 'string' ? entry : String(entry));
    }
  } else if (typeof raw === 'string') {
    for (const piece of raw.split(',')) {
      tryAdd(piece);
    }
  } else if (raw != null) {
    tryAdd(String(raw));
  }

  if (selected.length === 0) {
    selected.push(MODEL_PROVIDER.GEMINI_CLI);
  }

  config.DEFAULT_MODEL_PROVIDERS = selected;
  config.MODEL_PROVIDER = selected[0];
}
48
+
49
/**
 * Initializes the server configuration from config.json and command-line arguments.
 * Precedence: config.json values (or built-in defaults when the file is
 * missing/invalid) are overridden by any CLI flags. Also loads the system
 * prompt file, the provider pool file, and computes PROMPT_LOG_FILENAME.
 * @param {string[]} args - Command-line arguments.
 * @param {string} [configFilePath='configs/config.json'] - Path to the configuration file.
 * @returns {Object} The initialized configuration object.
 */
export async function initializeConfig(args = process.argv.slice(2), configFilePath = 'configs/config.json') {
  let currentConfig = {};

  try {
    const configData = fs.readFileSync(configFilePath, 'utf8');
    currentConfig = JSON.parse(configData);
    console.log('[Config] Loaded configuration from configs/config.json');
  } catch (error) {
    console.error('[Config Error] Failed to load configs/config.json:', error.message);
    // Fallback to default values if config.json is not found or invalid
    currentConfig = {
      REQUIRED_API_KEY: "123456",
      SERVER_PORT: 3000,
      HOST: '0.0.0.0',
      MODEL_PROVIDER: MODEL_PROVIDER.GEMINI_CLI,
      SYSTEM_PROMPT_FILE_PATH: INPUT_SYSTEM_PROMPT_FILE, // Default value
      SYSTEM_PROMPT_MODE: 'append',
      PROXY_URL: null, // HTTP/HTTPS/SOCKS5 proxy URL, e.g. http://127.0.0.1:7890 or socks5://127.0.0.1:1080
      PROXY_ENABLED_PROVIDERS: [], // providers routed through the proxy, e.g. ['gemini-cli-oauth', 'claude-kiro-oauth']
      PROMPT_LOG_BASE_NAME: "prompt_log",
      PROMPT_LOG_MODE: "none",
      REQUEST_MAX_RETRIES: 3,
      REQUEST_BASE_DELAY: 1000,
      CREDENTIAL_SWITCH_MAX_RETRIES: 5, // max retries when switching away from a bad credential after auth errors
      CRON_NEAR_MINUTES: 15,
      CRON_REFRESH_TOKEN: false,
      PROVIDER_POOLS_FILE_PATH: null, // path to the provider pool configuration file
      MAX_ERROR_COUNT: 10, // maximum error count per provider
      providerFallbackChain: {} // cross-provider-type fallback chain configuration
    };
    console.log('[Config] Using default configuration.');
  }

  // Parse command-line arguments; each flag consumes the next argv entry as its value
  for (let i = 0; i < args.length; i++) {
    if (args[i] === '--api-key') {
      if (i + 1 < args.length) {
        currentConfig.REQUIRED_API_KEY = args[i + 1];
        i++;
      } else {
        console.warn(`[Config Warning] --api-key flag requires a value.`);
      }
    } else if (args[i] === '--log-prompts') {
      if (i + 1 < args.length) {
        const mode = args[i + 1];
        if (mode === 'console' || mode === 'file') {
          currentConfig.PROMPT_LOG_MODE = mode;
        } else {
          console.warn(`[Config Warning] Invalid mode for --log-prompts. Expected 'console' or 'file'. Prompt logging is disabled.`);
        }
        i++;
      } else {
        console.warn(`[Config Warning] --log-prompts flag requires a value.`);
      }
    } else if (args[i] === '--port') {
      if (i + 1 < args.length) {
        currentConfig.SERVER_PORT = parseInt(args[i + 1], 10);
        i++;
      } else {
        console.warn(`[Config Warning] --port flag requires a value.`);
      }
    } else if (args[i] === '--model-provider') {
      if (i + 1 < args.length) {
        currentConfig.MODEL_PROVIDER = args[i + 1];
        i++;
      } else {
        console.warn(`[Config Warning] --model-provider flag requires a value.`);
      }
    } else if (args[i] === '--system-prompt-file') {
      if (i + 1 < args.length) {
        currentConfig.SYSTEM_PROMPT_FILE_PATH = args[i + 1];
        i++;
      } else {
        console.warn(`[Config Warning] --system-prompt-file flag requires a value.`);
      }
    } else if (args[i] === '--system-prompt-mode') {
      if (i + 1 < args.length) {
        const mode = args[i + 1];
        if (mode === 'overwrite' || mode === 'append') {
          currentConfig.SYSTEM_PROMPT_MODE = mode;
        } else {
          console.warn(`[Config Warning] Invalid mode for --system-prompt-mode. Expected 'overwrite' or 'append'. Using default 'overwrite'.`);
        }
        i++;
      } else {
        console.warn(`[Config Warning] --system-prompt-mode flag requires a value.`);
      }
    } else if (args[i] === '--host') {
      if (i + 1 < args.length) {
        currentConfig.HOST = args[i + 1];
        i++;
      } else {
        console.warn(`[Config Warning] --host flag requires a value.`);
      }
    } else if (args[i] === '--prompt-log-base-name') {
      if (i + 1 < args.length) {
        currentConfig.PROMPT_LOG_BASE_NAME = args[i + 1];
        i++;
      } else {
        console.warn(`[Config Warning] --prompt-log-base-name flag requires a value.`);
      }
    } else if (args[i] === '--cron-near-minutes') {
      if (i + 1 < args.length) {
        currentConfig.CRON_NEAR_MINUTES = parseInt(args[i + 1], 10);
        i++;
      } else {
        console.warn(`[Config Warning] --cron-near-minutes flag requires a value.`);
      }
    } else if (args[i] === '--cron-refresh-token') {
      if (i + 1 < args.length) {
        currentConfig.CRON_REFRESH_TOKEN = args[i + 1].toLowerCase() === 'true';
        i++;
      } else {
        console.warn(`[Config Warning] --cron-refresh-token flag requires a value.`);
      }
    } else if (args[i] === '--provider-pools-file') {
      if (i + 1 < args.length) {
        currentConfig.PROVIDER_POOLS_FILE_PATH = args[i + 1];
        i++;
      } else {
        console.warn(`[Config Warning] --provider-pools-file flag requires a value.`);
      }
    } else if (args[i] === '--max-error-count') {
      if (i + 1 < args.length) {
        currentConfig.MAX_ERROR_COUNT = parseInt(args[i + 1], 10);
        i++;
      } else {
        console.warn(`[Config Warning] --max-error-count flag requires a value.`);
      }
    }
  }

  normalizeConfiguredProviders(currentConfig);

  if (!currentConfig.SYSTEM_PROMPT_FILE_PATH) {
    currentConfig.SYSTEM_PROMPT_FILE_PATH = INPUT_SYSTEM_PROMPT_FILE;
  }
  currentConfig.SYSTEM_PROMPT_CONTENT = await getSystemPromptFileContent(currentConfig.SYSTEM_PROMPT_FILE_PATH);

  // Load the provider pool configuration (defaults to configs/provider_pools.json)
  if (!currentConfig.PROVIDER_POOLS_FILE_PATH) {
    currentConfig.PROVIDER_POOLS_FILE_PATH = 'configs/provider_pools.json';
  }
  if (currentConfig.PROVIDER_POOLS_FILE_PATH) {
    try {
      const poolsData = await pfs.readFile(currentConfig.PROVIDER_POOLS_FILE_PATH, 'utf8');
      currentConfig.providerPools = JSON.parse(poolsData);
      console.log(`[Config] Loaded provider pools from ${currentConfig.PROVIDER_POOLS_FILE_PATH}`);
    } catch (error) {
      console.error(`[Config Error] Failed to load provider pools from ${currentConfig.PROVIDER_POOLS_FILE_PATH}: ${error.message}`);
      currentConfig.providerPools = {};
    }
  } else {
    currentConfig.providerPools = {};
  }

  // Set PROMPT_LOG_FILENAME based on the determined config
  if (currentConfig.PROMPT_LOG_MODE === 'file') {
    const now = new Date();
    const pad = (num) => String(num).padStart(2, '0');
    const timestamp = `${now.getFullYear()}${pad(now.getMonth() + 1)}${pad(now.getDate())}-${pad(now.getHours())}${pad(now.getMinutes())}${pad(now.getSeconds())}`;
    PROMPT_LOG_FILENAME = `${currentConfig.PROMPT_LOG_BASE_NAME}-${timestamp}.log`;
  } else {
    PROMPT_LOG_FILENAME = ''; // Clear if not logging to file
  }

  // Assign to the exported CONFIG
  Object.assign(CONFIG, currentConfig);
  return CONFIG;
}
225
+
226
/**
 * Gets system prompt content from the specified file path.
 * Reads the file directly instead of probing with access() first, removing
 * the check-then-read (TOCTOU) race; a missing file is detected via the
 * ENOENT error code.
 * @param {string} filePath - Path to the system prompt file.
 * @returns {Promise<string|null>} File content, or null if the file does not exist, is empty, or an error occurs.
 */
export async function getSystemPromptFileContent(filePath) {
  let content;
  try {
    content = await pfs.readFile(filePath, 'utf8');
  } catch (error) {
    if (error.code === 'ENOENT') {
      console.warn(`[System Prompt] Specified system prompt file not found: ${filePath}`);
    } else {
      console.error(`[System Prompt] Error reading system prompt file ${filePath}: ${error.message}`);
    }
    return null;
  }

  // Treat an empty or whitespace-only prompt file as "no prompt".
  if (!content.trim()) {
    return null;
  }
  console.log(`[System Prompt] Loaded system prompt from ${filePath}`);
  return content;
}
255
+
256
+ export { ALL_MODEL_PROVIDERS };
257
+
src/core/master.js ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/**
 * Master process
 *
 * Manages the worker child process lifecycle, including:
 * - starting the worker process
 * - monitoring the worker's state
 * - handling worker restart requests
 * - providing IPC communication
 *
 * Usage:
 *   node src/core/master.js [original command-line arguments]
 */

import { fork } from 'child_process';
import * as http from 'http';
import * as path from 'path';
import { fileURLToPath } from 'url';

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// The forked worker child process handle (null when no worker is running)
let workerProcess = null;

// Mutable snapshot of the worker's lifecycle state
let workerStatus = {
  pid: null,
  startTime: null,
  restartCount: 0,
  lastRestartTime: null,
  isRestarting: false
};

// Master configuration
const config = {
  workerScript: path.join(__dirname, '../services/api-server.js'),
  maxRestartAttempts: 10,
  restartDelay: 1000, // restart delay (milliseconds)
  masterPort: parseInt(process.env.MASTER_PORT) || 3100, // master management port
  args: process.argv.slice(2) // arguments forwarded to the worker process
};
42
+
43
/**
 * Start the worker child process (no-op when one is already running).
 * Forks config.workerScript with the forwarded CLI arguments, records pid and
 * start time in workerStatus, and wires up message/exit/error handlers.
 */
function startWorker() {
  if (workerProcess) {
    console.log('[Master] Worker process already running, PID:', workerProcess.pid);
    return;
  }

  console.log('[Master] Starting worker process...');
  console.log('[Master] Worker script:', config.workerScript);
  console.log('[Master] Worker args:', config.args.join(' '));

  workerProcess = fork(config.workerScript, config.args, {
    stdio: ['inherit', 'inherit', 'inherit', 'ipc'],
    env: {
      ...process.env,
      IS_WORKER_PROCESS: 'true' // lets the child detect it runs under this master
    }
  });

  workerStatus.pid = workerProcess.pid;
  workerStatus.startTime = new Date().toISOString();

  console.log('[Master] Worker process started, PID:', workerProcess.pid);

  // Listen for IPC messages from the worker
  workerProcess.on('message', (message) => {
    console.log('[Master] Received message from worker:', message);
    handleWorkerMessage(message);
  });

  // Listen for worker exit
  workerProcess.on('exit', (code, signal) => {
    console.log(`[Master] Worker process exited with code ${code}, signal ${signal}`);
    workerProcess = null;
    workerStatus.pid = null;

    // If the exit was not caused by a deliberate restart, try to auto-restart
    if (!workerStatus.isRestarting && code !== 0) {
      console.log('[Master] Worker crashed, attempting auto-restart...');
      scheduleRestart();
    }
  });

  // Listen for worker process errors
  workerProcess.on('error', (error) => {
    console.error('[Master] Worker process error:', error.message);
  });
}
93
+
94
/**
 * Stop the worker child process.
 * Resolves once the worker has exited; force-kills with SIGKILL if it has not
 * exited within 5 seconds.
 * @param {boolean} graceful - When true, send a shutdown IPC message plus SIGTERM; otherwise SIGKILL immediately.
 * @returns {Promise<void>}
 */
function stopWorker(graceful = true) {
  return new Promise((resolve) => {
    if (!workerProcess) {
      console.log('[Master] No worker process to stop');
      resolve();
      return;
    }

    console.log('[Master] Stopping worker process, PID:', workerProcess.pid);

    const timeout = setTimeout(() => {
      if (workerProcess) {
        console.log('[Master] Force killing worker process...');
        workerProcess.kill('SIGKILL');
      }
      resolve();
    }, 5000); // force-kill after a 5 second timeout

    workerProcess.once('exit', () => {
      clearTimeout(timeout);
      workerProcess = null;
      workerStatus.pid = null;
      console.log('[Master] Worker process stopped');
      resolve();
    });

    if (graceful) {
      // Send the graceful-shutdown signal first, then SIGTERM
      workerProcess.send({ type: 'shutdown' });
      workerProcess.kill('SIGTERM');
    } else {
      workerProcess.kill('SIGKILL');
    }
  });
}
134
+
135
/**
 * Restart the worker child process (stop → short delay → start).
 * Re-entrant calls while a restart is already in flight are rejected.
 * @returns {Promise<Object>} Result: { success, message, [pid], [restartCount] }.
 */
async function restartWorker() {
  if (workerStatus.isRestarting) {
    console.log('[Master] Restart already in progress');
    return { success: false, message: 'Restart already in progress' };
  }

  workerStatus.isRestarting = true;
  workerStatus.restartCount++;
  workerStatus.lastRestartTime = new Date().toISOString();

  console.log('[Master] Restarting worker process...');

  try {
    await stopWorker(true);

    // Wait briefly so the listening port is released before the new worker binds
    await new Promise(resolve => setTimeout(resolve, config.restartDelay));

    startWorker();
    workerStatus.isRestarting = false;

    return {
      success: true,
      message: 'Worker restarted successfully',
      pid: workerStatus.pid,
      restartCount: workerStatus.restartCount
    };
  } catch (error) {
    workerStatus.isRestarting = false;
    console.error('[Master] Failed to restart worker:', error.message);
    return {
      success: false,
      message: 'Failed to restart worker: ' + error.message
    };
  }
}
175
+
176
+ /**
177
+ * 计划重启(用于崩溃后自动重启)
178
+ */
179
/**
 * Schedule an automatic restart (used after a worker crash).
 *
 * Uses exponential backoff (restartDelay * 2^restartCount, capped at 30s)
 * and gives up entirely once `config.maxRestartAttempts` is reached.
 */
function scheduleRestart() {
    if (workerStatus.restartCount >= config.maxRestartAttempts) {
        console.error('[Master] Max restart attempts reached, giving up');
        return;
    }

    const backoff = config.restartDelay * Math.pow(2, workerStatus.restartCount);
    const delay = Math.min(backoff, 30000);
    console.log(`[Master] Scheduling restart in ${delay}ms...`);

    setTimeout(restartWorker, delay);
}
192
+
193
+ /**
194
+ * 处理来自子进程的消息
195
+ * @param {Object} message - 消息对象
196
+ */
197
/**
 * Handle an IPC message received from the worker process.
 * Recognized message types: 'ready', 'restart_request', 'status'.
 *
 * @param {Object} message - Message object with a `type` field (and optional `data`).
 */
function handleWorkerMessage(message) {
    if (!message || !message.type) return;

    if (message.type === 'ready') {
        console.log('[Master] Worker is ready');
    } else if (message.type === 'restart_request') {
        console.log('[Master] Worker requested restart');
        restartWorker();
    } else if (message.type === 'status') {
        console.log('[Master] Worker status:', message.data);
    } else {
        console.log('[Master] Unknown message type:', message.type);
    }
}
215
+
216
+ /**
217
+ * 获取状态信息
218
+ * @returns {Object}
219
+ */
220
/**
 * Build a status snapshot covering both the master process and the worker.
 * @returns {Object} `{ master: {pid, uptime, memoryUsage}, worker: {...} }`
 */
function getStatus() {
    const master = {
        pid: process.pid,
        uptime: process.uptime(),
        memoryUsage: process.memoryUsage()
    };
    const worker = {
        pid: workerStatus.pid,
        startTime: workerStatus.startTime,
        restartCount: workerStatus.restartCount,
        lastRestartTime: workerStatus.lastRestartTime,
        isRestarting: workerStatus.isRestarting,
        isRunning: workerProcess !== null
    };
    return { master, worker };
}
237
+
238
+ /**
239
+ * 创建主进程管理 HTTP 服务器
240
+ */
241
/**
 * Create the master-process management HTTP server on `config.masterPort`.
 *
 * Control API:
 *   GET  /master/status  - master + worker status snapshot
 *   GET  /master/health  - liveness check
 *   POST /master/restart - restart the worker process
 *   POST /master/stop    - stop the worker process
 *   POST /master/start   - start the worker process
 *
 * @returns {http.Server} the listening server instance
 */
function createMasterServer() {
    const server = http.createServer(async (req, res) => {
        const url = new URL(req.url, `http://${req.headers.host}`);
        const path = url.pathname;
        const method = req.method;

        // Permissive CORS so the control API can be called from a browser UI.
        res.setHeader('Access-Control-Allow-Origin', '*');
        res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
        res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');

        if (method === 'OPTIONS') {
            res.writeHead(204);
            res.end();
            return;
        }

        // Status endpoint
        if (method === 'GET' && path === '/master/status') {
            res.writeHead(200, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify(getStatus()));
            return;
        }

        // Restart endpoint (500 when the restart attempt fails)
        if (method === 'POST' && path === '/master/restart') {
            console.log('[Master] Restart requested via API');
            const result = await restartWorker();
            res.writeHead(result.success ? 200 : 500, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify(result));
            return;
        }

        // Stop endpoint (graceful shutdown of the worker)
        if (method === 'POST' && path === '/master/stop') {
            console.log('[Master] Stop requested via API');
            await stopWorker(true);
            res.writeHead(200, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({ success: true, message: 'Worker stopped' }));
            return;
        }

        // Start endpoint (rejected when a worker is already running)
        if (method === 'POST' && path === '/master/start') {
            console.log('[Master] Start requested via API');
            if (workerProcess) {
                res.writeHead(400, { 'Content-Type': 'application/json' });
                res.end(JSON.stringify({ success: false, message: 'Worker already running' }));
                return;
            }
            startWorker();
            res.writeHead(200, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({ success: true, message: 'Worker started', pid: workerStatus.pid }));
            return;
        }

        // Health check
        if (method === 'GET' && path === '/master/health') {
            res.writeHead(200, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({
                status: 'healthy',
                workerRunning: workerProcess !== null,
                timestamp: new Date().toISOString()
            }));
            return;
        }

        // Fallback: unknown route
        res.writeHead(404, { 'Content-Type': 'application/json' });
        res.end(JSON.stringify({ error: 'Not Found' }));
    });

    server.listen(config.masterPort, () => {
        console.log(`[Master] Management server listening on port ${config.masterPort}`);
        console.log(`[Master] Available endpoints:`);
        console.log(`  GET /master/status - Get master and worker status`);
        console.log(`  GET /master/health - Health check`);
        console.log(`  POST /master/restart - Restart worker process`);
        console.log(`  POST /master/stop - Stop worker process`);
        console.log(`  POST /master/start - Start worker process`);
    });

    return server;
}
325
+
326
+ /**
327
+ * 处理进程信号
328
+ */
329
/**
 * Install process-level signal and error handlers for the master process.
 * SIGTERM/SIGINT trigger a graceful worker shutdown before exiting; uncaught
 * exceptions and unhandled rejections are logged so the supervisor survives.
 */
function setupSignalHandlers() {
    // Graceful shutdown on termination signals.
    process.on('SIGTERM', async () => {
        console.log('[Master] Received SIGTERM, shutting down...');
        await stopWorker(true);
        process.exit(0);
    });

    process.on('SIGINT', async () => {
        console.log('[Master] Received SIGINT, shutting down...');
        await stopWorker(true);
        process.exit(0);
    });

    // Log (but deliberately survive) uncaught errors: the master's job is to
    // keep supervising the worker, not to crash with it.
    process.on('uncaughtException', (error) => {
        console.error('[Master] Uncaught exception:', error);
    });

    process.on('unhandledRejection', (reason, promise) => {
        console.error('[Master] Unhandled rejection at:', promise, 'reason:', reason);
    });
}
352
+
353
+ /**
354
+ * 主函数
355
+ */
356
/**
 * Master-process entry point: print a startup banner, install signal
 * handlers, start the management HTTP server, then spawn the worker.
 */
async function main() {
    console.log('='.repeat(50));
    console.log('[Master] AIClient2API Master Process');
    console.log('[Master] PID:', process.pid);
    console.log('[Master] Node version:', process.version);
    console.log('[Master] Working directory:', process.cwd());
    console.log('='.repeat(50));

    // Install SIGTERM/SIGINT and crash handlers.
    setupSignalHandlers();

    // Start the management HTTP server.
    createMasterServer();

    // Spawn the worker child process.
    startWorker();
}
373
+
374
+ // 启动主进程
375
// Boot the master process; a startup failure is fatal.
main().catch(error => {
    console.error('[Master] Failed to start:', error);
    process.exit(1);
});
src/core/plugin-manager.js ADDED
@@ -0,0 +1,522 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * 插件管理器 - 可插拔插件系统核心
3
+ *
4
+ * 功能:
5
+ * 1. 插件注册与加载
6
+ * 2. 生命周期管理(init/destroy)
7
+ * 3. 扩展点管理(中间件、路由、钩子)
8
+ * 4. 插件配置管理
9
+ */
10
+
11
import { promises as fs } from 'fs';
import { existsSync } from 'fs';
import path from 'path';
import { pathToFileURL } from 'url';
14
+
15
+ // 插件配置文件路径
16
+ const PLUGINS_CONFIG_FILE = path.join(process.cwd(), 'configs', 'plugins.json');
17
+
18
+ /**
19
+ * 插件类型常量
20
+ */
21
/**
 * Plugin type constants.
 */
export const PLUGIN_TYPE = {
    AUTH: 'auth', // auth plugin: participates in the authentication flow
    MIDDLEWARE: 'middleware' // plain middleware: not involved in authentication
};
25
+
26
+ /**
27
+ * 插件接口定义(JSDoc 类型)
28
+ * @typedef {Object} Plugin
29
+ * @property {string} name - 插件名称(唯一标识)
30
+ * @property {string} version - 插件版本
31
+ * @property {string} [description] - 插件描述
32
+ * @property {string} [type] - 插件类型:'auth'(认证插件)或 'middleware'(普通中间件,默认)
33
+ * @property {boolean} [enabled] - 是否启用(默认 true)
34
+ * @property {number} [_priority] - 优先级,数字越小越先执行(默认 100)
35
+ * @property {boolean} [_builtin] - 是否为内置插件(内置插件最后执行)
36
+ * @property {Function} [init] - 初始化钩子 (config) => Promise<void>
37
+ * @property {Function} [destroy] - 销毁钩子 () => Promise<void>
38
+ * @property {Function} [middleware] - 请求中间件 (req, res, requestUrl, config) => Promise<{handled: boolean, data?: Object}>
39
+ * @property {Function} [authenticate] - 认证方法(仅 type='auth' 时有效)(req, res, requestUrl, config) => Promise<{handled: boolean, authorized: boolean|null, error?: Object, data?: Object}>
40
+ * @property {Array<{method: string, path: string|RegExp, handler: Function}>} [routes] - 路由定义
41
+ * @property {string[]} [staticPaths] - 静态文件路径(相对于 static 目录)
42
+ * @property {Object} [hooks] - 钩子函数
43
+ * @property {Function} [hooks.onBeforeRequest] - 请求前钩子 (req, config) => Promise<void>
44
+ * @property {Function} [hooks.onAfterResponse] - 响应后钩子 (req, res, config) => Promise<void>
45
+ * @property {Function} [hooks.onContentGenerated] - 内容生成后钩子 (config) => Promise<void>
46
+ */
47
+
48
+ /**
49
+ * 插件管理器类
50
+ */
51
/**
 * Plugin manager - core of the pluggable plugin system.
 *
 * Responsibilities:
 * 1. Plugin registration and loading
 * 2. Lifecycle management (init/destroy)
 * 3. Extension points (middleware, routes, hooks)
 * 4. Plugin configuration management
 */
class PluginManager {
    constructor() {
        /** @type {Map<string, Plugin>} registered plugins keyed by name */
        this.plugins = new Map();
        /** @type {Object} persisted per-plugin settings ({ plugins: { [name]: {...} } }) */
        this.pluginsConfig = { plugins: {} };
        /** @type {boolean} set once initAll() has completed */
        this.initialized = false;
    }

    /**
     * Load the plugin configuration file; if it does not exist, generate a
     * default config by scanning the plugins directory and persist it.
     * On any failure, falls back to an empty config.
     */
    async loadConfig() {
        try {
            if (existsSync(PLUGINS_CONFIG_FILE)) {
                const content = await fs.readFile(PLUGINS_CONFIG_FILE, 'utf8');
                this.pluginsConfig = JSON.parse(content);
            } else {
                // Scan the plugins directory to generate a default config.
                this.pluginsConfig = await this.generateDefaultConfig();
                await this.saveConfig();
            }
        } catch (error) {
            console.error('[PluginManager] Failed to load config:', error.message);
            this.pluginsConfig = { plugins: {} };
        }
    }

    /**
     * Scan the plugins directory and build a default configuration,
     * importing each plugin's index.js to read its metadata.
     * @returns {Promise<Object>} default plugin configuration
     */
    async generateDefaultConfig() {
        const defaultConfig = { plugins: {} };
        const pluginsDir = path.join(process.cwd(), 'src', 'plugins');

        try {
            if (!existsSync(pluginsDir)) {
                return defaultConfig;
            }

            const entries = await fs.readdir(pluginsDir, { withFileTypes: true });

            for (const entry of entries) {
                if (!entry.isDirectory()) continue;

                const pluginPath = path.join(pluginsDir, entry.name, 'index.js');
                if (!existsSync(pluginPath)) continue;

                try {
                    // Dynamically import the plugin to read its metadata.
                    // pathToFileURL() builds a valid file: URL on every
                    // platform; `file://${path}` breaks on Windows paths
                    // (backslashes / drive letters).
                    const pluginModule = await import(pathToFileURL(pluginPath).href);
                    const plugin = pluginModule.default || pluginModule;

                    if (plugin && plugin.name) {
                        defaultConfig.plugins[plugin.name] = {
                            enabled: true,
                            description: plugin.description || ''
                        };
                        console.log(`[PluginManager] Found plugin for default config: ${plugin.name}`);
                    }
                } catch (importError) {
                    // Import failed - fall back to using the directory name as the plugin name.
                    defaultConfig.plugins[entry.name] = {
                        enabled: true,
                        description: ''
                    };
                    console.warn(`[PluginManager] Could not import plugin ${entry.name}, using directory name:`, importError.message);
                }
            }
        } catch (error) {
            console.error('[PluginManager] Failed to scan plugins directory:', error.message);
        }

        return defaultConfig;
    }

    /**
     * Persist the plugin configuration file (creating its directory if needed).
     */
    async saveConfig() {
        try {
            const dir = path.dirname(PLUGINS_CONFIG_FILE);
            if (!existsSync(dir)) {
                await fs.mkdir(dir, { recursive: true });
            }
            await fs.writeFile(PLUGINS_CONFIG_FILE, JSON.stringify(this.pluginsConfig, null, 2), 'utf8');
        } catch (error) {
            console.error('[PluginManager] Failed to save config:', error.message);
        }
    }

    /**
     * Register a plugin. Duplicate names are skipped with a warning.
     * @param {Plugin} plugin - plugin object (must have a `name`)
     * @throws {Error} when the plugin has no name
     */
    register(plugin) {
        if (!plugin.name) {
            throw new Error('Plugin must have a name');
        }
        if (this.plugins.has(plugin.name)) {
            console.warn(`[PluginManager] Plugin "${plugin.name}" is already registered, skipping`);
            return;
        }
        this.plugins.set(plugin.name, plugin);
        console.log(`[PluginManager] Registered plugin: ${plugin.name} v${plugin.version || '1.0.0'}`);
    }

    /**
     * Initialize every enabled plugin (enabled by default unless the config
     * says `enabled: false`). A plugin whose init() throws stays disabled.
     * @param {Object} config - server configuration passed to each init()
     */
    async initAll(config) {
        await this.loadConfig();

        for (const [name, plugin] of this.plugins) {
            const pluginConfig = this.pluginsConfig.plugins[name] || {};
            const enabled = pluginConfig.enabled !== false; // enabled by default

            if (!enabled) {
                console.log(`[PluginManager] Plugin "${name}" is disabled, skipping init`);
                continue;
            }

            try {
                if (typeof plugin.init === 'function') {
                    await plugin.init(config);
                    console.log(`[PluginManager] Initialized plugin: ${name}`);
                }
                plugin._enabled = true;
            } catch (error) {
                console.error(`[PluginManager] Failed to init plugin "${name}":`, error.message);
                plugin._enabled = false;
            }
        }

        this.initialized = true;
    }

    /**
     * Destroy all enabled plugins (calls each plugin's destroy() if present).
     */
    async destroyAll() {
        for (const [name, plugin] of this.plugins) {
            if (!plugin._enabled) continue;

            try {
                if (typeof plugin.destroy === 'function') {
                    await plugin.destroy();
                    console.log(`[PluginManager] Destroyed plugin: ${name}`);
                }
            } catch (error) {
                console.error(`[PluginManager] Failed to destroy plugin "${name}":`, error.message);
            }
        }
        this.initialized = false;
    }

    /**
     * Whether a plugin is registered AND enabled.
     * @param {string} name - plugin name
     * @returns {boolean}
     */
    isEnabled(name) {
        const plugin = this.plugins.get(name);
        return plugin && plugin._enabled === true;
    }

    /**
     * All enabled plugins sorted by priority.
     * Lower `_priority` runs first; built-in plugins (`_builtin: true`) always
     * run last.
     * @returns {Plugin[]}
     */
    getEnabledPlugins() {
        return Array.from(this.plugins.values())
            .filter(p => p._enabled)
            .sort((a, b) => {
                // Built-in plugins sort to the end.
                const aBuiltin = a._builtin ? 1 : 0;
                const bBuiltin = b._builtin ? 1 : 0;
                if (aBuiltin !== bBuiltin) return aBuiltin - bBuiltin;

                // Otherwise by priority (smaller number runs earlier).
                const aPriority = a._priority || 100;
                const bPriority = b._priority || 100;
                return aPriority - bPriority;
            });
    }

    /**
     * All enabled auth plugins (type 'auth' with an authenticate()), sorted by priority.
     * @returns {Plugin[]}
     */
    getAuthPlugins() {
        return this.getEnabledPlugins().filter(p =>
            p.type === PLUGIN_TYPE.AUTH && typeof p.authenticate === 'function'
        );
    }

    /**
     * All enabled plain middleware plugins (non-auth with a middleware()), sorted by priority.
     * @returns {Plugin[]}
     */
    getMiddlewarePlugins() {
        return this.getEnabledPlugins().filter(p =>
            p.type !== PLUGIN_TYPE.AUTH && typeof p.middleware === 'function'
        );
    }

    /**
     * Run the authentication chain. Only plugins with type 'auth' participate.
     *
     * Auth plugin return values:
     * - { handled: true }   - request already handled (e.g. error response sent); stop
     * - { authorized: true, data: {...} } - authenticated; optional data merged into config
     * - { authorized: false } - authentication failed; error response already sent
     * - { authorized: null }  - plugin does not handle this request; try the next one
     *
     * @param {http.IncomingMessage} req - HTTP request
     * @param {http.ServerResponse} res - HTTP response
     * @param {URL} requestUrl - parsed request URL
     * @param {Object} config - server configuration (mutated with plugin data on success)
     * @returns {Promise<{handled: boolean, authorized: boolean}>}
     */
    async executeAuth(req, res, requestUrl, config) {
        const authPlugins = this.getAuthPlugins();

        for (const plugin of authPlugins) {
            try {
                const result = await plugin.authenticate(req, res, requestUrl, config);

                if (!result) continue;

                // Request already handled (e.g. an error response was sent): stop.
                if (result.handled) {
                    return { handled: true, authorized: false };
                }

                // Authentication explicitly failed: stop.
                if (result.authorized === false) {
                    return { handled: true, authorized: false };
                }

                // Authentication succeeded: merge plugin data and return.
                if (result.authorized === true) {
                    if (result.data) {
                        Object.assign(config, result.data);
                    }
                    return { handled: false, authorized: true };
                }

                // authorized === null means this plugin does not apply; try the next.
            } catch (error) {
                console.error(`[PluginManager] Auth error in plugin "${plugin.name}":`, error.message);
            }
        }

        // No auth plugin accepted the request: unauthorized.
        return { handled: false, authorized: false };
    }

    /**
     * Run the plain middleware chain. Only plugins with type != 'auth' participate.
     *
     * Middleware return values:
     * - { handled: true }  - request handled; stop processing
     * - { handled: false, data: {...} } - continue; data merged into config
     * - null/undefined     - continue with the next middleware
     *
     * @param {http.IncomingMessage} req - HTTP request
     * @param {http.ServerResponse} res - HTTP response
     * @param {URL} requestUrl - parsed request URL
     * @param {Object} config - server configuration (mutated with plugin data)
     * @returns {Promise<{handled: boolean}>}
     */
    async executeMiddleware(req, res, requestUrl, config) {
        const middlewarePlugins = this.getMiddlewarePlugins();

        for (const plugin of middlewarePlugins) {
            try {
                const result = await plugin.middleware(req, res, requestUrl, config);

                if (!result) continue;

                // Request handled: stop.
                if (result.handled) {
                    return { handled: true };
                }

                // Merge any data the middleware produced.
                if (result.data) {
                    Object.assign(config, result.data);
                }
            } catch (error) {
                console.error(`[PluginManager] Middleware error in plugin "${plugin.name}":`, error.message);
            }
        }

        return { handled: false };
    }

    /**
     * Dispatch a request to plugin-defined routes. A string route path matches
     * exactly or as a prefix (path + '/'); a RegExp route path is tested.
     * @param {string} method - HTTP method
     * @param {string} path - request path
     * @param {http.IncomingMessage} req - HTTP request
     * @param {http.ServerResponse} res - HTTP response
     * @returns {Promise<boolean>} whether any route handled the request
     */
    async executeRoutes(method, path, req, res) {
        for (const plugin of this.getEnabledPlugins()) {
            if (!Array.isArray(plugin.routes)) continue;

            for (const route of plugin.routes) {
                const methodMatch = route.method === '*' || route.method.toUpperCase() === method;
                if (!methodMatch) continue;

                let pathMatch = false;
                if (route.path instanceof RegExp) {
                    pathMatch = route.path.test(path);
                } else if (typeof route.path === 'string') {
                    pathMatch = path === route.path || path.startsWith(route.path + '/');
                }

                if (pathMatch) {
                    try {
                        const handled = await route.handler(method, path, req, res);
                        if (handled) return true;
                    } catch (error) {
                        console.error(`[PluginManager] Route error in plugin "${plugin.name}":`, error.message);
                    }
                }
            }
        }
        return false;
    }

    /**
     * Collect the static file paths declared by all enabled plugins.
     * @returns {string[]}
     */
    getStaticPaths() {
        const paths = [];
        for (const plugin of this.getEnabledPlugins()) {
            if (Array.isArray(plugin.staticPaths)) {
                paths.push(...plugin.staticPaths);
            }
        }
        return paths;
    }

    /**
     * Whether a request path is a plugin-declared static file
     * (matches with or without a leading '/').
     * @param {string} path - request path
     * @returns {boolean}
     */
    isPluginStaticPath(path) {
        const staticPaths = this.getStaticPaths();
        return staticPaths.some(sp => path === sp || path === '/' + sp);
    }

    /**
     * Invoke a named hook on every enabled plugin that defines it.
     * Hook errors are logged and do not abort the chain.
     * @param {string} hookName - hook name (e.g. 'onBeforeRequest')
     * @param {...any} args - arguments forwarded to the hook
     */
    async executeHook(hookName, ...args) {
        for (const plugin of this.getEnabledPlugins()) {
            if (!plugin.hooks || typeof plugin.hooks[hookName] !== 'function') continue;

            try {
                await plugin.hooks[hookName](...args);
            } catch (error) {
                console.error(`[PluginManager] Hook "${hookName}" error in plugin "${plugin.name}":`, error.message);
            }
        }
    }

    /**
     * Summarize all registered plugins (for the management API).
     * @returns {Object[]}
     */
    getPluginList() {
        const list = [];
        for (const [name, plugin] of this.plugins) {
            const pluginConfig = this.pluginsConfig.plugins[name] || {};
            list.push({
                name: plugin.name,
                version: plugin.version || '1.0.0',
                description: plugin.description || pluginConfig.description || '',
                enabled: plugin._enabled === true,
                hasMiddleware: typeof plugin.middleware === 'function',
                hasRoutes: Array.isArray(plugin.routes) && plugin.routes.length > 0,
                hasHooks: plugin.hooks && Object.keys(plugin.hooks).length > 0
            });
        }
        return list;
    }

    /**
     * Enable or disable a plugin, persisting the change to the config file
     * and updating the in-memory flag if the plugin is registered.
     * @param {string} name - plugin name
     * @param {boolean} enabled - whether to enable
     */
    async setPluginEnabled(name, enabled) {
        if (!this.pluginsConfig.plugins[name]) {
            this.pluginsConfig.plugins[name] = {};
        }
        this.pluginsConfig.plugins[name].enabled = enabled;
        await this.saveConfig();

        const plugin = this.plugins.get(name);
        if (plugin) {
            plugin._enabled = enabled;
        }
    }
}
471
+
472
+ // 单例实例
473
+ const pluginManager = new PluginManager();
474
+
475
+ /**
476
+ * 自动发现并加载插件
477
+ * 扫描 src/plugins/ 目录下的所有插件
478
+ */
479
/**
 * Auto-discover and load plugins.
 * Scans every directory under src/plugins/ (creating it if missing) and
 * registers each plugin whose index.js exports an object with a `name`.
 */
export async function discoverPlugins() {
    const pluginsDir = path.join(process.cwd(), 'src', 'plugins');

    try {
        if (!existsSync(pluginsDir)) {
            await fs.mkdir(pluginsDir, { recursive: true });
            console.log('[PluginManager] Created plugins directory');
        }

        const entries = await fs.readdir(pluginsDir, { withFileTypes: true });

        for (const entry of entries) {
            if (!entry.isDirectory()) continue;

            const pluginPath = path.join(pluginsDir, entry.name, 'index.js');
            if (!existsSync(pluginPath)) continue;

            try {
                // Dynamically import the plugin. pathToFileURL() builds a valid
                // file: URL on every platform; `file://${path}` produces an
                // invalid URL for Windows paths (backslashes / drive letters).
                const pluginModule = await import(pathToFileURL(pluginPath).href);
                const plugin = pluginModule.default || pluginModule;

                if (plugin && plugin.name) {
                    pluginManager.register(plugin);
                }
            } catch (error) {
                console.error(`[PluginManager] Failed to load plugin from ${entry.name}:`, error.message);
            }
        }
    } catch (error) {
        console.error('[PluginManager] Failed to discover plugins:', error.message);
    }
}
512
+
513
+ /**
514
+ * 获取插件管理器实例
515
+ * @returns {PluginManager}
516
+ */
517
/**
 * Get the shared PluginManager singleton.
 * @returns {PluginManager}
 */
export function getPluginManager() {
    return pluginManager;
}
520
+
521
+ // 导出类和实例
522
+ export { PluginManager, pluginManager };
src/example/claude/oldResponse1762501965597.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"id":"msg_chatcmpl-690da54ca66df0e03c78831b","type":"message","role":"assistant","content":[{"type":"text","text":"I"}],"model":"claude-haiku-4-5-20251001","stop_reason":"max_tokens","stop_sequence":null,"usage":{"input_tokens":10855,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":1,"prompt_tokens":10855,"cached_tokens":10855,"completion_tokens":1,"total_tokens":10856}}
src/example/claude/oldResponseChunk1762501803864.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"type":"message_start","message":{"id":"msg_chatcmpl-690da4a34b300f384a5563b3","type":"message","role":"assistant","content":[],"model":"claude-sonnet-4-5-20250929","stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":22969,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":0,"service_tier":"standard","prompt_tokens":22969,"cached_tokens":0}}}
2
+ {"type":"content_block_start","index":0,"content_block":{"type":"tool_use","id":"TodoWrite_3","name":"TodoWrite","input":{}}}
3
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"{\""}}
4
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"t"}}
5
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"odos"}}
6
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"\":[{\""}}
7
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"content"}}
8
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"\":\""}}
9
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"读取"}}
10
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"当前的"}}
11
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"README"}}
12
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"文件"}}
13
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"\",\""}}
14
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"status"}}
15
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"\"}"}}
16
+ {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta","partial_json":"]}"}}
17
+ {"type":"content_block_stop","index":0}
18
+ {"type":"message_delta","delta":{"stop_reason":"tool_use","stop_sequence":null},"usage":{"input_tokens":22969,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":80,"prompt_tokens":22969,"completion_tokens":80,"total_tokens":23049,"cached_tokens":15872}}
19
+ {"type":"message_stop"}
src/example/claude/originalRequestBody1762501795790.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model":"claude-sonnet-4-5-20250929","messages":[{"role":"user","content":[{"type":"text","text":"<ide_opened_file>The user opened the file d:\\ai\\code\\AIClient2API\\src\\example\\openai\\oldResponse1762501667708.json in the IDE. This may or may not be related to the current task.</ide_opened_file>"},{"type":"text","text":"更新readme,写入一行随机数"}]},{"role":"assistant","content":[{"type":"text","text":"我将帮您更新README文件,添加一行随机数。"},{"type":"tool_use","id":"TodoWrite_0","name":"TodoWrite","input":{"todos":[{"content":"读取当前的README文件","status":"pending","activeForm":"读取当前的README文件"},{"content":"在README文件中添加一行随机数","status":"pending","activeForm":"在README文件中添加一行随机数"},{"content":"保存更新后的README文件","status":"pending","activeForm":"保存更新后的README文件"}]}}]},{"role":"user","content":[{"tool_use_id":"TodoWrite_0","type":"tool_result","content":"Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. Please proceed with the current tasks if applicable"}]},{"role":"assistant","content":[{"type":"tool_use","id":"TodoWrite_1","name":"TodoWrite","input":{"todos":[{"content":"读取当前的README文件","status":"in_progress","activeForm":"正在读取README文件"},{"content":"在README文件中添加一行随机数","status":"pending","activeForm":"在README文件中添加一行随机数"},{"content":"保存更新后的README文件","status":"pending","activeForm":"保存更新后的README文件"}]}}]},{"role":"user","content":[{"tool_use_id":"TodoWrite_1","type":"tool_result","content":"Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. 
Please proceed with the current tasks if applicable"}]},{"role":"assistant","content":[{"type":"tool_use","id":"Read_2","name":"Read","input":{"file_path":"D:\\ai\\code\\AIClient2API\\README.md"},"cache_control":{"type":"ephemeral"}}]},{"role":"user","content":[{"tool_use_id":"Read_2","type":"tool_result","content":" 1→<div align=\"center\">\n 2→\n 3→![logo](src/img/logo-min.webp)\n 4→\n 5→# AIClient-2-API 🚀\n 6→\n 7→**A powerful proxy that can unify the requests of various large model APIs (Gemini CLI, Qwen Code Plus, Kiro Claude...) that are only used within the client into a local OpenAI compatible interface.**\n 8→\n 9→</div>\n 10→\n 11→<div align=\"center\">\n 12→\n 13→<a href=\"https://deepwiki.com/justlovemaki/AIClient-2-API\"><img src=\"https://deepwiki.com/badge.svg\" alt=\"Ask DeepWiki\" style=\"width: 134px; height: 23px;margin-bottom: 3px;\"></a>\n 14→\n 15→[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)\n 16→[![Node.js](https://img.shields.io/badge/Node.js-≥20.0.0-green.svg)](https://nodejs.org/)\n 17→[![Docker](https://img.shields.io/badge/docker-≥20.0.0-blue.svg)](https://aiproxy.justlikemaki.vip/en/docs/installation/docker-deployment.html)\n 18→\n 19→\n 20→[**中文**](./README-ZH.md) | [**English**](./README.md) | [**日本語**](./README-JA.md) | [**📚 Complete Documentation**](https://aiproxy.justlikemaki.vip/en/)\n 21→\n 22→</div>\n 23→\n 24→`AIClient2API` is an API proxy service that breaks through client limitations, converting free large models originally restricted to client use only (such as Gemini CLI, Qwen Code Plus, Kiro Claude) into standard OpenAI-compatible interfaces that can be called by any application. Built on Node.js, it supports intelligent conversion between three major protocols (OpenAI, Claude, Gemini), enabling tools like Cherry-Studio, NextChat, and Cline to freely use advanced models such as Claude Sonnet 4.5, Gemini 2.5 Flash, and Qwen3 Coder Plus at scale. 
The project adopts a modular architecture based on strategy and adapter patterns, with built-in account pool management, intelligent polling, automatic failover, and health check mechanisms, ensuring 99.9% service availability.\n 25→\n 26→> [!NOTE]\n 27→> **🎉 Important Milestone**\n 28→>\n 29→> - Thanks to Ruan Yifeng for the recommendation in [Weekly Issue 359](https://www.ruanyifeng.com/blog/2025/08/weekly-issue-359.html)\n 30→>\n 31→> **📅 Version Update Log**\n 32→>\n 33→> - **2025.11.06** - Added support for Gemini 3 Preview, enhanced model compatibility and performance optimization\n 34→> - **2025.10.18** - Kiro open registration, new accounts get 500 credits, full support for Claude Sonnet 4.5\n 35→> - **2025.09.01** - Integrated Qwen Code CLI, added `qwen3-coder-plus` model support\n 36→> - **2025.08.29** - Released account pool management feature, supporting multi-account polling, intelligent failover, and automatic degradation strategies\n 37→> - Configuration: Add `PROVIDER_POOLS_FILE_PATH` parameter in config.json\n 38→> - Reference configuration: [provider_pools.json](./provider_pools.json.example)\n 39→\n 40→---\n 41→\n 42→## 💡 Core Advantages\n 43→\n 44→### 🎯 Unified Access, One-Stop Management\n 45→* **Multi-Model Unified Interface**: Through standard OpenAI-compatible protocol, configure once to access mainstream large models including Gemini, Claude, GPT, Qwen Code, Kimi K2, GLM-4.6\n 46→* **Flexible Switching Mechanism**: Support dynamic model switching via startup parameters, Path routing, or environment variables to meet different scenario requirements\n 47→* **Zero-Cost Migration**: Fully compatible with OpenAI API specifications, tools like Cherry-Studio, NextChat, Cline can be used without modification\n 48→* **Multi-Protocol Intelligent Conversion**: Support intelligent conversion between OpenAI, Claude, and Gemini protocols for cross-protocol model invocation\n 49→ * Call Claude models using OpenAI protocol: Use `claude-custom` or 
`claude-kiro-oauth` providers\n 50→ * Call Gemini models using OpenAI protocol: Use `gemini-cli-oauth` provider\n 51→ * Call Gemini models using Claude protocol: Use `gemini-cli-oauth` provider\n 52→ * Call OpenAI models using Claude protocol: Use `openai-custom` or `openai-qwen-oauth` providers\n 53→\n 54→### 🚀 Break Through Limitations, Improve Efficiency\n 55→* **Bypass Official Restrictions**: Utilize OAuth authorization mechanism to effectively break through rate and quota limits of free APIs like Gemini\n 56→* **Free Advanced Models**: Use Claude Sonnet 4.5 for free via Kiro API mode, use Qwen3 Coder Plus via Qwen OAuth mode, reducing usage costs\n 57→* **Intelligent Account Pool Scheduling**: Support multi-account polling, automatic failover, and configuration degradation, ensuring 99.9% service availability\n 58→\n 59→### 🛡️ Secure and Controllable, Data Transparent\n 60→* **Full-Chain Log Recording**: Capture all request and response data, supporting auditing and debugging\n 61→* **Private Dataset Construction**: Quickly build proprietary training datasets based on log data\n 62→* **System Prompt Management**: Support override and append modes, achieving perfect combination of unified base instructions and personalized extensions\n 63→\n 64→### 🔧 Developer-Friendly, Easy to Extend\n 65→* **Modular Architecture**: Based on strategy and adapter patterns, adding new model providers requires only 3 steps\n 66→* **Complete Test Coverage**: Integration and unit test coverage 90%+, ensuring code quality\n 67→* **Containerized Deployment**: Provides Docker support, one-click deployment, cross-platform operation\n 68→* **MCP Protocol Support**: Perfectly compatible with Model Context Protocol, easily extend functionality\n 69→\n 70→---\n 71→\n 72→## 📑 Quick Navigation\n 73→\n 74→- [🐳 Docker Deployment](https://aiproxy.justlikemaki.vip/en/docs/installation/docker-deployment.html)\n 75→- [🎨 Model Protocol and Provider Relationship 
Diagram](#-model-protocol-and-provider-relationship-diagram)\n 76→- [🔧 Usage Instructions](#-usage-instructions)\n 77→- [🚀 Project Startup Parameters](#-project-startup-parameters)\n 78→- [📄 Open Source License](#-open-source-license)\n 79→- [🙏 Acknowledgements](#-acknowledgements)\n 80→- [⚠️ Disclaimer](#-disclaimer)\n 81→\n 82→---\n 83→\n 84→## 🎨 Model Protocol and Provider Relationship Diagram\n 85→\n 86→This project supports multiple model providers through different protocols. The following is an overview of their relationships:\n 87→\n 88→* **OpenAI Protocol (P_OPENAI)**: Implemented by `openai-custom`, `gemini-cli-oauth`, `claude-custom`, `claude-kiro-oauth`, and `openai-qwen-oauth` model providers.\n 89→* **Claude Protocol (P_CLAUDE)**: Implemented by `claude-custom`, `claude-kiro-oauth`, `gemini-cli-oauth`, `openai-custom`, and `openai-qwen-oauth` model providers.\n 90→* **Gemini Protocol (P_GEMINI)**: Implemented by `gemini-cli-oauth` model provider.\n 91→\n 92→Detailed relationship diagram:\n 93→\n 94→ ```mermaid\n 95→ \n 96→ graph TD\n 97→ subgraph Core_Protocols[\"Core Protocols\"]\n 98→ P_OPENAI[OpenAI Protocol]\n 99→ P_GEMINI[Gemini Protocol]\n 100→ P_CLAUDE[Claude Protocol]\n 101→ end\n 102→ \n 103→ subgraph Supported_Model_Providers[\"Supported Model Providers\"]\n 104→ MP_OPENAI[openai-custom]\n 105→ MP_GEMINI[gemini-cli-oauth]\n 106→ MP_CLAUDE_C[claude-custom]\n 107→ MP_CLAUDE_K[claude-kiro-oauth]\n 108→ MP_QWEN[openai-qwen-oauth]\n 109→ end\n 110→ \n 111→ P_OPENAI ---|Support| MP_OPENAI\n 112→ P_OPENAI ---|Support| MP_QWEN\n 113→ P_OPENAI ---|Support| MP_GEMINI\n 114→ P_OPENAI ---|Support| MP_CLAUDE_C\n 115→ P_OPENAI ---|Support| MP_CLAUDE_K\n 116→ \n 117→ P_GEMINI ---|Support| MP_GEMINI\n 118→ \n 119→ P_CLAUDE ---|Support| MP_CLAUDE_C\n 120→ P_CLAUDE ---|Support| MP_CLAUDE_K\n 121→ P_CLAUDE ---|Support| MP_GEMINI\n 122→ P_CLAUDE ---|Support| MP_OPENAI\n 123→ P_CLAUDE ---|Support| MP_QWEN\n 124→ \n 125→ style P_OPENAI 
fill:#f9f,stroke:#333,stroke-width:2px\n 126→ style P_GEMINI fill:#ccf,stroke:#333,stroke-width:2px\n 127→ style P_CLAUDE fill:#cfc,stroke:#333,stroke-width:2px\n 128→\n 129→ ```\n 130→\n 131→---\n 132→\n 133→## 🔧 Usage Instructions\n 134→\n 135→### 📋 Core Features\n 136→\n 137→#### MCP Protocol Support\n 138→This project is fully compatible with **Model Context Protocol (MCP)**, enabling seamless integration with MCP-supporting clients for powerful functional extensions.\n 139→\n 140→#### Multimodal Input Capabilities\n 141→Supports various input types including images and documents, providing richer interactive experiences and more powerful application scenarios.\n 142→\n 143→#### Latest Model Support\n 144→Seamlessly supports the following latest large models, simply configure the corresponding OpenAI or Claude compatible interface in [`config.json`](./config.json):\n 145→* **Kimi K2** - Moonshot AI's latest flagship model\n 146→* **GLM-4.5** - Zhipu AI's latest version\n 147→* **Qwen Code** - Alibaba Tongyi Qianwen code-specific model\n 148→\n 149→---\n 150→\n 151→### 🔐 Authorization Configuration Guide\n 152→\n 153→#### Gemini CLI OAuth Configuration\n 154→1. **Obtain OAuth Credentials**: Visit [Google Cloud Console](https://console.cloud.google.com/) to create a project and enable Gemini API\n 155→2. **First Authorization**: After using Gemini service, the command line will print Google authorization page, copy the page to browser for authorization, then return to command line\n 156→3. **Credential Storage**: After successful authorization, `oauth_creds.json` file will be automatically generated and saved to `~/.gemini` directory\n 157→4. **Project Configuration**: Need to provide a valid Google Cloud project ID, can be specified via startup parameter `--project-id`\n 158→\n 159→#### Qwen Code OAuth Configuration\n 160→1. **First Authorization**: After starting the service, the system will automatically open the authorization page in the browser\n 161→2. 
**Credential Storage**: After successful authorization, `oauth_creds.json` file will be automatically generated and saved to `~/.qwen` directory\n 162→3. **Recommended Parameters**: Use official default parameters for best results\n 163→ ```json\n 164→ {\n 165→ \"temperature\": 0,\n 166→ \"top_p\": 1\n 167→ }\n 168→ ```\n 169→\n 170→#### Kiro API Configuration\n 171→1. **Environment Preparation**: [Download and install Kiro client](https://aibook.ren/archives/kiro-install)\n 172→2. **Complete Authorization**: Log in to your account in the client to generate `kiro-auth-token.json` credential file\n 173→3. **Best Practice**: Recommended to use with **Claude Code** for optimal experience\n 174→4. **Important Notice**: Kiro service usage policy has been updated, please visit the official website for the latest usage restrictions and terms\n 175→\n 176→#### OpenAI Responses API\n 177→* **Application Scenario**: Suitable for scenarios requiring structured dialogue using OpenAI Responses API, such as Codex\n 178→* **Configuration Method**:\n 179→ * Method 1: Set `MODEL_PROVIDER` to `openaiResponses-custom` in [`config.json`](./config.json)\n 180→ * Method 2: Use startup parameter `--model-provider openaiResponses-custom`\n 181→ * Method 3: Use path routing `/openaiResponses-custom`\n 182→* **Required Parameters**: Provide valid API key and base URL\n 183→\n 184→---\n 185→\n 186→### 🔄 Model Provider Switching\n 187→\n 188→This project provides two flexible model switching methods to meet different usage scenario requirements.\n 189→\n 190→#### Method 1: Startup Parameter Switching\n 191→\n 192→Specify the default model provider via command line parameters:\n 193→\n 194→```bash\n 195→# Use Gemini provider\n 196→node src/api-server.js --model-provider gemini-cli-oauth --project-id your-project-id\n 197→\n 198→# Use Claude Kiro provider\n 199→node src/api-server.js --model-provider claude-kiro-oauth\n 200→\n 201→# Use Qwen provider\n 202→node src/api-server.js 
--model-provider openai-qwen-oauth\n 203→```\n 204→\n 205→**Available Model Provider Identifiers**:\n 206→- `openai-custom` - Standard OpenAI API\n 207→- `claude-custom` - Official Claude API\n 208→- `gemini-cli-oauth` - Gemini CLI OAuth\n 209→- `claude-kiro-oauth` - Kiro Claude OAuth\n 210→- `openai-qwen-oauth` - Qwen Code OAuth\n 211→- `openaiResponses-custom` - OpenAI Responses API\n 212→\n 213→#### Method 2: Path Routing Switching (Recommended)\n 214→\n 215→Achieve instant switching by specifying provider identifier in API request path:\n 216→\n 217→| Route Path | Description | Use Case |\n 218→|---------|------|---------|\n 219→| `/claude-custom` | Use Claude API from config file | Official Claude API calls |\n 220→| `/claude-kiro-oauth` | Access Claude via Kiro OAuth | Free use of Claude Sonnet 4.5 |\n 221→| `/openai-custom` | Use OpenAI provider to handle requests | Standard OpenAI API calls |\n 222→| `/gemini-cli-oauth` | Access via Gemini CLI OAuth | Break through Gemini free limits |\n 223→| `/openai-qwen-oauth` | Access via Qwen OAuth | Use Qwen Code Plus |\n 224→| `/openaiResponses-custom` | OpenAI Responses API | Structured dialogue scenarios |\n 225→\n 226→**Usage Examples**:\n 227→```bash\n 228→# Configure in programming agents like Cline, Kilo\n 229→API_ENDPOINT=http://localhost:3000/claude-kiro-oauth\n 230→\n 231→# Direct API call\n 232→curl http://localhost:3000/gemini-cli-oauth/v1/chat/completions \\\n 233→ -H \"Content-Type: application/json\" \\\n 234→ -d '{\"model\":\"gemini-2.0-flash-exp\",\"messages\":[...]}'\n 235→```\n 236→\n 237→---\n 238→\n 239→### 📁 Authorization File Storage Paths\n 240→\n 241→Default storage locations for authorization credential files of each service:\n 242→\n 243→| Service | Default Path | Description |\n 244→|------|---------|------|\n 245→| **Gemini** | `~/.gemini/oauth_creds.json` | OAuth authentication credentials |\n 246→| **Kiro** | `~/.aws/sso/cache/kiro-auth-token.json` | Kiro authentication token |\n 247→| 
**Qwen** | `~/.qwen/oauth_creds.json` | Qwen OAuth credentials |\n 248→\n 249→> **Note**: `~` represents the user home directory (Windows: `C:\\Users\\username`, Linux/macOS: `/home/username` or `/Users/username`)\n 250→>\n 251→> **Custom Path**: Can specify custom storage location via relevant parameters in configuration file or environment variables\n 252→\n 253→---\n 254→\n 255→## 🚀 Project Startup Parameters\n 256→\n 257→This project supports rich command-line parameter configuration, allowing flexible adjustment of service behavior as needed. The following is a detailed explanation of all startup parameters, displayed in functional groups:\n 258→\n 259→### 🔧 Server Configuration Parameters\n 260→\n 261→| Parameter | Type | Default Value | Description |\n 262→|------|------|--------|------|\n 263→| `--host` | string | localhost | Server listening address |\n 264→| `--port` | number | 3000 | Server listening port |\n 265→| `--api-key` | string | 123456 | API key for authentication |\n 266→\n 267→### 🤖 Model Provider Configuration Parameters\n 268→\n 269→| Parameter | Type | Default Value | Description |\n 270→|------|------|--------|------|\n 271→| `--model-provider` | string | gemini-cli-oauth | AI model provider, optional values: openai-custom, claude-custom, gemini-cli-oauth, claude-kiro-oauth, openai-qwen-oauth |\n 272→\n 273→### 🧠 OpenAI Compatible Provider Parameters\n 274→\n 275→| Parameter | Type | Default Value | Description |\n 276→|------|------|--------|------|\n 277→| `--openai-api-key` | string | null | OpenAI API key (required when `model-provider` is `openai-custom`) |\n 278→| `--openai-base-url` | string | null | OpenAI API base URL (required when `model-provider` is `openai-custom`) |\n 279→\n 280→### 🖥️ Claude Compatible Provider Parameters\n 281→\n 282→| Parameter | Type | Default Value | Description |\n 283→|------|------|--------|------|\n 284→| `--claude-api-key` | string | null | Claude API key (required when `model-provider` is 
`claude-custom`) |\n 285→| `--claude-base-url` | string | null | Claude API base URL (required when `model-provider` is `claude-custom`) |\n 286→\n 287→### 🔐 Gemini OAuth Authentication Parameters\n 288→\n 289→| Parameter | Type | Default Value | Description |\n 290→|------|------|--------|------|\n 291→| `--gemini-oauth-creds-base64` | string | null | Base64 string of Gemini OAuth credentials (optional when `model-provider` is `gemini-cli-oauth`, choose one with `--gemini-oauth-creds-file`) |\n 292→| `--gemini-oauth-creds-file` | string | null | Gemini OAuth credentials JSON file path (optional when `model-provider` is `gemini-cli-oauth`, choose one with `--gemini-oauth-creds-base64`) |\n 293→| `--project-id` | string | null | Google Cloud project ID (required when `model-provider` is `gemini-cli-oauth`) |\n 294→\n 295→### 🎮 Kiro OAuth Authentication Parameters\n 296→\n 297→| Parameter | Type | Default Value | Description |\n 298→|------|------|--------|------|\n 299→| `--kiro-oauth-creds-base64` | string | null | Base64 string of Kiro OAuth credentials (optional when `model-provider` is `claude-kiro-oauth`, choose one with `--kiro-oauth-creds-file`) |\n 300→| `--kiro-oauth-creds-file` | string | null | Kiro OAuth credentials JSON file path (optional when `model-provider` is `claude-kiro-oauth`, choose one with `--kiro-oauth-creds-base64`) |\n 301→\n 302→### 🐼 Qwen OAuth Authentication Parameters\n 303→\n 304→| Parameter | Type | Default Value | Description |\n 305→|------|------|--------|------|\n 306→| `--qwen-oauth-creds-file` | string | null | Qwen OAuth credentials JSON file path (required when `model-provider` is `openai-qwen-oauth`) |\n 307→\n 308→### 🔄 OpenAI Responses API Parameters\n 309→\n 310→| Parameter | Type | Default Value | Description |\n 311→|------|------|--------|------|\n 312→| `--model-provider` | string | openaiResponses-custom | Model provider, set to `openaiResponses-custom` when using OpenAI Responses API |\n 313→| `--openai-api-key` | 
string | null | OpenAI API key (required when `model-provider` is `openaiResponses-custom`) |\n 314→| `--openai-base-url` | string | null | OpenAI API base URL (required when `model-provider` is `openaiResponses-custom`) |\n 315→\n 316→### 📝 System Prompt Configuration Parameters\n 317→\n 318→| Parameter | Type | Default Value | Description |\n 319→|------|------|--------|------|\n 320→| `--system-prompt-file` | string | input_system_prompt.txt | System prompt file path |\n 321→| `--system-prompt-mode` | string | overwrite | System prompt mode, optional values: overwrite (override), append (append) |\n 322→\n 323→### 📊 Log Configuration Parameters\n 324→\n 325→| Parameter | Type | Default Value | Description |\n 326→|------|------|--------|------|\n 327→| `--log-prompts` | string | none | Prompt log mode, optional values: console (console), file (file), none (none) |\n 328→| `--prompt-log-base-name` | string | prompt_log | Prompt log file base name |\n 329→\n 330→### 🔄 Retry Mechanism Parameters\n 331→\n 332→| Parameter | Type | Default Value | Description |\n 333→|------|------|--------|------|\n 334→| `--request-max-retries` | number | 3 | Maximum number of automatic retries when API requests fail |\n 335→| `--request-base-delay` | number | 1000 | Base delay time (milliseconds) between automatic retries, delay increases after each retry |\n 336→\n 337→### ⏰ Scheduled Task Parameters\n 338→\n 339→| Parameter | Type | Default Value | Description |\n 340→|------|------|--------|------|\n 341→| `--cron-near-minutes` | number | 15 | Interval time (minutes) for OAuth token refresh task schedule |\n 342→| `--cron-refresh-token` | boolean | true | Whether to enable automatic OAuth token refresh task |\n 343→\n 344→### 🎯 Account Pool Configuration Parameters\n 345→\n 346→| Parameter | Type | Default Value | Description |\n 347→|------|------|--------|------|\n 348→| `--provider-pools-file` | string | null | Provider account pool configuration file path |\n 349→\n 350→### 
Usage Examples\n 351→\n 352→```bash\n 353→# Basic usage\n 354→node src/api-server.js\n 355→\n 356→# Specify port and API key\n 357→node src/api-server.js --port 8080 --api-key my-secret-key\n 358→\n 359→# Use OpenAI provider\n 360→node src/api-server.js --model-provider openai-custom --openai-api-key sk-xxx --openai-base-url https://api.openai.com/v1\n 361→\n 362→# Use Claude provider\n 363→node src/api-server.js --model-provider claude-custom --claude-api-key sk-ant-xxx --claude-base-url https://api.anthropic.com\n 364→\n 365→# Use OpenAI Responses API provider\n 366→node src/api-server.js --model-provider openaiResponses-custom --openai-api-key sk-xxx --openai-base-url https://api.openai.com/v1\n 367→\n 368→# Use Gemini provider (Base64 credentials)\n 369→node src/api-server.js --model-provider gemini-cli-oauth --gemini-oauth-creds-base64 eyJ0eXBlIjoi... --project-id your-project-id\n 370→\n 371→# Use Gemini provider (credentials file)\n 372→node src/api-server.js --model-provider gemini-cli-oauth --gemini-oauth-creds-file /path/to/credentials.json --project-id your-project-id\n 373→\n 374→# Configure system prompt\n 375→node src/api-server.js --system-prompt-file custom-prompt.txt --system-prompt-mode append\n 376→\n 377→# Configure logging\n 378→node src/api-server.js --log-prompts console\n 379→node src/api-server.js --log-prompts file --prompt-log-base-name my-logs\n 380→\n 381→# Complete example\n 382→node src/api-server.js \\\n 383→ --host 0.0.0.0 \\\n 384→ --port 3000 \\\n 385→ --api-key my-secret-key \\\n 386→ --model-provider gemini-cli-oauth \\\n 387→ --project-id my-gcp-project \\\n 388→ --gemini-oauth-creds-file ./credentials.json \\\n 389→ --system-prompt-file ./custom-system-prompt.txt \\\n 390→ --system-prompt-mode overwrite \\\n 391→ --log-prompts file \\\n 392→ --prompt-log-base-name api-logs\n 393→```\n 394→\n 395→---\n 396→\n 397→## 📄 Open Source License\n 398→\n 399→This project operates under the [**GNU General Public License v3 
(GPLv3)**](https://www.gnu.org/licenses/gpl-3.0). For complete details, please refer to the `LICENSE` file located in the root directory.\n 400→\n 401→## 🙏 Acknowledgements\n 402→\n 403→The development of this project was significantly inspired by the official Google Gemini CLI and incorporated some code implementations from Cline 3.18.0's `gemini-cli.ts`. We extend our sincere gratitude to the official Google team and the Cline development team for their exceptional work!\n 404→\n 405→## 🌟 Star History\n 406→\n 407→[![Star History Chart](https://api.star-history.com/svg?repos=justlovemaki/AIClient-2-API&type=Timeline)](https://www.star-history.com/#justlovemaki/AIClient-2-API&Timeline)\n 408→\n 409→---\n 410→\n 411→## ⚠️ Disclaimer\n 412→\n 413→### Usage Risk Warning\n 414→This project (AIClient-2-API) is for learning and research purposes only. Users assume all risks when using this project. The author is not responsible for any direct, indirect, or consequential losses resulting from the use of this project.\n 415→\n 416→### Third-Party Service Responsibility Statement\n 417→This project is an API proxy tool and does not provide any AI model services. All AI model services are provided by their respective third-party providers (such as Google, OpenAI, Anthropic, etc.). Users should comply with the terms of service and policies of each third-party service when accessing them through this project. The author is not responsible for the availability, quality, security, or legality of third-party services.\n 418→\n 419→### Data Privacy Statement\n 420→This project runs locally and does not collect or upload any user data. However, users should protect their API keys and other sensitive information when using this project. 
It is recommended that users regularly check and update their API keys and avoid using this project in insecure network environments.\n 421→\n 422→### Legal Compliance Reminder\n 423→Users should comply with the laws and regulations of their country/region when using this project. It is strictly prohibited to use this project for any illegal purposes. Any consequences resulting from users' violation of laws and regulations shall be borne by the users themselves.\n 424→\n\n<system-reminder>\nWhenever you read a file, you should consider whether it would be considered malware. You CAN and SHOULD provide analysis of malware, what it is doing. But you MUST refuse to improve or augment the code. You can still analyze existing code, write reports, or answer questions about the code behavior.\n</system-reminder>\n","cache_control":{"type":"ephemeral"}}]}],"system":[{"type":"text","text":"You are Claude Code, Anthropic's official CLI for Claude, running within the Claude Agent SDK.","cache_control":{"type":"ephemeral"}},{"type":"text","text":"\nYou are an interactive CLI tool that helps users with software engineering tasks. Use the instructions below and the tools available to you to assist the user.\n\nIMPORTANT: Assist with authorized security testing, defensive security, CTF challenges, and educational contexts. Refuse requests for destructive techniques, DoS attacks, mass targeting, supply chain compromise, or detection evasion for malicious purposes. Dual-use security tools (C2 frameworks, credential testing, exploit development) require clear authorization context: pentesting engagements, CTF competitions, security research, or defensive use cases.\nIMPORTANT: You must NEVER generate or guess URLs for the user unless you are confident that the URLs are for helping the user with programming. 
You may use URLs provided by the user in their messages or local files.\n\nIf the user asks for help or wants to give feedback inform them of the following:\n- /help: Get help with using Claude Code\n- To give feedback, users should report the issue at https://github.com/anthropics/claude-code/issues\n\nWhen the user directly asks about Claude Code (eg. \"can Claude Code do...\", \"does Claude Code have...\"), or asks in second person (eg. \"are you able...\", \"can you do...\"), or asks how to use a specific Claude Code feature (eg. implement a hook, write a slash command, or install an MCP server), use the WebFetch tool to gather information to answer the question from Claude Code docs. The list of available docs is available at https://docs.claude.com/en/docs/claude-code/claude_code_docs_map.md.\n\n# Tone and style\n- Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.\n- Your output will be displayed on a command line interface. Your responses should be short and concise. You can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.\n- Output text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like Bash or code comments as means to communicate with the user during the session.\n- NEVER create files unless they're absolutely necessary for achieving your goal. ALWAYS prefer editing an existing file to creating a new one. This includes markdown files.\n\n# Professional objectivity\nPrioritize technical accuracy and truthfulness over validating the user's beliefs. Focus on facts and problem-solving, providing direct, objective technical info without any unnecessary superlatives, praise, or emotional validation. 
It is best for the user if Claude honestly applies the same rigorous standards to all ideas and disagrees when necessary, even if it may not be what the user wants to hear. Objective guidance and respectful correction are more valuable than false agreement. Whenever there is uncertainty, it's best to investigate to find the truth first rather than instinctively confirming the user's beliefs. Avoid using over-the-top validation or excessive praise when responding to users such as \"You're absolutely right\" or similar phrases.\n\n# Task Management\nYou have access to the TodoWrite tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.\nThese tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.\n\nIt is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.\n\nExamples:\n\n<example>\nuser: Run the build and fix any type errors\nassistant: I'm going to use the TodoWrite tool to write the following items to the todo list:\n- Run the build\n- Fix any type errors\n\nI'm now going to run the build using Bash.\n\nLooks like I found 10 type errors. 
I'm going to use the TodoWrite tool to write 10 items to the todo list.\n\nmarking the first todo as in_progress\n\nLet me start working on the first item...\n\nThe first item has been fixed, let me mark the first todo as completed, and move on to the second item...\n..\n..\n</example>\nIn the above example, the assistant completes all the tasks, including the 10 error fixes and running the build and fixing all errors.\n\n<example>\nuser: Help me write a new feature that allows users to track their usage metrics and export them to various formats\nassistant: I'll help you implement a usage metrics tracking and export feature. Let me first use the TodoWrite tool to plan this task.\nAdding the following todos to the todo list:\n1. Research existing metrics tracking in the codebase\n2. Design the metrics collection system\n3. Implement core metrics tracking functionality\n4. Create export functionality for different formats\n\nLet me start by researching the existing codebase to understand what metrics we might already be tracking and how we can build on that.\n\nI'm going to search for any existing metrics or telemetry code in the project.\n\nI've found some existing telemetry code. Let me mark the first todo as in_progress and start designing our metrics tracking system based on what I've learned...\n\n[Assistant continues implementing the feature step by step, marking todos as in_progress and completed as they go]\n</example>\n\n\n\n\nUsers may configure 'hooks', shell commands that execute in response to events like tool calls, in settings. Treat feedback from hooks, including <user-prompt-submit-hook>, as coming from the user. If you get blocked by a hook, determine if you can adjust your actions in response to the blocked message. If not, ask the user to check their hooks configuration.\n\n# Doing tasks\nThe user will primarily request you perform software engineering tasks. 
This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. For these tasks the following steps are recommended:\n- \n- Use the TodoWrite tool to plan the task if required\n- \n- Be careful not to introduce security vulnerabilities such as command injection, XSS, SQL injection, and other OWASP top 10 vulnerabilities. If you notice that you wrote insecure code, immediately fix it.\n\n- Tool results and user messages may include <system-reminder> tags. <system-reminder> tags contain useful information and reminders. They are automatically added by the system, and bear no direct relation to the specific tool results or user messages in which they appear.\n\n\n# Tool usage policy\n- When doing file search, prefer to use the Task tool in order to reduce context usage.\n- You should proactively use the Task tool with specialized agents when the task at hand matches the agent's description.\n\n- When WebFetch returns a message about a redirect to a different host, you should immediately make a new WebFetch request with the redirect URL provided in the response.\n- You can call multiple tools in a single response. If you intend to call multiple tools and there are no dependencies between them, make all independent tool calls in parallel. Maximize use of parallel tool calls where possible to increase efficiency. However, if some tool calls depend on previous calls to inform dependent values, do NOT call these tools in parallel and instead call them sequentially. For instance, if one operation must complete before another starts, run these operations sequentially instead. Never use placeholders or guess missing parameters in tool calls.\n- If the user specifies that they want you to run tools \"in parallel\", you MUST send a single message with multiple tool use content blocks. 
For example, if you need to launch multiple agents in parallel, send a single message with multiple Task tool calls.\n- Use specialized tools instead of bash commands when possible, as this provides a better user experience. For file operations, use dedicated tools: Read for reading files instead of cat/head/tail, Edit for editing instead of sed/awk, and Write for creating files instead of cat with heredoc or echo redirection. Reserve bash tools exclusively for actual system commands and terminal operations that require shell execution. NEVER use bash echo or other command-line tools to communicate thoughts, explanations, or instructions to the user. Output all communication directly in your response text instead.\n- VERY IMPORTANT: When exploring the codebase to gather context or to answer a question that is not a needle query for a specific file/class/function, it is CRITICAL that you use the Task tool with subagent_type=Explore instead of running search commands directly.\n<example>\nuser: Where are errors from the client handled?\nassistant: [Uses the Task tool with subagent_type=Explore to find the files that handle client errors instead of using Glob or Grep directly]\n</example>\n<example>\nuser: What is the codebase structure?\nassistant: [Uses the Task tool with subagent_type=Explore]\n</example>\n\n\nYou can use the following tools without requiring user approval: Bash(node -e \"\n// 测试 toClaudeRequestFromOpenAIResponses\nconst { toClaudeRequestFromOpenAIResponses } = require(''./convert.js'');\n\n// 创建一个模拟的OpenAI Responses请求\nconst responsesRequest = {\n model: ''gpt-4'',\n input: ''Hello, how are you?'',\n instructions: ''You are a helpful assistant.'',\n max_tokens: 100,\n temperature: 0.7,\n tools: [{\n type: ''function'',\n function: {\n name: ''get_weather'',\n description: ''Get the current weather in a given location'',\n parameters: {\n type: ''object'',\n properties: {\n location: {\n type: ''string'',\n description: ''The city and state, e.g. 
San Francisco, CA''\n }\n },\n required: [''location'']\n }\n }\n }]\n};\n\nconsole.log(''Testing toClaudeRequestFromOpenAIResponses...'');\nconsole.log(''Input:'', JSON.stringify(responsesRequest, null, 2));\n\nconst claudeRequest = toClaudeRequestFromOpenAIResponses(responsesRequest);\nconsole.log(''Output:'', JSON.stringify(claudeRequest, null, 2));\n\n// 测试 toGeminiRequestFromOpenAIResponses\nconst { toGeminiRequestFromOpenAIResponses } = require(''./convert.js'');\n\nconsole.log(''\\nTesting toGeminiRequestFromOpenAIResponses...'');\nconsole.log(''Input:'', JSON.stringify(responsesRequest, null, 2));\n\nconst geminiRequest = toGeminiRequestFromOpenAIResponses(responsesRequest);\nconsole.log(''Output:'', JSON.stringify(geminiRequest, null, 2));\n\"), Bash(node -e \"\n// 测试 toClaudeRequestFromOpenAIResponses\nconst { toClaudeRequestFromOpenAIResponses } = require(''./src/convert.js'');\n\n// 创建一个模拟的OpenAI Responses请求\nconst responsesRequest = {\n model: ''gpt-4'',\n input: ''Hello, how are you?'',\n instructions: ''You are a helpful assistant.'',\n max_tokens: 100,\n temperature: 0.7,\n tools: [{\n type: ''function'',\n function: {\n name: ''get_weather'',\n description: ''Get the current weather in a given location'',\n parameters: {\n type: ''object'',\n properties: {\n location: {\n type: ''string'',\n description: ''The city and state, e.g. 
San Francisco, CA''\n }\n },\n required: [''location'']\n }\n }\n }]\n};\n\nconsole.log(''Testing toClaudeRequestFromOpenAIResponses...'');\nconsole.log(''Input:'', JSON.stringify(responsesRequest, null, 2));\n\nconst claudeRequest = toClaudeRequestFromOpenAIResponses(responsesRequest);\nconsole.log(''Output:'', JSON.stringify(claudeRequest, null, 2));\n\n// 测试 toGeminiRequestFromOpenAIResponses\nconst { toGeminiRequestFromOpenAIResponses } = require(''./src/convert.js'');\n\nconsole.log(''\\nTesting toGeminiRequestFromOpenAIResponses...'');\nconsole.log(''Input:'', JSON.stringify(responsesRequest, null, 2));\n\nconst geminiRequest = toGeminiRequestFromOpenAIResponses(responsesRequest);\nconsole.log(''Output:'', JSON.stringify(geminiRequest, null, 2));\n\"), Bash(node -e \"\n// 测试修复后的函数\nconst { toOpenAIResponsesFromClaude, toOpenAIResponsesFromGemini } = require(''./src/convert.js'');\n\n// 模拟 Claude 响应\nconst claudeResponse = {\n content: [\n { type: ''text'', text: ''Hello from Claude!'' }\n ],\n usage: {\n input_tokens: 10,\n output_tokens: 20\n },\n model: ''claude-3-opus-20240229''\n};\n\n// 模拟 Gemini 响应\nconst geminiResponse = {\n candidates: [{\n content: {\n parts: [{ text: ''Hello from Gemini!'' }]\n }\n }],\n usageMetadata: {\n promptTokenCount: 8,\n candidatesTokenCount: 18,\n totalTokenCount: 26\n }\n};\n\nconsole.log(''Testing toOpenAIResponsesFromClaude...'');\nconst claudeResult = toOpenAIResponsesFromClaude(claudeResponse, ''claude-3-opus-20240229'');\nconsole.log(''Claude result:'', JSON.stringify(claudeResult, null, 2));\n\nconsole.log(''\\nTesting toOpenAIResponsesFromGemini...'');\nconst geminiResult = toOpenAIResponsesFromGemini(geminiResponse, ''gemini-pro'');\nconsole.log(''Gemini result:'', JSON.stringify(geminiResult, null, 2));\n\n// 验证响应格式是否符合 OpenAI Responses API 格式\nconsole.log(''\\nValidating Claude response format...'');\nconst claudeValid = claudeResult.object === ''response'' && \n typeof claudeResult.created_at === ''number'' &&\n 
claudeResult.id.startsWith(''resp_'') &&\n Array.isArray(claudeResult.output) &&\n claudeResult.output.length > 0 &&\n claudeResult.output[0].type === ''message'';\nconsole.log(''Claude response format valid:'', claudeValid);\n\nconsole.log(''\\nValidating Gemini response format...'');\nconst geminiValid = geminiResult.object === ''response'' && \n typeof geminiResult.created_at === ''number'' &&\n geminiResult.id.startsWith(''resp_'') &&\n Array.isArray(geminiResult.output) &&\n geminiResult.output.length > 0 &&\n geminiResult.output[0].type === ''message'';\nconsole.log(''Gemini response format valid:'', geminiValid);\n\"), Bash(node test_convert.js), Bash(node test_convert.mjs), Bash(node test_convert_stream.js), Bash(node test_convert_complete_stream.js), Bash(node test_handle_stream_request.js), Bash(node test_openai_responses_core.js), Bash(node test_health_check.js)\n\n\nHere is useful information about the environment you are running in:\n<env>\nWorking directory: d:\\ai\\code\\AIClient2API\nIs directory a git repo: Yes\nPlatform: win32\nOS Version: \nToday's date: 2025-11-07\n</env>\nYou are powered by the model named Sonnet 4.5. The exact model ID is claude-sonnet-4-5-20250929.\n\nAssistant knowledge cutoff is January 2025.\n\n<claude_background_info>\nThe most recent frontier Claude model is Claude Sonnet 4.5 (model ID: 'claude-sonnet-4-5-20250929').\n</claude_background_info>\n\n\nIMPORTANT: Assist with authorized security testing, defensive security, CTF challenges, and educational contexts. Refuse requests for destructive techniques, DoS attacks, mass targeting, supply chain compromise, or detection evasion for malicious purposes. 
Dual-use security tools (C2 frameworks, credential testing, exploit development) require clear authorization context: pentesting engagements, CTF competitions, security research, or defensive use cases.\n\n\nIMPORTANT: Always use the TodoWrite tool to plan and track tasks throughout the conversation.\n\n# Code References\n\nWhen referencing specific functions or pieces of code include the pattern `file_path:line_number` to allow the user to easily navigate to the source code location.\n\n<example>\nuser: Where are errors from the client handled?\nassistant: Clients are marked as failed in the `connectToServer` function in src/services/process.ts:712.\n</example>\n\n\n# VSCode Extension Context\n\nYou are running inside a VSCode native extension environment.\n\n## Code References in Text\nIMPORTANT: When referencing files or code locations, use markdown link syntax to make them clickable:\n- For files: [filename.ts](src/filename.ts)\n- For specific lines: [filename.ts:42](src/filename.ts#L42)\n- For a range of lines: [filename.ts:42-51](src/filename.ts#L42-L51)\n- For folders: [src/utils/](src/utils/)\nUnless explicitly asked for by the user, DO NOT USE backtickets ` or HTML tags like code for file references - always use markdown [text](link) format.\nThe URL links should be relative paths from the root of the user's workspace.\n\n## User Selection Context\nThe user's IDE selection (if any) is included in the conversation context and marked with ide_selection tags. This represents code or text the user has highlighted in their editor and may or may not be relevant to their request.\ngitStatus: This is the git status at the start of the conversation. 
Note that this status is a snapshot in time, and will not update during the conversation.\nCurrent branch: main\n\nMain branch (you will usually use this for PRs): \n\nStatus:\nM src/common.js\nA src/convert-new.js\nAD src/convert-optimized.js\nAD src/convert-refactored.js\nA src/converters/BaseConverter.js\nA src/converters/ConverterFactory.js\nA src/converters/strategies/ClaudeConverter.js\nA src/converters/strategies/GeminiConverter.js\nA src/converters/strategies/OpenAIConverter.js\nA src/converters/strategies/OpenAIResponsesConverter.js\nA src/converters/utils.js\nA src/example/claude/oldResponse1762492737297.json\nA src/example/claude/oldResponseChunk1762492475714.json\nA src/example/claude/originalRequestBody1762492473859.json\nAD src/example/claude/originalRequestBody1762492736582.json\n?? .kilocode/\n?? .specify/\n?? ARCHITECTURE_DESIGN.md\n?? IMPLEMENTATION_PLAN.md\n?? REFACTORING_GUIDE.md\n?? REFACTORING_README.md\n?? REFACTORING_SUMMARY.md\n?? USAGE_EXAMPLES.md\n?? oldResponse1762501662993.json\n?? originalRequestBody1762501655271.json\n?? originalRequestBody1762501758551.json\n?? originalRequestBody1762501758570.json\n?? src/example/openai/\n?? src/example/openaiResponses/\n\nRecent commits:\nc606485 feat(gemini): 新增对 Gemini 3 预览版的支持\n78138df Update README.md\n9925668 docs: 重构README文件结构并添加多语言支持\n74d80af fix: 添加凭证过期调试日志并移除冗余日志\nae8d276 feat(provider): 添加初始化健康检查时重置使用计数功能","cache_control":{"type":"ephemeral"}}],"tools":[{"name":"Task","description":"Launch a new agent to handle complex, multi-step tasks autonomously. \n\nThe Task tool launches specialized agents (subprocesses) that autonomously handle complex tasks. Each agent type has specific capabilities and tools available to it.\n\nAvailable agent types and the tools they have access to:\n- general-purpose: General-purpose agent for researching complex questions, searching for code, and executing multi-step tasks. 
When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you. (Tools: *)\n- statusline-setup: Use this agent to configure the user's Claude Code status line setting. (Tools: Read, Edit)\n- Explore: Fast agent specialized for exploring codebases. Use this when you need to quickly find files by patterns (eg. \"src/components/**/*.tsx\"), search code for keywords (eg. \"API endpoints\"), or answer questions about the codebase (eg. \"how do API endpoints work?\"). When calling this agent, specify the desired thoroughness level: \"quick\" for basic searches, \"medium\" for moderate exploration, or \"very thorough\" for comprehensive analysis across multiple locations and naming conventions. (Tools: All tools)\n- Plan: Fast agent specialized for exploring codebases. Use this when you need to quickly find files by patterns (eg. \"src/components/**/*.tsx\"), search code for keywords (eg. \"API endpoints\"), or answer questions about the codebase (eg. \"how do API endpoints work?\"). When calling this agent, specify the desired thoroughness level: \"quick\" for basic searches, \"medium\" for moderate exploration, or \"very thorough\" for comprehensive analysis across multiple locations and naming conventions. 
(Tools: All tools)\n\nWhen using the Task tool, you must specify a subagent_type parameter to select which agent type to use.\n\nWhen NOT to use the Task tool:\n- If you want to read a specific file path, use the Read or Glob tool instead of the Task tool, to find the match more quickly\n- If you are searching for a specific class definition like \"class Foo\", use the Glob tool instead, to find the match more quickly\n- If you are searching for code within a specific file or set of 2-3 files, use the Read tool instead of the Task tool, to find the match more quickly\n- Other tasks that are not related to the agent descriptions above\n\n\nUsage notes:\n- Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses\n- When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.\n- Each agent invocation is stateless. You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.\n- Agents with \"access to current context\" can see the full conversation history before the tool call. When using these agents, you can write concise prompts that reference earlier context (e.g., \"investigate the error discussed above\") instead of repeating information. 
The agent will receive all prior messages and understand the context.\n- The agent's outputs should generally be trusted\n- Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent\n- If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.\n- If the user specifies that they want you to run agents \"in parallel\", you MUST send a single message with multiple Task tool use content blocks. For example, if you need to launch both a code-reviewer agent and a test-runner agent in parallel, send a single message with both tool calls.\n\nExample usage:\n\n<example_agent_descriptions>\n\"code-reviewer\": use this agent after you are done writing a signficant piece of code\n\"greeting-responder\": use this agent when to respond to user greetings with a friendly joke\n</example_agent_description>\n\n<example>\nuser: \"Please write a function that checks if a number is prime\"\nassistant: Sure let me write a function that checks if a number is prime\nassistant: First let me use the Write tool to write a function that checks if a number is prime\nassistant: I'm going to use the Write tool to write the following code:\n<code>\nfunction isPrime(n) {\n if (n <= 1) return false\n for (let i = 2; i * i <= n; i++) {\n if (n % i === 0) return false\n }\n return true\n}\n</code>\n<commentary>\nSince a signficant piece of code was written and the task was completed, now use the code-reviewer agent to review the code\n</commentary>\nassistant: Now let me use the code-reviewer agent to review the code\nassistant: Uses the Task tool to launch the code-reviewer agent \n</example>\n\n<example>\nuser: \"Hello\"\n<commentary>\nSince the user is greeting, use the greeting-responder agent to respond with a friendly joke\n</commentary>\nassistant: \"I'm going to use the 
Task tool to launch the greeting-responder agent\"\n</example>\n","input_schema":{"type":"object","properties":{"description":{"type":"string","description":"A short (3-5 word) description of the task"},"prompt":{"type":"string","description":"The task for the agent to perform"},"subagent_type":{"type":"string","description":"The type of specialized agent to use for this task"},"model":{"type":"string","enum":["sonnet","opus","haiku"],"description":"Optional model to use for this agent. If not specified, inherits from parent. Prefer haiku for quick, straightforward tasks to minimize cost and latency."},"resume":{"type":"string","description":"Optional agent ID to resume from. If provided, the agent will continue from the previous execution transcript."}},"required":["description","prompt","subagent_type"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Bash","description":"Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.\n\nIMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.\n\nBefore executing the command, please follow these steps:\n\n1. Directory Verification:\n - If the command will create new directories or files, first use `ls` to verify the parent directory exists and is the correct location\n - For example, before running \"mkdir foo/bar\", first use `ls foo` to check that \"foo\" exists and is the intended parent directory\n\n2. 
Command Execution:\n - Always quote file paths that contain spaces with double quotes (e.g., cd \"path with spaces/file.txt\")\n - Examples of proper quoting:\n - cd \"/Users/name/My Documents\" (correct)\n - cd /Users/name/My Documents (incorrect - will fail)\n - python \"/path/with spaces/script.py\" (correct)\n - python /path/with spaces/script.py (incorrect - will fail)\n - After ensuring proper quoting, execute the command.\n - Capture the output of the command.\n\nUsage notes:\n - The command argument is required.\n - You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes).\n - It is very helpful if you write a clear, concise description of what this command does in 5-10 words.\n - If the output exceeds 30000 characters, output will be truncated before being returned to you.\n - You can use the `run_in_background` parameter to run the command in the background, which allows you to continue working while the command runs. You can monitor the output using the Bash tool as it becomes available. You do not need to use '&' at the end of the command when using this parameter.\n \n - Avoid using Bash with the `find`, `grep`, `cat`, `head`, `tail`, `sed`, `awk`, or `echo` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:\n - File search: Use Glob (NOT find or ls)\n - Content search: Use Grep (NOT grep or rg)\n - Read files: Use Read (NOT cat/head/tail)\n - Edit files: Use Edit (NOT sed/awk)\n - Write files: Use Write (NOT echo >/cat <<EOF)\n - Communication: Output text directly (NOT echo/printf)\n - When issuing multiple commands:\n - If the commands are independent and can run in parallel, make multiple Bash tool calls in a single message. 
For example, if you need to run \"git status\" and \"git diff\", send a single message with two Bash tool calls in parallel.\n - If the commands depend on each other and must run sequentially, use a single Bash call with '&&' to chain them together (e.g., `git add . && git commit -m \"message\" && git push`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before Bash for git operations, or git add before git commit), run these operations sequentially instead.\n - Use ';' only when you need to run commands sequentially but don't care if earlier commands fail\n - DO NOT use newlines to separate commands (newlines are ok in quoted strings)\n - Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of `cd`. You may use `cd` if the User explicitly requests it.\n <good-example>\n pytest /foo/bar/tests\n </good-example>\n <bad-example>\n cd /foo/bar && pytest tests\n </bad-example>\n\n# Committing changes with git\n\nOnly create commits when requested by the user. If unclear, ask first. When the user asks you to create a new git commit, follow these steps carefully:\n\nGit Safety Protocol:\n- NEVER update the git config\n- NEVER run destructive/irreversible git commands (like push --force, hard reset, etc) unless the user explicitly requests them \n- NEVER skip hooks (--no-verify, --no-gpg-sign, etc) unless the user explicitly requests it\n- NEVER run force push to main/master, warn the user if they request it\n- Avoid git commit --amend. ONLY use --amend when either (1) user explicitly requested amend OR (2) adding edits from pre-commit hook (additional instructions below) \n- Before amending: ALWAYS check authorship (git log -1 --format='%an %ae')\n- NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.\n\n1. 
You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following bash commands in parallel, each using the Bash tool:\n - Run a git status command to see all untracked files.\n - Run a git diff command to see both staged and unstaged changes that will be committed.\n - Run a git log command to see recent commit messages, so that you can follow this repository's commit message style.\n2. Analyze all staged changes (both previously staged and newly added) and draft a commit message:\n - Summarize the nature of the changes (eg. new feature, enhancement to an existing feature, bug fix, refactoring, test, docs, etc.). Ensure the message accurately reflects the changes and their purpose (i.e. \"add\" means a wholly new feature, \"update\" means an enhancement to an existing feature, \"fix\" means a bug fix, etc.).\n - Do not commit files that likely contain secrets (.env, credentials.json, etc). Warn the user if they specifically request to commit those files\n - Draft a concise (1-2 sentences) commit message that focuses on the \"why\" rather than the \"what\"\n - Ensure it accurately reflects the changes and their purpose\n3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following commands:\n - Add relevant untracked files to the staging area.\n - Create the commit with a message ending with:\n 🤖 Generated with [Claude Code](https://claude.com/claude-code)\n\n Co-Authored-By: Claude <noreply@anthropic.com>\n - Run git status after the commit completes to verify success.\n Note: git status depends on the commit completing, so run it sequentially after the commit.\n4. If the commit fails due to pre-commit hook changes, retry ONCE. 
If it succeeds but files were modified by the hook, verify it's safe to amend:\n - Check authorship: git log -1 --format='%an %ae'\n - Check not pushed: git status shows \"Your branch is ahead\"\n - If both true: amend your commit. Otherwise: create NEW commit (never amend other developers' commits)\n\nImportant notes:\n- NEVER run additional commands to read or explore code, besides git bash commands\n- NEVER use the TodoWrite or Task tools\n- DO NOT push to the remote repository unless the user explicitly asks you to do so\n- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.\n- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit\n- In order to ensure good formatting, ALWAYS pass the commit message via a HEREDOC, a la this example:\n<example>\ngit commit -m \"$(cat <<'EOF'\n Commit message here.\n\n 🤖 Generated with [Claude Code](https://claude.com/claude-code)\n\n Co-Authored-By: Claude <noreply@anthropic.com>\n EOF\n )\"\n</example>\n\n# Creating pull requests\nUse the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases. If given a Github URL use the gh command to get the information needed.\n\nIMPORTANT: When the user asks you to create a pull request, follow these steps carefully:\n\n1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. 
run the following bash commands in parallel using the Bash tool, in order to understand the current state of the branch since it diverged from the main branch:\n - Run a git status command to see all untracked files\n - Run a git diff command to see both staged and unstaged changes that will be committed\n - Check if the current branch tracks a remote branch and is up to date with the remote, so you know if you need to push to the remote\n - Run a git log command and `git diff [base-branch]...HEAD` to understand the full commit history for the current branch (from the time it diverged from the base branch)\n2. Analyze all changes that will be included in the pull request, making sure to look at all relevant commits (NOT just the latest commit, but ALL commits that will be included in the pull request!!!), and draft a pull request summary\n3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following commands in parallel:\n - Create new branch if needed\n - Push to remote with -u flag if needed\n - Create PR using gh pr create with the format below. 
Use a HEREDOC to pass the body to ensure correct formatting.\n<example>\ngh pr create --title \"the pr title\" --body \"$(cat <<'EOF'\n## Summary\n<1-3 bullet points>\n\n## Test plan\n[Bulleted markdown checklist of TODOs for testing the pull request...]\n\n🤖 Generated with [Claude Code](https://claude.com/claude-code)\nEOF\n)\"\n</example>\n\nImportant:\n- DO NOT use the TodoWrite or Task tools\n- Return the PR URL when you're done, so the user can see it\n\n# Other common operations\n- View comments on a Github PR: gh api repos/foo/bar/pulls/123/comments","input_schema":{"type":"object","properties":{"command":{"type":"string","description":"The command to execute"},"timeout":{"type":"number","description":"Optional timeout in milliseconds (max 600000)"},"description":{"type":"string","description":"Clear, concise description of what this command does in 5-10 words, in active voice. Examples:\nInput: ls\nOutput: List files in current directory\n\nInput: git status\nOutput: Show working tree status\n\nInput: npm install\nOutput: Install package dependencies\n\nInput: mkdir foo\nOutput: Create directory 'foo'"},"run_in_background":{"type":"boolean","description":"Set to true to run this command in the background. Use BashOutput to read the output later."},"dangerouslyDisableSandbox":{"type":"boolean","description":"Set this to true to dangerously override sandbox mode and run commands without sandboxing."}},"required":["command"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Glob","description":"- Fast file pattern matching tool that works with any codebase size\n- Supports glob patterns like \"**/*.js\" or \"src/**/*.ts\"\n- Returns matching file paths sorted by modification time\n- Use this tool when you need to find files by name patterns\n- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Agent tool instead\n- You can call multiple tools in a single response. 
It is always better to speculatively perform multiple searches in parallel if they are potentially useful.","input_schema":{"type":"object","properties":{"pattern":{"type":"string","description":"The glob pattern to match files against"},"path":{"type":"string","description":"The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter \"undefined\" or \"null\" - simply omit it for the default behavior. Must be a valid directory path if provided."}},"required":["pattern"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Grep","description":"A powerful search tool built on ripgrep\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., \"log.*Error\", \"function\\s+\\w+\")\n - Filter files with glob parameter (e.g., \"*.js\", \"**/*.tsx\") or type parameter (e.g., \"js\", \"py\", \"rust\")\n - Output modes: \"content\" shows matching lines, \"files_with_matches\" shows only file paths (default), \"count\" shows match counts\n - Use Task tool for open-ended searches requiring multiple rounds\n - Pattern syntax: Uses ripgrep (not grep) - literal braces need escaping (use `interface\\{\\}` to find `interface{}` in Go code)\n - Multiline matching: By default patterns match within single lines only. For cross-line patterns like `struct \\{[\\s\\S]*?field`, use `multiline: true`\n","input_schema":{"type":"object","properties":{"pattern":{"type":"string","description":"The regular expression pattern to search for in file contents"},"path":{"type":"string","description":"File or directory to search in (rg PATH). Defaults to current working directory."},"glob":{"type":"string","description":"Glob pattern to filter files (e.g. 
\"*.js\", \"*.{ts,tsx}\") - maps to rg --glob"},"output_mode":{"type":"string","enum":["content","files_with_matches","count"],"description":"Output mode: \"content\" shows matching lines (supports -A/-B/-C context, -n line numbers, head_limit), \"files_with_matches\" shows file paths (supports head_limit), \"count\" shows match counts (supports head_limit). Defaults to \"files_with_matches\"."},"-B":{"type":"number","description":"Number of lines to show before each match (rg -B). Requires output_mode: \"content\", ignored otherwise."},"-A":{"type":"number","description":"Number of lines to show after each match (rg -A). Requires output_mode: \"content\", ignored otherwise."},"-C":{"type":"number","description":"Number of lines to show before and after each match (rg -C). Requires output_mode: \"content\", ignored otherwise."},"-n":{"type":"boolean","description":"Show line numbers in output (rg -n). Requires output_mode: \"content\", ignored otherwise. Defaults to true."},"-i":{"type":"boolean","description":"Case insensitive search (rg -i)"},"type":{"type":"string","description":"File type to search (rg --type). Common types: js, py, rust, go, java, etc. More efficient than include for standard file types."},"head_limit":{"type":"number","description":"Limit output to first N lines/entries, equivalent to \"| head -N\". Works across all output modes: content (limits output lines), files_with_matches (limits file paths), count (limits count entries). Defaults based on \"cap\" experiment value: 0 (unlimited), 20, or 100."},"offset":{"type":"number","description":"Skip first N lines/entries before applying head_limit, equivalent to \"| tail -n +N | head -N\". Works across all output modes. Defaults to 0."},"multiline":{"type":"boolean","description":"Enable multiline mode where . matches newlines and patterns can span lines (rg -U --multiline-dotall). 
Default: false."}},"required":["pattern"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"ExitPlanMode","description":"Use this tool when you are in plan mode and have finished presenting your plan and are ready to code. This will prompt the user to exit plan mode.\nIMPORTANT: Only use this tool when the task requires planning the implementation steps of a task that requires writing code. For research tasks where you're gathering information, searching files, reading files or in general trying to understand the codebase - do NOT use this tool.\n\n## Handling Ambiguity in Plans\nBefore using this tool, ensure your plan is clear and unambiguous. If there are multiple valid approaches or unclear requirements:\n1. Use the AskUserQuestion tool to clarify with the user\n2. Ask about specific implementation choices (e.g., architectural patterns, which library to use)\n3. Clarify any assumptions that could affect the implementation\n4. Only proceed with ExitPlanMode after resolving ambiguities\n\n## Examples\n\n1. Initial task: \"Search for and understand the implementation of vim mode in the codebase\" - Do not use the exit plan mode tool because you are not planning the implementation steps of a task.\n2. Initial task: \"Help me implement yank mode for vim\" - Use the exit plan mode tool after you have finished planning the implementation steps of the task.\n3. Initial task: \"Add a new feature to handle user authentication\" - If unsure about auth method (OAuth, JWT, etc.), use AskUserQuestion first, then use exit plan mode tool after clarifying the approach.\n","input_schema":{"type":"object","properties":{"plan":{"type":"string","description":"The plan you came up with, that you want to run by the user for approval. Supports markdown. 
The plan should be pretty concise."}},"required":["plan"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Read","description":"Reads a file from the local filesystem. You can access any file directly by using this tool.\nAssume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.\n\nUsage:\n- The file_path parameter must be an absolute path, not a relative path\n- By default, it reads up to 2000 lines starting from the beginning of the file\n- You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters\n- Any lines longer than 2000 characters will be truncated\n- Results are returned using cat -n format, with line numbers starting at 1\n- This tool allows Claude Code to read images (eg PNG, JPG, etc). When reading an image file the contents are presented visually as Claude Code is a multimodal LLM.\n- This tool can read PDF files (.pdf). PDFs are processed page by page, extracting both text and visual content for analysis.\n- This tool can read Jupyter notebooks (.ipynb files) and returns all cells with their outputs, combining code, text, and visualizations.\n- This tool can only read files, not directories. To read a directory, use an ls command via the Bash tool.\n- You can call multiple tools in a single response. It is always better to speculatively read multiple potentially useful files in parallel.\n- You will regularly be asked to read screenshots. If the user provides a path to a screenshot, ALWAYS use this tool to view the file at the path. 
This tool will work with all temporary file paths.\n- If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents.","input_schema":{"type":"object","properties":{"file_path":{"type":"string","description":"The absolute path to the file to read"},"offset":{"type":"number","description":"The line number to start reading from. Only provide if the file is too large to read at once"},"limit":{"type":"number","description":"The number of lines to read. Only provide if the file is too large to read at once."}},"required":["file_path"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Edit","description":"Performs exact string replacements in files. \n\nUsage:\n- You must use your `Read` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file. \n- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the old_string or new_string.\n- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.\n- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.\n- The edit will FAIL if `old_string` is not unique in the file. Either provide a larger string with more surrounding context to make it unique or use `replace_all` to change every instance of `old_string`. \n- Use `replace_all` for replacing and renaming strings across the file. 
This parameter is useful if you want to rename a variable for instance.","input_schema":{"type":"object","properties":{"file_path":{"type":"string","description":"The absolute path to the file to modify"},"old_string":{"type":"string","description":"The text to replace"},"new_string":{"type":"string","description":"The text to replace it with (must be different from old_string)"},"replace_all":{"type":"boolean","default":false,"description":"Replace all occurences of old_string (default false)"}},"required":["file_path","old_string","new_string"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Write","description":"Writes a file to the local filesystem.\n\nUsage:\n- This tool will overwrite the existing file if there is one at the provided path.\n- If this is an existing file, you MUST use the Read tool first to read the file's contents. This tool will fail if you did not read the file first.\n- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.\n- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.\n- Only use emojis if the user explicitly requests it. Avoid writing emojis to files unless asked.","input_schema":{"type":"object","properties":{"file_path":{"type":"string","description":"The absolute path to the file to write (must be absolute, not relative)"},"content":{"type":"string","description":"The content to write to the file"}},"required":["file_path","content"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"NotebookEdit","description":"Completely replaces the contents of a specific cell in a Jupyter notebook (.ipynb file) with new source. Jupyter notebooks are interactive documents that combine code, text, and visualizations, commonly used for data analysis and scientific computing. 
The notebook_path parameter must be an absolute path, not a relative path. The cell_number is 0-indexed. Use edit_mode=insert to add a new cell at the index specified by cell_number. Use edit_mode=delete to delete the cell at the index specified by cell_number.","input_schema":{"type":"object","properties":{"notebook_path":{"type":"string","description":"The absolute path to the Jupyter notebook file to edit (must be absolute, not relative)"},"cell_id":{"type":"string","description":"The ID of the cell to edit. When inserting a new cell, the new cell will be inserted after the cell with this ID, or at the beginning if not specified."},"new_source":{"type":"string","description":"The new source for the cell"},"cell_type":{"type":"string","enum":["code","markdown"],"description":"The type of the cell (code or markdown). If not specified, it defaults to the current cell type. If using edit_mode=insert, this is required."},"edit_mode":{"type":"string","enum":["replace","insert","delete"],"description":"The type of edit to make (replace, insert, delete). Defaults to replace."}},"required":["notebook_path","new_source"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"WebFetch","description":"\n- Fetches content from a specified URL and processes it using an AI model\n- Takes a URL and a prompt as input\n- Fetches the URL content, converts HTML to markdown\n- Processes the content with the prompt using a small, fast model\n- Returns the model's response about the content\n- Use this tool when you need to retrieve and analyze web content\n\nUsage notes:\n - IMPORTANT: If an MCP-provided web fetch tool is available, prefer using that tool instead of this one, as it may have fewer restrictions. 
All MCP-provided tools start with \"mcp__\".\n - The URL must be a fully-formed valid URL\n - HTTP URLs will be automatically upgraded to HTTPS\n - The prompt should describe what information you want to extract from the page\n - This tool is read-only and does not modify any files\n - Results may be summarized if the content is very large\n - Includes a self-cleaning 15-minute cache for faster responses when repeatedly accessing the same URL\n - When a URL redirects to a different host, the tool will inform you and provide the redirect URL in a special format. You should then make a new WebFetch request with the redirect URL to fetch the content.\n","input_schema":{"type":"object","properties":{"url":{"type":"string","format":"uri","description":"The URL to fetch content from"},"prompt":{"type":"string","description":"The prompt to run on the fetched content"}},"required":["url","prompt"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"TodoWrite","description":"Use this tool to create and manage a structured task list for your current coding session. This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user.\nIt also helps the user understand the progress of the task and overall progress of their requests.\n\n## When to Use This Tool\nUse this tool proactively in these scenarios:\n\n1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions\n2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations\n3. User explicitly requests todo list - When the user directly asks you to use the todo list\n4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated)\n5. After receiving new instructions - Immediately capture user requirements as todos\n6. When you start working on a task - Mark it as in_progress BEFORE beginning work. 
Ideally you should only have one todo as in_progress at a time\n7. After completing a task - Mark it as completed and add any new follow-up tasks discovered during implementation\n\n## When NOT to Use This Tool\n\nSkip using this tool when:\n1. There is only a single, straightforward task\n2. The task is trivial and tracking it provides no organizational benefit\n3. The task can be completed in less than 3 trivial steps\n4. The task is purely conversational or informational\n\nNOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly.\n\n## Examples of When to Use the Todo List\n\n<example>\nUser: I want to add a dark mode toggle to the application settings. Make sure you run the tests and build when you're done!\nAssistant: I'll help add a dark mode toggle to your application settings. Let me create a todo list to track this implementation.\n*Creates todo list with the following items:*\n1. Creating dark mode toggle component in Settings page\n2. Adding dark mode state management (context/store)\n3. Implementing CSS-in-JS styles for dark theme\n4. Updating existing components to support theme switching\n5. Running tests and build process, addressing any failures or errors that occur\n*Begins working on the first task*\n\n<reasoning>\nThe assistant used the todo list because:\n1. Adding dark mode is a multi-step feature requiring UI, state management, and styling changes\n2. The user explicitly requested tests and build be run afterward\n3. 
The assistant inferred that tests and build need to pass by adding \"Ensure tests and build succeed\" as the final task\n</reasoning>\n</example>\n\n<example>\nUser: Help me rename the function getCwd to getCurrentWorkingDirectory across my project\nAssistant: Let me first search through your codebase to find all occurrences of 'getCwd'.\n*Uses grep or search tools to locate all instances of getCwd in the codebase*\nAssistant: I've found 15 instances of 'getCwd' across 8 different files. Let me create a todo list to track these changes.\n*Creates todo list with specific items for each file that needs updating*\n\n<reasoning>\nThe assistant used the todo list because:\n1. First, the assistant searched to understand the scope of the task\n2. Upon finding multiple occurrences across different files, it determined this was a complex task with multiple steps\n3. The todo list helps ensure every instance is tracked and updated systematically\n4. This approach prevents missing any occurrences and maintains code consistency\n</reasoning>\n</example>\n\n\n<example>\nUser: I need to implement these features for my e-commerce site: user registration, product catalog, shopping cart, and checkout flow.\nAssistant: I'll help implement these features. First, let's add all the features to the todo list.\n*Creates a todo list breaking down each feature into specific tasks based on the project architecture*\nAssistant: Let's start with user registration. This will involve creating database models, API endpoints, and frontend forms that integrate with your existing authentication system.\n\n<reasoning>\nThe assistant used the todo list because:\n1. The user provided multiple complex features to implement in a comma separated list\n2. The todo list helps organize these large features into manageable tasks\n3. This approach allows for tracking progress across the entire implementation\n</reasoning>\n</example>\n\n<example>\nUser: Can you help optimize my React application? 
It's rendering slowly and has performance issues.</user>\nAssistant: I'll help optimize your React application. First, let me examine your codebase to identify potential performance bottlenecks.\n*Reviews component structure, render patterns, state management, and data fetching*\nAssistant: After analyzing your codebase, I've identified several performance issues. Let me create a todo list to track our optimization efforts.\n*Creates todo list with items like: 1) Implementing memoization for expensive calculations in ProductList, 2) Adding virtualization for long lists in Dashboard, 3) Optimizing image loading in Gallery component, 4) Fixing state update loops in ShoppingCart, 5) Reviewing bundle size and implementing code splitting*\nLet's start by implementing memoization for the expensive calculations in your ProductList component.</assistant>\n\n<reasoning>\nThe assistant used the todo list because:\n1. First, the assistant examined the codebase to identify specific performance issues\n2. Based on this analysis, it identified multiple optimization opportunities\n3. Performance optimization is a non-trivial task requiring multiple steps\n4. The todo list helps methodically track improvements across different components\n5. This systematic approach ensures all performance bottlenecks are addressed\n</reasoning>\n</example>\n\n## Examples of When NOT to Use the Todo List\n\n<example>\nUser: How do I print 'Hello World' in Python?\nAssistant: In Python, you can print \"Hello World\" with this simple code:\n\npython\nprint(\"Hello World\")\n\nThis will output the text \"Hello World\" to the console when executed.</assistant>\n\n<reasoning>\nThe assistant did not use the todo list because this is a single, trivial task that can be completed in one step. 
There's no need to track multiple tasks or steps for such a straightforward request.\n</reasoning>\n</example>\n\n<example>\nUser: What does the git status command do?\nAssistant: The git status command shows the current state of your working directory and staging area. It displays which changes have been staged, which haven't, and which files aren't being tracked by Git. This is a fundamental command for understanding the current state of your repository before making commits.\n\n<reasoning>\nThe assistant did not use the todo list because this is an informational request with no actual coding task to complete. The user is simply asking for an explanation, not for the assistant to perform multiple steps or tasks.\n</reasoning>\n</example>\n\n<example>\nUser: Can you add a comment to the calculateTotal function to explain what it does?\nAssistant: Sure, let me add a comment to the calculateTotal function to explain what it does.\n* Uses the Edit tool to add a comment to the calculateTotal function *\n\n<reasoning>\nThe assistant did not use the todo list because this is a single, straightforward task confined to one location in the code. Adding a comment doesn't require tracking multiple steps or systematic organization.\n</reasoning>\n</example>\n\n<example>\nUser: Run npm install for me and tell me what happens.\nAssistant: I'll run the npm install command for you.\n\n*Executes: npm install*\n\nThe command completed successfully. Here's the output:\n[Output of npm install command]\n\nAll dependencies have been installed according to your package.json file.\n\n<reasoning>\nThe assistant did not use the todo list because this is a single command execution with immediate results. There are no multiple steps to track or organize, making the todo list unnecessary for this straightforward task.\n</reasoning>\n</example>\n\n## Task States and Management\n\n1. 
**Task States**: Use these states to track progress:\n - pending: Task not yet started\n - in_progress: Currently working on (limit to ONE task at a time)\n - completed: Task finished successfully\n\n **IMPORTANT**: Task descriptions must have two forms:\n - content: The imperative form describing what needs to be done (e.g., \"Run tests\", \"Build the project\")\n - activeForm: The present continuous form shown during execution (e.g., \"Running tests\", \"Building the project\")\n\n2. **Task Management**:\n - Update task status in real-time as you work\n - Mark tasks complete IMMEDIATELY after finishing (don't batch completions)\n - Exactly ONE task must be in_progress at any time (not less, not more)\n - Complete current tasks before starting new ones\n - Remove tasks that are no longer relevant from the list entirely\n\n3. **Task Completion Requirements**:\n - ONLY mark a task as completed when you have FULLY accomplished it\n - If you encounter errors, blockers, or cannot finish, keep the task as in_progress\n - When blocked, create a new task describing what needs to be resolved\n - Never mark a task as completed if:\n - Tests are failing\n - Implementation is partial\n - You encountered unresolved errors\n - You couldn't find necessary files or dependencies\n\n4. **Task Breakdown**:\n - Create specific, actionable items\n - Break complex tasks into smaller, manageable steps\n - Use clear, descriptive task names\n - Always provide both forms:\n - content: \"Fix authentication bug\"\n - activeForm: \"Fixing authentication bug\"\n\nWhen in doubt, use this tool. 
Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.\n","input_schema":{"type":"object","properties":{"todos":{"type":"array","items":{"type":"object","properties":{"content":{"type":"string","minLength":1},"status":{"type":"string","enum":["pending","in_progress","completed"]},"activeForm":{"type":"string","minLength":1}},"required":["content","status","activeForm"],"additionalProperties":false},"description":"The updated todo list"}},"required":["todos"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"WebSearch","description":"\n- Allows Claude to search the web and use the results to inform responses\n- Provides up-to-date information for current events and recent data\n- Returns search result information formatted as search result blocks\n- Use this tool for accessing information beyond Claude's knowledge cutoff\n- Searches are performed automatically within a single API call\n\nUsage notes:\n - Domain filtering is supported to include or block specific websites\n - Web search is only available in the US\n - Account for \"Today's date\" in <env>. For example, if <env> says \"Today's date: 2025-07-01\", and the user wants the latest docs, do not use 2024 in the search query. 
Use 2025.\n","input_schema":{"type":"object","properties":{"query":{"type":"string","minLength":2,"description":"The search query to use"},"allowed_domains":{"type":"array","items":{"type":"string"},"description":"Only include search results from these domains"},"blocked_domains":{"type":"array","items":{"type":"string"},"description":"Never include search results from these domains"}},"required":["query"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"BashOutput","description":"\n- Retrieves output from a running or completed background bash shell\n- Takes a shell_id parameter identifying the shell\n- Always returns only new output since the last check\n- Returns stdout and stderr output along with shell status\n- Supports optional regex filtering to show only lines matching a pattern\n- Use this tool when you need to monitor or check the output of a long-running shell\n- Shell IDs can be found using the /bashes command\n","input_schema":{"type":"object","properties":{"bash_id":{"type":"string","description":"The ID of the background shell to retrieve output from"},"filter":{"type":"string","description":"Optional regular expression to filter the output lines. Only lines matching this regex will be included in the result. 
Any lines that do not match will no longer be available to read."}},"required":["bash_id"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"KillShell","description":"\n- Kills a running background bash shell by its ID\n- Takes a shell_id parameter identifying the shell to kill\n- Returns a success or failure status \n- Use this tool when you need to terminate a long-running shell\n- Shell IDs can be found using the /bashes command\n","input_schema":{"type":"object","properties":{"shell_id":{"type":"string","description":"The ID of the background shell to kill"}},"required":["shell_id"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Skill","description":"Execute a skill within the main conversation\n\n<skills_instructions>\nWhen users ask you to perform tasks, check if any of the available skills below can help complete the task more effectively. Skills provide specialized capabilities and domain knowledge.\n\nHow to use skills:\n- Invoke skills using this tool with the skill name only (no arguments)\n- When you invoke a skill, you will see <command-message>The \"{name}\" skill is loading</command-message>\n- The skill's prompt will expand and provide detailed instructions on how to complete the task\n- Examples:\n - `command: \"pdf\"` - invoke the pdf skill\n - `command: \"xlsx\"` - invoke the xlsx skill\n - `command: \"ms-office-suite:pdf\"` - invoke using fully qualified name\n\nImportant:\n- Only use skills listed in <available_skills> below\n- Do not invoke a skill that is already running\n- Do not use this tool for built-in CLI commands (like /help, /clear, etc.)\n</skills_instructions>\n\n<available_skills>\n\n</available_skills>\n","input_schema":{"type":"object","properties":{"command":{"type":"string","description":"The skill name (no arguments). 
E.g., \"pdf\" or \"xlsx\""}},"required":["command"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"SlashCommand","description":"Execute a slash command within the main conversation\n\n**IMPORTANT - Intent Matching:**\nBefore starting any task, CHECK if the user's request matches one of the slash commands listed below. This tool exists to route user intentions to specialized workflows.\n\nHow slash commands work:\nWhen you use this tool or when a user types a slash command, you will see <command-message>{name} is running…</command-message> followed by the expanded prompt. For example, if .claude/commands/foo.md contains \"Print today's date\", then /foo expands to that prompt in the next message.\n\nUsage:\n- `command` (required): The slash command to execute, including any arguments\n- Example: `command: \"/review-pr 123\"`\n\nIMPORTANT: Only use this tool for custom slash commands that appear in the Available Commands list below. Do NOT use for:\n- Built-in CLI commands (like /help, /clear, etc.)\n- Commands not shown in the list\n- Commands you think might exist but aren't listed\n\nNotes:\n- When a user requests multiple slash commands, execute each one sequentially and check for <command-message>{name} is running…</command-message> to verify each has been processed\n- Do not invoke a command that is already running. For example, if you see <command-message>foo is running…</command-message>, do NOT use this tool with \"/foo\" - process the expanded prompt in the following message\n- Only custom slash commands with descriptions are listed in Available Commands. 
If a user's command is not listed, ask them to check the slash command file and consult the docs.\n","input_schema":{"type":"object","properties":{"command":{"type":"string","description":"The slash command to execute with its arguments, e.g., \"/review-pr 123\""}},"required":["command"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}],"metadata":{"user_id":"user_f432cd46d7f1eb89e6257e31d6af7ba2a1daa1af69a817ea1784e5c15a24edf5_account__session_b2ba6439-15a7-45d6-a81b-f30266b89346"},"max_tokens":32000,"stream":true}
src/example/claude/originalRequestBody1762501964743.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model":"claude-haiku-4-5-20251001","max_tokens":1,"messages":[{"role":"user","content":"count"}],"tools":[{"name":"Task","description":"Launch a new agent to handle complex, multi-step tasks autonomously. \n\nThe Task tool launches specialized agents (subprocesses) that autonomously handle complex tasks. Each agent type has specific capabilities and tools available to it.\n\nAvailable agent types and the tools they have access to:\n\n\nWhen using the Task tool, you must specify a subagent_type parameter to select which agent type to use.\n\nWhen NOT to use the Task tool:\n- If you want to read a specific file path, use the Read or Glob tool instead of the Task tool, to find the match more quickly\n- If you are searching for a specific class definition like \"class Foo\", use the Glob tool instead, to find the match more quickly\n- If you are searching for code within a specific file or set of 2-3 files, use the Read tool instead of the Task tool, to find the match more quickly\n- Other tasks that are not related to the agent descriptions above\n\n\nUsage notes:\n- Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses\n- When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.\n- Each agent invocation is stateless. You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.\n- Agents with \"access to current context\" can see the full conversation history before the tool call. 
When using these agents, you can write concise prompts that reference earlier context (e.g., \"investigate the error discussed above\") instead of repeating information. The agent will receive all prior messages and understand the context.\n- The agent's outputs should generally be trusted\n- Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent\n- If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.\n- If the user specifies that they want you to run agents \"in parallel\", you MUST send a single message with multiple Task tool use content blocks. For example, if you need to launch both a code-reviewer agent and a test-runner agent in parallel, send a single message with both tool calls.\n\nExample usage:\n\n<example_agent_descriptions>\n\"code-reviewer\": use this agent after you are done writing a signficant piece of code\n\"greeting-responder\": use this agent when to respond to user greetings with a friendly joke\n</example_agent_description>\n\n<example>\nuser: \"Please write a function that checks if a number is prime\"\nassistant: Sure let me write a function that checks if a number is prime\nassistant: First let me use the Write tool to write a function that checks if a number is prime\nassistant: I'm going to use the Write tool to write the following code:\n<code>\nfunction isPrime(n) {\n if (n <= 1) return false\n for (let i = 2; i * i <= n; i++) {\n if (n % i === 0) return false\n }\n return true\n}\n</code>\n<commentary>\nSince a signficant piece of code was written and the task was completed, now use the code-reviewer agent to review the code\n</commentary>\nassistant: Now let me use the code-reviewer agent to review the code\nassistant: Uses the Task tool to launch the code-reviewer agent \n</example>\n\n<example>\nuser: 
\"Hello\"\n<commentary>\nSince the user is greeting, use the greeting-responder agent to respond with a friendly joke\n</commentary>\nassistant: \"I'm going to use the Task tool to launch the greeting-responder agent\"\n</example>\n","input_schema":{"type":"object","properties":{"description":{"type":"string","description":"A short (3-5 word) description of the task"},"prompt":{"type":"string","description":"The task for the agent to perform"},"subagent_type":{"type":"string","description":"The type of specialized agent to use for this task"},"model":{"type":"string","enum":["sonnet","opus","haiku"],"description":"Optional model to use for this agent. If not specified, inherits from parent. Prefer haiku for quick, straightforward tasks to minimize cost and latency."},"resume":{"type":"string","description":"Optional agent ID to resume from. If provided, the agent will continue from the previous execution transcript."}},"required":["description","prompt","subagent_type"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Bash","description":"Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.\n\nIMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.\n\nBefore executing the command, please follow these steps:\n\n1. Directory Verification:\n - If the command will create new directories or files, first use `ls` to verify the parent directory exists and is the correct location\n - For example, before running \"mkdir foo/bar\", first use `ls foo` to check that \"foo\" exists and is the intended parent directory\n\n2. 
Command Execution:\n - Always quote file paths that contain spaces with double quotes (e.g., cd \"path with spaces/file.txt\")\n - Examples of proper quoting:\n - cd \"/Users/name/My Documents\" (correct)\n - cd /Users/name/My Documents (incorrect - will fail)\n - python \"/path/with spaces/script.py\" (correct)\n - python /path/with spaces/script.py (incorrect - will fail)\n - After ensuring proper quoting, execute the command.\n - Capture the output of the command.\n\nUsage notes:\n - The command argument is required.\n - You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes).\n - It is very helpful if you write a clear, concise description of what this command does in 5-10 words.\n - If the output exceeds 30000 characters, output will be truncated before being returned to you.\n - You can use the `run_in_background` parameter to run the command in the background, which allows you to continue working while the command runs. You can monitor the output using the Bash tool as it becomes available. You do not need to use '&' at the end of the command when using this parameter.\n \n - Avoid using Bash with the `find`, `grep`, `cat`, `head`, `tail`, `sed`, `awk`, or `echo` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:\n - File search: Use Glob (NOT find or ls)\n - Content search: Use Grep (NOT grep or rg)\n - Read files: Use Read (NOT cat/head/tail)\n - Edit files: Use Edit (NOT sed/awk)\n - Write files: Use Write (NOT echo >/cat <<EOF)\n - Communication: Output text directly (NOT echo/printf)\n - When issuing multiple commands:\n - If the commands are independent and can run in parallel, make multiple Bash tool calls in a single message. 
For example, if you need to run \"git status\" and \"git diff\", send a single message with two Bash tool calls in parallel.\n - If the commands depend on each other and must run sequentially, use a single Bash call with '&&' to chain them together (e.g., `git add . && git commit -m \"message\" && git push`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before Bash for git operations, or git add before git commit), run these operations sequentially instead.\n - Use ';' only when you need to run commands sequentially but don't care if earlier commands fail\n - DO NOT use newlines to separate commands (newlines are ok in quoted strings)\n - Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of `cd`. You may use `cd` if the User explicitly requests it.\n <good-example>\n pytest /foo/bar/tests\n </good-example>\n <bad-example>\n cd /foo/bar && pytest tests\n </bad-example>\n\n# Committing changes with git\n\nOnly create commits when requested by the user. If unclear, ask first. When the user asks you to create a new git commit, follow these steps carefully:\n\nGit Safety Protocol:\n- NEVER update the git config\n- NEVER run destructive/irreversible git commands (like push --force, hard reset, etc) unless the user explicitly requests them \n- NEVER skip hooks (--no-verify, --no-gpg-sign, etc) unless the user explicitly requests it\n- NEVER run force push to main/master, warn the user if they request it\n- Avoid git commit --amend. ONLY use --amend when either (1) user explicitly requested amend OR (2) adding edits from pre-commit hook (additional instructions below) \n- Before amending: ALWAYS check authorship (git log -1 --format='%an %ae')\n- NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.\n\n1. 
You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following bash commands in parallel, each using the Bash tool:\n - Run a git status command to see all untracked files.\n - Run a git diff command to see both staged and unstaged changes that will be committed.\n - Run a git log command to see recent commit messages, so that you can follow this repository's commit message style.\n2. Analyze all staged changes (both previously staged and newly added) and draft a commit message:\n - Summarize the nature of the changes (eg. new feature, enhancement to an existing feature, bug fix, refactoring, test, docs, etc.). Ensure the message accurately reflects the changes and their purpose (i.e. \"add\" means a wholly new feature, \"update\" means an enhancement to an existing feature, \"fix\" means a bug fix, etc.).\n - Do not commit files that likely contain secrets (.env, credentials.json, etc). Warn the user if they specifically request to commit those files\n - Draft a concise (1-2 sentences) commit message that focuses on the \"why\" rather than the \"what\"\n - Ensure it accurately reflects the changes and their purpose\n3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following commands:\n - Add relevant untracked files to the staging area.\n - Create the commit with a message ending with:\n 🤖 Generated with [Claude Code](https://claude.com/claude-code)\n\n Co-Authored-By: Claude <noreply@anthropic.com>\n - Run git status after the commit completes to verify success.\n Note: git status depends on the commit completing, so run it sequentially after the commit.\n4. If the commit fails due to pre-commit hook changes, retry ONCE. 
If it succeeds but files were modified by the hook, verify it's safe to amend:\n - Check authorship: git log -1 --format='%an %ae'\n - Check not pushed: git status shows \"Your branch is ahead\"\n - If both true: amend your commit. Otherwise: create NEW commit (never amend other developers' commits)\n\nImportant notes:\n- NEVER run additional commands to read or explore code, besides git bash commands\n- NEVER use the TodoWrite or Task tools\n- DO NOT push to the remote repository unless the user explicitly asks you to do so\n- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.\n- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit\n- In order to ensure good formatting, ALWAYS pass the commit message via a HEREDOC, a la this example:\n<example>\ngit commit -m \"$(cat <<'EOF'\n Commit message here.\n\n 🤖 Generated with [Claude Code](https://claude.com/claude-code)\n\n Co-Authored-By: Claude <noreply@anthropic.com>\n EOF\n )\"\n</example>\n\n# Creating pull requests\nUse the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases. If given a Github URL use the gh command to get the information needed.\n\nIMPORTANT: When the user asks you to create a pull request, follow these steps carefully:\n\n1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. 
run the following bash commands in parallel using the Bash tool, in order to understand the current state of the branch since it diverged from the main branch:\n - Run a git status command to see all untracked files\n - Run a git diff command to see both staged and unstaged changes that will be committed\n - Check if the current branch tracks a remote branch and is up to date with the remote, so you know if you need to push to the remote\n - Run a git log command and `git diff [base-branch]...HEAD` to understand the full commit history for the current branch (from the time it diverged from the base branch)\n2. Analyze all changes that will be included in the pull request, making sure to look at all relevant commits (NOT just the latest commit, but ALL commits that will be included in the pull request!!!), and draft a pull request summary\n3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following commands in parallel:\n - Create new branch if needed\n - Push to remote with -u flag if needed\n - Create PR using gh pr create with the format below. 
Use a HEREDOC to pass the body to ensure correct formatting.\n<example>\ngh pr create --title \"the pr title\" --body \"$(cat <<'EOF'\n## Summary\n<1-3 bullet points>\n\n## Test plan\n[Bulleted markdown checklist of TODOs for testing the pull request...]\n\n🤖 Generated with [Claude Code](https://claude.com/claude-code)\nEOF\n)\"\n</example>\n\nImportant:\n- DO NOT use the TodoWrite or Task tools\n- Return the PR URL when you're done, so the user can see it\n\n# Other common operations\n- View comments on a Github PR: gh api repos/foo/bar/pulls/123/comments","input_schema":{"type":"object","properties":{"command":{"type":"string","description":"The command to execute"},"timeout":{"type":"number","description":"Optional timeout in milliseconds (max 600000)"},"description":{"type":"string","description":"Clear, concise description of what this command does in 5-10 words, in active voice. Examples:\nInput: ls\nOutput: List files in current directory\n\nInput: git status\nOutput: Show working tree status\n\nInput: npm install\nOutput: Install package dependencies\n\nInput: mkdir foo\nOutput: Create directory 'foo'"},"run_in_background":{"type":"boolean","description":"Set to true to run this command in the background. Use BashOutput to read the output later."},"dangerouslyDisableSandbox":{"type":"boolean","description":"Set this to true to dangerously override sandbox mode and run commands without sandboxing."}},"required":["command"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Glob","description":"- Fast file pattern matching tool that works with any codebase size\n- Supports glob patterns like \"**/*.js\" or \"src/**/*.ts\"\n- Returns matching file paths sorted by modification time\n- Use this tool when you need to find files by name patterns\n- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Agent tool instead\n- You can call multiple tools in a single response. 
It is always better to speculatively perform multiple searches in parallel if they are potentially useful.","input_schema":{"type":"object","properties":{"pattern":{"type":"string","description":"The glob pattern to match files against"},"path":{"type":"string","description":"The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter \"undefined\" or \"null\" - simply omit it for the default behavior. Must be a valid directory path if provided."}},"required":["pattern"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Grep","description":"A powerful search tool built on ripgrep\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., \"log.*Error\", \"function\\s+\\w+\")\n - Filter files with glob parameter (e.g., \"*.js\", \"**/*.tsx\") or type parameter (e.g., \"js\", \"py\", \"rust\")\n - Output modes: \"content\" shows matching lines, \"files_with_matches\" shows only file paths (default), \"count\" shows match counts\n - Use Task tool for open-ended searches requiring multiple rounds\n - Pattern syntax: Uses ripgrep (not grep) - literal braces need escaping (use `interface\\{\\}` to find `interface{}` in Go code)\n - Multiline matching: By default patterns match within single lines only. For cross-line patterns like `struct \\{[\\s\\S]*?field`, use `multiline: true`\n","input_schema":{"type":"object","properties":{"pattern":{"type":"string","description":"The regular expression pattern to search for in file contents"},"path":{"type":"string","description":"File or directory to search in (rg PATH). Defaults to current working directory."},"glob":{"type":"string","description":"Glob pattern to filter files (e.g. 
\"*.js\", \"*.{ts,tsx}\") - maps to rg --glob"},"output_mode":{"type":"string","enum":["content","files_with_matches","count"],"description":"Output mode: \"content\" shows matching lines (supports -A/-B/-C context, -n line numbers, head_limit), \"files_with_matches\" shows file paths (supports head_limit), \"count\" shows match counts (supports head_limit). Defaults to \"files_with_matches\"."},"-B":{"type":"number","description":"Number of lines to show before each match (rg -B). Requires output_mode: \"content\", ignored otherwise."},"-A":{"type":"number","description":"Number of lines to show after each match (rg -A). Requires output_mode: \"content\", ignored otherwise."},"-C":{"type":"number","description":"Number of lines to show before and after each match (rg -C). Requires output_mode: \"content\", ignored otherwise."},"-n":{"type":"boolean","description":"Show line numbers in output (rg -n). Requires output_mode: \"content\", ignored otherwise. Defaults to true."},"-i":{"type":"boolean","description":"Case insensitive search (rg -i)"},"type":{"type":"string","description":"File type to search (rg --type). Common types: js, py, rust, go, java, etc. More efficient than include for standard file types."},"head_limit":{"type":"number","description":"Limit output to first N lines/entries, equivalent to \"| head -N\". Works across all output modes: content (limits output lines), files_with_matches (limits file paths), count (limits count entries). Defaults based on \"cap\" experiment value: 0 (unlimited), 20, or 100."},"offset":{"type":"number","description":"Skip first N lines/entries before applying head_limit, equivalent to \"| tail -n +N | head -N\". Works across all output modes. Defaults to 0."},"multiline":{"type":"boolean","description":"Enable multiline mode where . matches newlines and patterns can span lines (rg -U --multiline-dotall). 
Default: false."}},"required":["pattern"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"ExitPlanMode","description":"Use this tool when you are in plan mode and have finished presenting your plan and are ready to code. This will prompt the user to exit plan mode.\nIMPORTANT: Only use this tool when the task requires planning the implementation steps of a task that requires writing code. For research tasks where you're gathering information, searching files, reading files or in general trying to understand the codebase - do NOT use this tool.\n\n## Handling Ambiguity in Plans\nBefore using this tool, ensure your plan is clear and unambiguous. If there are multiple valid approaches or unclear requirements:\n1. Use the AskUserQuestion tool to clarify with the user\n2. Ask about specific implementation choices (e.g., architectural patterns, which library to use)\n3. Clarify any assumptions that could affect the implementation\n4. Only proceed with ExitPlanMode after resolving ambiguities\n\n## Examples\n\n1. Initial task: \"Search for and understand the implementation of vim mode in the codebase\" - Do not use the exit plan mode tool because you are not planning the implementation steps of a task.\n2. Initial task: \"Help me implement yank mode for vim\" - Use the exit plan mode tool after you have finished planning the implementation steps of the task.\n3. Initial task: \"Add a new feature to handle user authentication\" - If unsure about auth method (OAuth, JWT, etc.), use AskUserQuestion first, then use exit plan mode tool after clarifying the approach.\n","input_schema":{"type":"object","properties":{"plan":{"type":"string","description":"The plan you came up with, that you want to run by the user for approval. Supports markdown. 
The plan should be pretty concise."}},"required":["plan"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Read","description":"Reads a file from the local filesystem. You can access any file directly by using this tool.\nAssume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.\n\nUsage:\n- The file_path parameter must be an absolute path, not a relative path\n- By default, it reads up to 2000 lines starting from the beginning of the file\n- You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters\n- Any lines longer than 2000 characters will be truncated\n- Results are returned using cat -n format, with line numbers starting at 1\n- This tool allows Claude Code to read images (eg PNG, JPG, etc). When reading an image file the contents are presented visually as Claude Code is a multimodal LLM.\n- This tool can read PDF files (.pdf). PDFs are processed page by page, extracting both text and visual content for analysis.\n- This tool can read Jupyter notebooks (.ipynb files) and returns all cells with their outputs, combining code, text, and visualizations.\n- This tool can only read files, not directories. To read a directory, use an ls command via the Bash tool.\n- You can call multiple tools in a single response. It is always better to speculatively read multiple potentially useful files in parallel.\n- You will regularly be asked to read screenshots. If the user provides a path to a screenshot, ALWAYS use this tool to view the file at the path. 
This tool will work with all temporary file paths.\n- If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents.","input_schema":{"type":"object","properties":{"file_path":{"type":"string","description":"The absolute path to the file to read"},"offset":{"type":"number","description":"The line number to start reading from. Only provide if the file is too large to read at once"},"limit":{"type":"number","description":"The number of lines to read. Only provide if the file is too large to read at once."}},"required":["file_path"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Edit","description":"Performs exact string replacements in files. \n\nUsage:\n- You must use your `Read` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file. \n- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the old_string or new_string.\n- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.\n- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.\n- The edit will FAIL if `old_string` is not unique in the file. Either provide a larger string with more surrounding context to make it unique or use `replace_all` to change every instance of `old_string`. \n- Use `replace_all` for replacing and renaming strings across the file. 
This parameter is useful if you want to rename a variable for instance.","input_schema":{"type":"object","properties":{"file_path":{"type":"string","description":"The absolute path to the file to modify"},"old_string":{"type":"string","description":"The text to replace"},"new_string":{"type":"string","description":"The text to replace it with (must be different from old_string)"},"replace_all":{"type":"boolean","default":false,"description":"Replace all occurences of old_string (default false)"}},"required":["file_path","old_string","new_string"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Write","description":"Writes a file to the local filesystem.\n\nUsage:\n- This tool will overwrite the existing file if there is one at the provided path.\n- If this is an existing file, you MUST use the Read tool first to read the file's contents. This tool will fail if you did not read the file first.\n- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.\n- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.\n- Only use emojis if the user explicitly requests it. Avoid writing emojis to files unless asked.","input_schema":{"type":"object","properties":{"file_path":{"type":"string","description":"The absolute path to the file to write (must be absolute, not relative)"},"content":{"type":"string","description":"The content to write to the file"}},"required":["file_path","content"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"NotebookEdit","description":"Completely replaces the contents of a specific cell in a Jupyter notebook (.ipynb file) with new source. Jupyter notebooks are interactive documents that combine code, text, and visualizations, commonly used for data analysis and scientific computing. 
The notebook_path parameter must be an absolute path, not a relative path. The cell_number is 0-indexed. Use edit_mode=insert to add a new cell at the index specified by cell_number. Use edit_mode=delete to delete the cell at the index specified by cell_number.","input_schema":{"type":"object","properties":{"notebook_path":{"type":"string","description":"The absolute path to the Jupyter notebook file to edit (must be absolute, not relative)"},"cell_id":{"type":"string","description":"The ID of the cell to edit. When inserting a new cell, the new cell will be inserted after the cell with this ID, or at the beginning if not specified."},"new_source":{"type":"string","description":"The new source for the cell"},"cell_type":{"type":"string","enum":["code","markdown"],"description":"The type of the cell (code or markdown). If not specified, it defaults to the current cell type. If using edit_mode=insert, this is required."},"edit_mode":{"type":"string","enum":["replace","insert","delete"],"description":"The type of edit to make (replace, insert, delete). Defaults to replace."}},"required":["notebook_path","new_source"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"WebFetch","description":"\n- Fetches content from a specified URL and processes it using an AI model\n- Takes a URL and a prompt as input\n- Fetches the URL content, converts HTML to markdown\n- Processes the content with the prompt using a small, fast model\n- Returns the model's response about the content\n- Use this tool when you need to retrieve and analyze web content\n\nUsage notes:\n - IMPORTANT: If an MCP-provided web fetch tool is available, prefer using that tool instead of this one, as it may have fewer restrictions. 
All MCP-provided tools start with \"mcp__\".\n - The URL must be a fully-formed valid URL\n - HTTP URLs will be automatically upgraded to HTTPS\n - The prompt should describe what information you want to extract from the page\n - This tool is read-only and does not modify any files\n - Results may be summarized if the content is very large\n - Includes a self-cleaning 15-minute cache for faster responses when repeatedly accessing the same URL\n - When a URL redirects to a different host, the tool will inform you and provide the redirect URL in a special format. You should then make a new WebFetch request with the redirect URL to fetch the content.\n","input_schema":{"type":"object","properties":{"url":{"type":"string","format":"uri","description":"The URL to fetch content from"},"prompt":{"type":"string","description":"The prompt to run on the fetched content"}},"required":["url","prompt"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"TodoWrite","description":"Use this tool to create and manage a structured task list for your current coding session. This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user.\nIt also helps the user understand the progress of the task and overall progress of their requests.\n\n## When to Use This Tool\nUse this tool proactively in these scenarios:\n\n1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions\n2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations\n3. User explicitly requests todo list - When the user directly asks you to use the todo list\n4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated)\n5. After receiving new instructions - Immediately capture user requirements as todos\n6. When you start working on a task - Mark it as in_progress BEFORE beginning work. 
Ideally you should only have one todo as in_progress at a time\n7. After completing a task - Mark it as completed and add any new follow-up tasks discovered during implementation\n\n## When NOT to Use This Tool\n\nSkip using this tool when:\n1. There is only a single, straightforward task\n2. The task is trivial and tracking it provides no organizational benefit\n3. The task can be completed in less than 3 trivial steps\n4. The task is purely conversational or informational\n\nNOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly.\n\n## Examples of When to Use the Todo List\n\n<example>\nUser: I want to add a dark mode toggle to the application settings. Make sure you run the tests and build when you're done!\nAssistant: I'll help add a dark mode toggle to your application settings. Let me create a todo list to track this implementation.\n*Creates todo list with the following items:*\n1. Creating dark mode toggle component in Settings page\n2. Adding dark mode state management (context/store)\n3. Implementing CSS-in-JS styles for dark theme\n4. Updating existing components to support theme switching\n5. Running tests and build process, addressing any failures or errors that occur\n*Begins working on the first task*\n\n<reasoning>\nThe assistant used the todo list because:\n1. Adding dark mode is a multi-step feature requiring UI, state management, and styling changes\n2. The user explicitly requested tests and build be run afterward\n3. 
The assistant inferred that tests and build need to pass by adding \"Ensure tests and build succeed\" as the final task\n</reasoning>\n</example>\n\n<example>\nUser: Help me rename the function getCwd to getCurrentWorkingDirectory across my project\nAssistant: Let me first search through your codebase to find all occurrences of 'getCwd'.\n*Uses grep or search tools to locate all instances of getCwd in the codebase*\nAssistant: I've found 15 instances of 'getCwd' across 8 different files. Let me create a todo list to track these changes.\n*Creates todo list with specific items for each file that needs updating*\n\n<reasoning>\nThe assistant used the todo list because:\n1. First, the assistant searched to understand the scope of the task\n2. Upon finding multiple occurrences across different files, it determined this was a complex task with multiple steps\n3. The todo list helps ensure every instance is tracked and updated systematically\n4. This approach prevents missing any occurrences and maintains code consistency\n</reasoning>\n</example>\n\n\n<example>\nUser: I need to implement these features for my e-commerce site: user registration, product catalog, shopping cart, and checkout flow.\nAssistant: I'll help implement these features. First, let's add all the features to the todo list.\n*Creates a todo list breaking down each feature into specific tasks based on the project architecture*\nAssistant: Let's start with user registration. This will involve creating database models, API endpoints, and frontend forms that integrate with your existing authentication system.\n\n<reasoning>\nThe assistant used the todo list because:\n1. The user provided multiple complex features to implement in a comma separated list\n2. The todo list helps organize these large features into manageable tasks\n3. This approach allows for tracking progress across the entire implementation\n</reasoning>\n</example>\n\n<example>\nUser: Can you help optimize my React application? 
It's rendering slowly and has performance issues.</user>\nAssistant: I'll help optimize your React application. First, let me examine your codebase to identify potential performance bottlenecks.\n*Reviews component structure, render patterns, state management, and data fetching*\nAssistant: After analyzing your codebase, I've identified several performance issues. Let me create a todo list to track our optimization efforts.\n*Creates todo list with items like: 1) Implementing memoization for expensive calculations in ProductList, 2) Adding virtualization for long lists in Dashboard, 3) Optimizing image loading in Gallery component, 4) Fixing state update loops in ShoppingCart, 5) Reviewing bundle size and implementing code splitting*\nLet's start by implementing memoization for the expensive calculations in your ProductList component.</assistant>\n\n<reasoning>\nThe assistant used the todo list because:\n1. First, the assistant examined the codebase to identify specific performance issues\n2. Based on this analysis, it identified multiple optimization opportunities\n3. Performance optimization is a non-trivial task requiring multiple steps\n4. The todo list helps methodically track improvements across different components\n5. This systematic approach ensures all performance bottlenecks are addressed\n</reasoning>\n</example>\n\n## Examples of When NOT to Use the Todo List\n\n<example>\nUser: How do I print 'Hello World' in Python?\nAssistant: In Python, you can print \"Hello World\" with this simple code:\n\npython\nprint(\"Hello World\")\n\nThis will output the text \"Hello World\" to the console when executed.</assistant>\n\n<reasoning>\nThe assistant did not use the todo list because this is a single, trivial task that can be completed in one step. 
There's no need to track multiple tasks or steps for such a straightforward request.\n</reasoning>\n</example>\n\n<example>\nUser: What does the git status command do?\nAssistant: The git status command shows the current state of your working directory and staging area. It displays which changes have been staged, which haven't, and which files aren't being tracked by Git. This is a fundamental command for understanding the current state of your repository before making commits.\n\n<reasoning>\nThe assistant did not use the todo list because this is an informational request with no actual coding task to complete. The user is simply asking for an explanation, not for the assistant to perform multiple steps or tasks.\n</reasoning>\n</example>\n\n<example>\nUser: Can you add a comment to the calculateTotal function to explain what it does?\nAssistant: Sure, let me add a comment to the calculateTotal function to explain what it does.\n* Uses the Edit tool to add a comment to the calculateTotal function *\n\n<reasoning>\nThe assistant did not use the todo list because this is a single, straightforward task confined to one location in the code. Adding a comment doesn't require tracking multiple steps or systematic organization.\n</reasoning>\n</example>\n\n<example>\nUser: Run npm install for me and tell me what happens.\nAssistant: I'll run the npm install command for you.\n\n*Executes: npm install*\n\nThe command completed successfully. Here's the output:\n[Output of npm install command]\n\nAll dependencies have been installed according to your package.json file.\n\n<reasoning>\nThe assistant did not use the todo list because this is a single command execution with immediate results. There are no multiple steps to track or organize, making the todo list unnecessary for this straightforward task.\n</reasoning>\n</example>\n\n## Task States and Management\n\n1. 
**Task States**: Use these states to track progress:\n - pending: Task not yet started\n - in_progress: Currently working on (limit to ONE task at a time)\n - completed: Task finished successfully\n\n **IMPORTANT**: Task descriptions must have two forms:\n - content: The imperative form describing what needs to be done (e.g., \"Run tests\", \"Build the project\")\n - activeForm: The present continuous form shown during execution (e.g., \"Running tests\", \"Building the project\")\n\n2. **Task Management**:\n - Update task status in real-time as you work\n - Mark tasks complete IMMEDIATELY after finishing (don't batch completions)\n - Exactly ONE task must be in_progress at any time (not less, not more)\n - Complete current tasks before starting new ones\n - Remove tasks that are no longer relevant from the list entirely\n\n3. **Task Completion Requirements**:\n - ONLY mark a task as completed when you have FULLY accomplished it\n - If you encounter errors, blockers, or cannot finish, keep the task as in_progress\n - When blocked, create a new task describing what needs to be resolved\n - Never mark a task as completed if:\n - Tests are failing\n - Implementation is partial\n - You encountered unresolved errors\n - You couldn't find necessary files or dependencies\n\n4. **Task Breakdown**:\n - Create specific, actionable items\n - Break complex tasks into smaller, manageable steps\n - Use clear, descriptive task names\n - Always provide both forms:\n - content: \"Fix authentication bug\"\n - activeForm: \"Fixing authentication bug\"\n\nWhen in doubt, use this tool. 
Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.\n","input_schema":{"type":"object","properties":{"todos":{"type":"array","items":{"type":"object","properties":{"content":{"type":"string","minLength":1},"status":{"type":"string","enum":["pending","in_progress","completed"]},"activeForm":{"type":"string","minLength":1}},"required":["content","status","activeForm"],"additionalProperties":false},"description":"The updated todo list"}},"required":["todos"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"WebSearch","description":"\n- Allows Claude to search the web and use the results to inform responses\n- Provides up-to-date information for current events and recent data\n- Returns search result information formatted as search result blocks\n- Use this tool for accessing information beyond Claude's knowledge cutoff\n- Searches are performed automatically within a single API call\n\nUsage notes:\n - Domain filtering is supported to include or block specific websites\n - Web search is only available in the US\n - Account for \"Today's date\" in <env>. For example, if <env> says \"Today's date: 2025-07-01\", and the user wants the latest docs, do not use 2024 in the search query. 
Use 2025.\n","input_schema":{"type":"object","properties":{"query":{"type":"string","minLength":2,"description":"The search query to use"},"allowed_domains":{"type":"array","items":{"type":"string"},"description":"Only include search results from these domains"},"blocked_domains":{"type":"array","items":{"type":"string"},"description":"Never include search results from these domains"}},"required":["query"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"BashOutput","description":"\n- Retrieves output from a running or completed background bash shell\n- Takes a shell_id parameter identifying the shell\n- Always returns only new output since the last check\n- Returns stdout and stderr output along with shell status\n- Supports optional regex filtering to show only lines matching a pattern\n- Use this tool when you need to monitor or check the output of a long-running shell\n- Shell IDs can be found using the /bashes command\n","input_schema":{"type":"object","properties":{"bash_id":{"type":"string","description":"The ID of the background shell to retrieve output from"},"filter":{"type":"string","description":"Optional regular expression to filter the output lines. Only lines matching this regex will be included in the result. 
Any lines that do not match will no longer be available to read."}},"required":["bash_id"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"KillShell","description":"\n- Kills a running background bash shell by its ID\n- Takes a shell_id parameter identifying the shell to kill\n- Returns a success or failure status \n- Use this tool when you need to terminate a long-running shell\n- Shell IDs can be found using the /bashes command\n","input_schema":{"type":"object","properties":{"shell_id":{"type":"string","description":"The ID of the background shell to kill"}},"required":["shell_id"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"Skill","description":"Execute a skill within the main conversation\n\n<skills_instructions>\nWhen users ask you to perform tasks, check if any of the available skills below can help complete the task more effectively. Skills provide specialized capabilities and domain knowledge.\n\nHow to use skills:\n- Invoke skills using this tool with the skill name only (no arguments)\n- When you invoke a skill, you will see <command-message>The \"{name}\" skill is loading</command-message>\n- The skill's prompt will expand and provide detailed instructions on how to complete the task\n- Examples:\n - `command: \"pdf\"` - invoke the pdf skill\n - `command: \"xlsx\"` - invoke the xlsx skill\n - `command: \"ms-office-suite:pdf\"` - invoke using fully qualified name\n\nImportant:\n- Only use skills listed in <available_skills> below\n- Do not invoke a skill that is already running\n- Do not use this tool for built-in CLI commands (like /help, /clear, etc.)\n</skills_instructions>\n\n<available_skills>\n\n</available_skills>\n","input_schema":{"type":"object","properties":{"command":{"type":"string","description":"The skill name (no arguments). 
E.g., \"pdf\" or \"xlsx\""}},"required":["command"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"SlashCommand","description":"Execute a slash command within the main conversation\n\n**IMPORTANT - Intent Matching:**\nBefore starting any task, CHECK if the user's request matches one of the slash commands listed below. This tool exists to route user intentions to specialized workflows.\n\nHow slash commands work:\nWhen you use this tool or when a user types a slash command, you will see <command-message>{name} is running…</command-message> followed by the expanded prompt. For example, if .claude/commands/foo.md contains \"Print today's date\", then /foo expands to that prompt in the next message.\n\nUsage:\n- `command` (required): The slash command to execute, including any arguments\n- Example: `command: \"/review-pr 123\"`\n\nIMPORTANT: Only use this tool for custom slash commands that appear in the Available Commands list below. Do NOT use for:\n- Built-in CLI commands (like /help, /clear, etc.)\n- Commands not shown in the list\n- Commands you think might exist but aren't listed\n\nNotes:\n- When a user requests multiple slash commands, execute each one sequentially and check for <command-message>{name} is running…</command-message> to verify each has been processed\n- Do not invoke a command that is already running. For example, if you see <command-message>foo is running…</command-message>, do NOT use this tool with \"/foo\" - process the expanded prompt in the following message\n- Only custom slash commands with descriptions are listed in Available Commands. 
If a user's command is not listed, ask them to check the slash command file and consult the docs.\n","input_schema":{"type":"object","properties":{"command":{"type":"string","description":"The slash command to execute with its arguments, e.g., \"/review-pr 123\""}},"required":["command"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}],"metadata":{"user_id":"user_f432cd46d7f1eb89e6257e31d6af7ba2a1daa1af69a817ea1784e5c15a24edf5_account__session_ed2d8ac7-3aba-4db0-a755-ee9ce121461e"}}