ZhaoShanGeng commited on
Commit
0937160
·
1 Parent(s): 873f10f

feat: 错误信息返回客户端、自动重试、工具调用格式适配、思考模型多轮工具调用

Browse files

文件改动:

src/server/index.js:
- 新增 with429Retry() 通用重试工具函数,处理 429 状态码自动重试
- 新增 buildOpenAIErrorPayload() 构造 OpenAI 兼容错误响应
- 错误信息直接返回客户端:HTTP 状态码 + { error: { message, type, code } }
- 流式/非流式请求均使用 with429Retry 包装,支持配置重试次数
- 移除旧的错误处理逻辑,改为标准 OpenAI 错误格式

src/utils/utils.js:
- 新增 sanitizeToolName() 规范工具名称为 Vertex 要求格式 ^[a-zA-Z0-9_-]{1,128}$
- 新增 DEFAULT_THOUGHT_SIGNATURE 思维链签名占位常量
- handleAssistantMessage() 新增 enableThinking 参数
- 思考模型多轮工具调用支持:历史 assistant 消息补充思考块 + 签名块
- openaiMessageToAntigravity() 传递 enableThinking 参数
- convertOpenAIToolsToAntigravity() 使用 sanitizeToolName 规范工具名

src/api/client.js:
- 新增 createApiError() 创建带状态码的 API 错误对象
- handleErrorResponse() 使用 createApiError 抛出错误,保留原始状态码和响应体

src/config/config.js:
- 新增 retryTimes 配置项,默认 3 次重试

config.json:
- other 新增 retryTimes: 3 配置

public/index.html:
- 新增 429重试次数 配置输入框

public/app.js:
- loadConfig() 加载 retryTimes 配置
- 表单提交时保存 retryTimes 配置

API.md:
- 新增 429 自动重试配置说明文档

API.md CHANGED
@@ -259,11 +259,28 @@ curl http://localhost:8045/v1/chat/completions \
259
  -d '{
260
  "model": "gemini-2.5-pro",
261
  "messages": [{"role": "user", "content": "证明勾股定理"}],
262
- "stream": true,
263
  "thinking_budget": 24000
264
  }'
265
  ```
266
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
  ### 思维链响应格式
268
 
269
  思维链内容通过 `reasoning_content` 字段输出(兼容 DeepSeek 格式):
 
259
  -d '{
260
  "model": "gemini-2.5-pro",
261
  "messages": [{"role": "user", "content": "证明勾股定理"}],
262
+ "stream": true,
263
  "thinking_budget": 24000
264
  }'
265
  ```
266
 
267
+ ### 429 自动重试配置
268
+
269
+ 所有 429 重试次数仅通过服务端配置控制:
270
+
271
+ - 全局默认重试次数(服务端配置):
272
+ - 文件:`config.json` 中的 `other.retryTimes`
273
+ - 示例:
274
+ ```json
275
+ "other": {
276
+ "timeout": 300000,
277
+ "retryTimes": 3,
278
+ "skipProjectIdFetch": false,
279
+ "useNativeAxios": false
280
+ }
281
+ ```
282
+ - 服务器始终使用这里配置的值作为 429 时的重试次数(默认 3 次)。
283
+
284
  ### 思维链响应格式
285
 
286
  思维链内容通过 `reasoning_content` 字段输出(兼容 DeepSeek 格式):
config.json CHANGED
@@ -29,6 +29,7 @@
29
  },
30
  "other": {
31
  "timeout": 300000,
 
32
  "skipProjectIdFetch": false,
33
  "useNativeAxios": false
34
  }
 
29
  },
30
  "other": {
31
  "timeout": 300000,
32
+ "retryTimes": 3,
33
  "skipProjectIdFetch": false,
34
  "useNativeAxios": false
35
  }
public/app.js CHANGED
@@ -1242,6 +1242,7 @@ async function loadConfig() {
1242
  }
1243
  if (json.other) {
1244
  if (form.elements['TIMEOUT']) form.elements['TIMEOUT'].value = json.other.timeout ?? '';
 
1245
  if (form.elements['SKIP_PROJECT_ID_FETCH']) form.elements['SKIP_PROJECT_ID_FETCH'].value = json.other.skipProjectIdFetch ? 'true' : 'false';
1246
  }
1247
  // 加载轮询策略配置
@@ -1298,6 +1299,10 @@ document.getElementById('configForm').addEventListener('submit', async (e) => {
1298
  jsonConfig.defaults.thinkingBudget = Number.isNaN(num) ? undefined : num;
1299
  }
1300
  else if (key === 'TIMEOUT') jsonConfig.other.timeout = parseInt(value) || undefined;
 
 
 
 
1301
  else if (key === 'SKIP_PROJECT_ID_FETCH') jsonConfig.other.skipProjectIdFetch = value === 'true';
1302
  else if (key === 'ROTATION_STRATEGY') jsonConfig.rotation.strategy = value || undefined;
1303
  else if (key === 'ROTATION_REQUEST_COUNT') jsonConfig.rotation.requestCount = parseInt(value) || undefined;
 
1242
  }
1243
  if (json.other) {
1244
  if (form.elements['TIMEOUT']) form.elements['TIMEOUT'].value = json.other.timeout ?? '';
1245
+ if (form.elements['RETRY_TIMES']) form.elements['RETRY_TIMES'].value = json.other.retryTimes ?? '';
1246
  if (form.elements['SKIP_PROJECT_ID_FETCH']) form.elements['SKIP_PROJECT_ID_FETCH'].value = json.other.skipProjectIdFetch ? 'true' : 'false';
1247
  }
1248
  // 加载轮询策略配置
 
1299
  jsonConfig.defaults.thinkingBudget = Number.isNaN(num) ? undefined : num;
1300
  }
1301
  else if (key === 'TIMEOUT') jsonConfig.other.timeout = parseInt(value) || undefined;
1302
+ else if (key === 'RETRY_TIMES') {
1303
+ const num = parseInt(value);
1304
+ jsonConfig.other.retryTimes = Number.isNaN(num) ? undefined : num;
1305
+ }
1306
  else if (key === 'SKIP_PROJECT_ID_FETCH') jsonConfig.other.skipProjectIdFetch = value === 'true';
1307
  else if (key === 'ROTATION_STRATEGY') jsonConfig.rotation.strategy = value || undefined;
1308
  else if (key === 'ROTATION_REQUEST_COUNT') jsonConfig.rotation.requestCount = parseInt(value) || undefined;
public/index.html CHANGED
@@ -173,6 +173,10 @@
173
  <label>超时(ms)</label>
174
  <input type="number" name="TIMEOUT" placeholder="300000">
175
  </div>
 
 
 
 
176
  <div class="form-group compact">
177
  <label>跳过验证</label>
178
  <select name="SKIP_PROJECT_ID_FETCH">
 
173
  <label>超时(ms)</label>
174
  <input type="number" name="TIMEOUT" placeholder="300000">
175
  </div>
176
+ <div class="form-group compact">
177
+ <label>429重试次数</label>
178
+ <input type="number" name="RETRY_TIMES" placeholder="0">
179
+ </div>
180
  <div class="form-group compact">
181
  <label>跳过验证</label>
182
  <select name="SKIP_PROJECT_ID_FETCH">
src/api/client.js CHANGED
@@ -196,6 +196,15 @@ function buildRequesterConfig(headers, body = null) {
196
  return reqConfig;
197
  }
198
 
 
 
 
 
 
 
 
 
 
199
  // 统一错误处理
200
  async function handleApiError(error, token) {
201
  const status = error.response?.status || error.status || 'Unknown';
@@ -215,13 +224,13 @@ async function handleApiError(error, token) {
215
 
216
  if (status === 403) {
217
  if (JSON.stringify(errorBody).includes("The caller does not")){
218
- throw new Error(`超出模型最大上下文。错误详情: ${errorBody}`);
219
  }
220
  tokenManager.disableCurrentToken(token);
221
- throw new Error(`该账号没有使用权限,已自动禁用。错误详情: ${errorBody}`);
222
  }
223
 
224
- throw new Error(`API请求失败 (${status}): ${errorBody}`);
225
  }
226
 
227
  // 转换 functionCall 为 OpenAI 格式(使用对象池)
 
196
  return reqConfig;
197
  }
198
 
199
// Build an Error tagged with the upstream HTTP status and raw response body,
// so the server layer can recognize it (via isUpstreamApiError) and pass the
// original error details through to the client.
function createApiError(message, status, rawBody) {
  return Object.assign(new Error(message), {
    status,
    rawBody,
    isUpstreamApiError: true,
  });
}
207
+
208
  // 统一错误处理
209
  async function handleApiError(error, token) {
210
  const status = error.response?.status || error.status || 'Unknown';
 
224
 
225
  if (status === 403) {
226
  if (JSON.stringify(errorBody).includes("The caller does not")){
227
+ throw createApiError(`超出模型最大上下文。错误详情: ${errorBody}`, status, errorBody);
228
  }
229
  tokenManager.disableCurrentToken(token);
230
+ throw createApiError(`该账号没有使用权限,已自动禁用。错误详情: ${errorBody}`, status, errorBody);
231
  }
232
 
233
+ throw createApiError(`API请求失败 (${status}): ${errorBody}`, status, errorBody);
234
  }
235
 
236
  // 转换 functionCall 为 OpenAI 格式(使用对象池)
src/config/config.js CHANGED
@@ -90,6 +90,8 @@ const config = {
90
  },
91
  useNativeAxios: jsonConfig.other?.useNativeAxios !== false,
92
  timeout: jsonConfig.other?.timeout || 300000,
 
 
93
  proxy: getProxyConfig(),
94
  systemInstruction: process.env.SYSTEM_INSTRUCTION || '',
95
  skipProjectIdFetch: jsonConfig.other?.skipProjectIdFetch === true
 
90
  },
91
  useNativeAxios: jsonConfig.other?.useNativeAxios !== false,
92
  timeout: jsonConfig.other?.timeout || 300000,
93
+ // 默认 429 重试次数(统一配置,0 表示不重试,默认 3 次)
94
+ retryTimes: Number.isFinite(jsonConfig.other?.retryTimes) ? jsonConfig.other.retryTimes : 3,
95
  proxy: getProxyConfig(),
96
  systemInstruction: process.env.SYSTEM_INSTRUCTION || '',
97
  skipProjectIdFetch: jsonConfig.other?.skipProjectIdFetch === true
src/server/index.js CHANGED
@@ -16,6 +16,27 @@ const __dirname = path.dirname(__filename);
16
 
17
  const app = express();
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  // ==================== 心跳机制(防止 CF 超时) ====================
20
  const HEARTBEAT_INTERVAL = config.server.heartbeatInterval || 15000; // 从配置读取心跳间隔
21
  const SSE_HEARTBEAT = Buffer.from(': heartbeat\n\n');
@@ -94,10 +115,44 @@ const writeStreamData = (res, data) => {
94
 
95
  // 工具函数:结束流式响应
96
  const endStream = (res) => {
 
97
  res.write(SSE_DONE);
98
  res.end();
99
  };
100
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
  app.use(cors());
102
  app.use(express.json({ limit: config.security.maxRequestSize }));
103
 
@@ -192,55 +247,68 @@ app.post('/v1/chat/completions', async (req, res) => {
192
  //console.log(JSON.stringify(requestBody,null,2))
193
 
194
  const { id, created } = createResponseMeta();
 
 
195
 
196
  if (stream) {
197
  setStreamHeaders(res);
198
 
199
  // 启动心跳,防止 Cloudflare 超时断连
200
  const heartbeatTimer = createHeartbeat(res);
201
-
202
  try {
203
  if (isImageModel) {
204
- //console.log(JSON.stringify(requestBody,null,2));
205
- const { content, usage } = await generateAssistantResponseNoStream(requestBody, token);
 
 
 
206
  writeStreamData(res, createStreamChunk(id, created, model, { content }));
207
  writeStreamData(res, { ...createStreamChunk(id, created, model, {}, 'stop'), usage });
208
  } else {
209
  let hasToolCall = false;
210
  let usageData = null;
211
- await generateAssistantResponse(requestBody, token, (data) => {
212
- if (data.type === 'usage') {
213
- usageData = data.usage;
214
- } else if (data.type === 'reasoning') {
215
- // DeepSeek 格式:思维链内容通过 reasoning_content 字段输出
216
- const delta = { reasoning_content: data.reasoning_content };
217
- writeStreamData(res, createStreamChunk(id, created, model, delta));
218
- } else if (data.type === 'tool_calls') {
219
- hasToolCall = true;
220
- // OpenAI 流式 schema 要求每个 tool_call 带有 index 字段
221
- const toolCallsWithIndex = data.tool_calls.map((toolCall, index) => ({
222
- index,
223
- ...toolCall
224
- }));
225
- const delta = { tool_calls: toolCallsWithIndex };
226
- writeStreamData(res, createStreamChunk(id, created, model, delta));
227
- } else {
228
- const delta = { content: data.content };
229
- writeStreamData(res, createStreamChunk(id, created, model, delta));
230
- }
231
- });
 
232
  writeStreamData(res, { ...createStreamChunk(id, created, model, {}, hasToolCall ? 'tool_calls' : 'stop'), usage: usageData });
233
  }
234
- } finally {
235
  clearInterval(heartbeatTimer);
236
  endStream(res);
 
 
 
237
  }
238
  } else {
239
  // 非流式请求:设置较长超时,避免大模型响应超时
240
  req.setTimeout(0); // 禁用请求超时
241
  res.setTimeout(0); // 禁用响应超时
242
 
243
- const { content, reasoningContent, toolCalls, usage } = await generateAssistantResponseNoStream(requestBody, token);
 
 
 
 
244
  // DeepSeek 格式:reasoning_content 在 content 之前
245
  const message = { role: 'assistant' };
246
  if (reasoningContent) message.reasoning_content = reasoningContent;
@@ -265,29 +333,15 @@ app.post('/v1/chat/completions', async (req, res) => {
265
  }
266
  } catch (error) {
267
  logger.error('生成响应失败:', error.message);
268
- if (!res.headersSent) {
269
- const { id, created } = createResponseMeta();
270
- const errorContent = `错误: ${error.message}`;
271
-
272
- if (stream) {
273
- setStreamHeaders(res);
274
- writeStreamData(res, createStreamChunk(id, created, model, { content: errorContent }));
275
- writeStreamData(res, createStreamChunk(id, created, model, {}, 'stop'));
276
- endStream(res);
277
- } else {
278
- res.json({
279
- id,
280
- object: 'chat.completion',
281
- created,
282
- model,
283
- choices: [{
284
- index: 0,
285
- message: { role: 'assistant', content: errorContent },
286
- finish_reason: 'stop'
287
- }]
288
- });
289
- }
290
  }
 
 
 
 
 
291
  }
292
  });
293
 
 
16
 
17
  const app = express();
18
 
19
// ==================== Generic retry helper (HTTP 429) ====================
// Re-invokes `fn` when it rejects with a 429 status; any other error (or a
// 429 after the retry budget is spent) propagates to the caller unchanged.
// `fn` receives the zero-based attempt number. A non-finite or non-positive
// maxRetries disables retrying.
// NOTE(review): retries fire immediately, with no backoff delay — confirm
// this is intentional for the upstream's rate limiting.
const with429Retry = async (fn, maxRetries, loggerPrefix = '') => {
  const budget = Number.isFinite(maxRetries) && maxRetries > 0 ? Math.floor(maxRetries) : 0;
  // First call + up to `budget` retries.
  for (let attempt = 0; ; attempt += 1) {
    try {
      return await fn(attempt);
    } catch (error) {
      const httpStatus = Number(error.status || error.response?.status);
      const canRetry = httpStatus === 429 && attempt < budget;
      if (!canRetry) {
        throw error;
      }
      logger.warn(`${loggerPrefix}收到 429,正在进行第 ${attempt + 1} 次重试(共 ${budget} 次)`);
    }
  }
};
39
+
40
  // ==================== 心跳机制(防止 CF 超时) ====================
41
  const HEARTBEAT_INTERVAL = config.server.heartbeatInterval || 15000; // 从配置读取心跳间隔
42
  const SSE_HEARTBEAT = Buffer.from(': heartbeat\n\n');
 
115
 
116
  // 工具函数:结束流式响应
117
  const endStream = (res) => {
118
+ if (res.writableEnded) return;
119
  res.write(SSE_DONE);
120
  res.end();
121
  };
122
 
123
+ // OpenAI 兼容错误响应构造
124
+ const buildOpenAIErrorPayload = (error, statusCode) => {
125
+ if (error.isUpstreamApiError && error.rawBody) {
126
+ try {
127
+ const raw = typeof error.rawBody === 'string' ? JSON.parse(error.rawBody) : error.rawBody;
128
+ const inner = raw.error || raw;
129
+ return {
130
+ error: {
131
+ message: inner.message || error.message || 'Upstream API error',
132
+ type: inner.type || 'upstream_api_error',
133
+ code: inner.code ?? statusCode
134
+ }
135
+ };
136
+ } catch {
137
+ return {
138
+ error: {
139
+ message: error.rawBody || error.message || 'Upstream API error',
140
+ type: 'upstream_api_error',
141
+ code: statusCode
142
+ }
143
+ };
144
+ }
145
+ }
146
+
147
+ return {
148
+ error: {
149
+ message: error.message || 'Internal server error',
150
+ type: 'server_error',
151
+ code: statusCode
152
+ }
153
+ };
154
+ };
155
+
156
  app.use(cors());
157
  app.use(express.json({ limit: config.security.maxRequestSize }));
158
 
 
247
  //console.log(JSON.stringify(requestBody,null,2))
248
 
249
  const { id, created } = createResponseMeta();
250
+ const maxRetries = Number(config.retryTimes || 0);
251
+ const safeRetries = maxRetries > 0 ? Math.floor(maxRetries) : 0;
252
 
253
  if (stream) {
254
  setStreamHeaders(res);
255
 
256
  // 启动心跳,防止 Cloudflare 超时断连
257
  const heartbeatTimer = createHeartbeat(res);
258
+
259
  try {
260
  if (isImageModel) {
261
+ const { content, usage } = await with429Retry(
262
+ () => generateAssistantResponseNoStream(requestBody, token),
263
+ safeRetries,
264
+ 'chat.stream.image '
265
+ );
266
  writeStreamData(res, createStreamChunk(id, created, model, { content }));
267
  writeStreamData(res, { ...createStreamChunk(id, created, model, {}, 'stop'), usage });
268
  } else {
269
  let hasToolCall = false;
270
  let usageData = null;
271
+
272
+ await with429Retry(
273
+ () => generateAssistantResponse(requestBody, token, (data) => {
274
+ if (data.type === 'usage') {
275
+ usageData = data.usage;
276
+ } else if (data.type === 'reasoning') {
277
+ const delta = { reasoning_content: data.reasoning_content };
278
+ writeStreamData(res, createStreamChunk(id, created, model, delta));
279
+ } else if (data.type === 'tool_calls') {
280
+ hasToolCall = true;
281
+ const toolCallsWithIndex = data.tool_calls.map((toolCall, index) => ({ index, ...toolCall }));
282
+ const delta = { tool_calls: toolCallsWithIndex };
283
+ writeStreamData(res, createStreamChunk(id, created, model, delta));
284
+ } else {
285
+ const delta = { content: data.content };
286
+ writeStreamData(res, createStreamChunk(id, created, model, delta));
287
+ }
288
+ }),
289
+ safeRetries,
290
+ 'chat.stream '
291
+ );
292
+
293
  writeStreamData(res, { ...createStreamChunk(id, created, model, {}, hasToolCall ? 'tool_calls' : 'stop'), usage: usageData });
294
  }
295
+
296
  clearInterval(heartbeatTimer);
297
  endStream(res);
298
+ } catch (error) {
299
+ clearInterval(heartbeatTimer);
300
+ throw error;
301
  }
302
  } else {
303
  // 非流式请求:设置较长超时,避免大模型响应超时
304
  req.setTimeout(0); // 禁用请求超时
305
  res.setTimeout(0); // 禁用响应超时
306
 
307
+ const { content, reasoningContent, toolCalls, usage } = await with429Retry(
308
+ () => generateAssistantResponseNoStream(requestBody, token),
309
+ safeRetries,
310
+ 'chat.no_stream '
311
+ );
312
  // DeepSeek 格式:reasoning_content 在 content 之前
313
  const message = { role: 'assistant' };
314
  if (reasoningContent) message.reasoning_content = reasoningContent;
 
333
  }
334
  } catch (error) {
335
  logger.error('生成响应失败:', error.message);
336
+ // 如果已经开始写响应,就不再追加错误内容,避免协议冲突
337
+ if (res.headersSent) {
338
+ return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
  }
340
+
341
+ // OpenAI 兼容错误返回:HTTP 状态码 + { error: { message, type, code } }
342
+ const statusCode = Number(error.status) || 500;
343
+ const errorPayload = buildOpenAIErrorPayload(error, statusCode);
344
+ return res.status(statusCode).json(errorPayload);
345
  }
346
  });
347
 
src/utils/utils.js CHANGED
@@ -3,6 +3,9 @@ import tokenManager from '../auth/token_manager.js';
3
  import { generateRequestId } from './idGenerator.js';
4
  import os from 'os';
5
 
 
 
 
6
  function extractImagesFromContent(content) {
7
  const result = { text: '', images: [] };
8
 
@@ -50,7 +53,25 @@ function handleUserMessage(extracted, antigravityMessages){
50
  ]
51
  })
52
  }
53
- function handleAssistantMessage(message, antigravityMessages){
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  const lastMessage = antigravityMessages[antigravityMessages.length - 1];
55
  const hasToolCalls = message.tool_calls && message.tool_calls.length > 0;
56
  const hasContent = message.content && message.content.trim() !== '';
@@ -58,17 +79,42 @@ function handleAssistantMessage(message, antigravityMessages){
58
  const antigravityTools = hasToolCalls ? message.tool_calls.map(toolCall => ({
59
  functionCall: {
60
  id: toolCall.id,
61
- name: toolCall.function.name,
62
  args: {
63
  query: toolCall.function.arguments
64
  }
65
  }
66
  })) : [];
67
-
68
  if (lastMessage?.role === "model" && hasToolCalls && !hasContent){
69
  lastMessage.parts.push(...antigravityTools)
70
  }else{
71
  const parts = [];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  if (hasContent) parts.push({ text: message.content.trimEnd() });
73
  parts.push(...antigravityTools);
74
 
@@ -115,7 +161,7 @@ function handleToolCall(message, antigravityMessages){
115
  });
116
  }
117
  }
118
- function openaiMessageToAntigravity(openaiMessages){
119
  const antigravityMessages = [];
120
  for (const message of openaiMessages) {
121
  if (message.role === "user") {
@@ -126,7 +172,7 @@ function openaiMessageToAntigravity(openaiMessages){
126
  const extracted = extractImagesFromContent(message.content);
127
  handleUserMessage(extracted, antigravityMessages);
128
  } else if (message.role === "assistant") {
129
- handleAssistantMessage(message, antigravityMessages);
130
  } else if (message.role === "tool") {
131
  handleToolCall(message, antigravityMessages);
132
  }
@@ -279,10 +325,12 @@ function convertOpenAIToolsToAntigravity(openaiTools){
279
  cleanedParams.properties = {};
280
  }
281
 
 
 
282
  return {
283
  functionDeclarations: [
284
  {
285
- name: tool.function.name,
286
  description: tool.function.description,
287
  parameters: cleanedParams
288
  }
@@ -334,7 +382,7 @@ function generateRequestBody(openaiMessages,modelName,parameters,openaiTools,tok
334
  project: token.projectId,
335
  requestId: generateRequestId(),
336
  request: {
337
- contents: openaiMessageToAntigravity(filteredMessages),
338
  tools: convertOpenAIToolsToAntigravity(openaiTools),
339
  toolConfig: {
340
  functionCallingConfig: {
 
3
  import { generateRequestId } from './idGenerator.js';
4
  import os from 'os';
5
 
6
// Placeholder chain-of-thought signature (base64). Attached to historical
// assistant messages when a thinking model is enabled but no genuine
// signature is available, so the upstream signature check is satisfied.
// NOTE(review): presumably an opaque token the upstream accepts as-is —
// confirm it stays valid; the value is not derived from anything in this file.
const DEFAULT_THOUGHT_SIGNATURE = 'RXFRRENrZ0lDaEFDR0FJcVFKV1Bvcy9GV20wSmtMV2FmWkFEbGF1ZTZzQTdRcFlTc1NvbklmemtSNFo4c1dqeitIRHBOYW9hS2NYTE1TeTF3bjh2T1RHdE1KVjVuYUNQclZ5cm9DMFNETHk4M0hOSWsrTG1aRUhNZ3hvTTl0ZEpXUDl6UUMzOExxc2ZJakI0UkkxWE1mdWJ1VDQrZnY0Znp0VEoyTlhtMjZKL2daYi9HL1gwcmR4b2x0VE54empLemtLcEp0ZXRia2plb3NBcWlRSWlXUHloMGhVVTk1dHNha1dyNDVWNUo3MTJjZDNxdHQ5Z0dkbjdFaFk4dUllUC9CcThVY2VZZC9YbFpYbDc2bHpEbmdzL2lDZXlNY3NuZXdQMjZBTDRaQzJReXdibVQzbXlSZmpld3ZSaUxxOWR1TVNidHIxYXRtYTJ0U1JIRjI0Z0JwUnpadE1RTmoyMjR4bTZVNUdRNXlOSWVzUXNFNmJzRGNSV0RTMGFVOEZERExybmhVQWZQT2JYMG5lTGR1QnU1VGZOWW9NZGlRbTgyUHVqVE1xaTlmN0t2QmJEUUdCeXdyVXR2eUNnTEFHNHNqeWluZDRCOEg3N2ZJamt5blI3Q3ZpQzlIOTVxSENVTCt3K3JzMmsvV0sxNlVsbGlTK0pET3UxWXpPMWRPOUp3V3hEMHd5ZVU0a0Y5MjIxaUE5Z2lUd2djZXhSU2c4TWJVMm1NSjJlaGdlY3g0YjJ3QloxR0FFPQ==';
8
+
9
  function extractImagesFromContent(content) {
10
  const result = { text: '', images: [] };
11
 
 
53
  ]
54
  })
55
  }
56
// Normalize a tool name to the shape Vertex requires: ^[a-zA-Z0-9_-]{1,128}$.
// Illegal characters become underscores, leading/trailing underscores are
// trimmed, and anything unusable falls back to the literal 'tool'.
function sanitizeToolName(name) {
  if (typeof name !== 'string' || name === '') {
    return 'tool';
  }
  const normalized = name
    .replace(/[^a-zA-Z0-9_-]/g, '_')   // replace illegal characters
    .replace(/^_+|_+$/g, '');          // drop underscore padding
  if (normalized === '') {
    return 'tool';
  }
  // Enforce the 128-character ceiling.
  return normalized.length > 128 ? normalized.slice(0, 128) : normalized;
}
74
+ function handleAssistantMessage(message, antigravityMessages, enableThinking){
75
  const lastMessage = antigravityMessages[antigravityMessages.length - 1];
76
  const hasToolCalls = message.tool_calls && message.tool_calls.length > 0;
77
  const hasContent = message.content && message.content.trim() !== '';
 
79
  const antigravityTools = hasToolCalls ? message.tool_calls.map(toolCall => ({
80
  functionCall: {
81
  id: toolCall.id,
82
+ name: sanitizeToolName(toolCall.function.name),
83
  args: {
84
  query: toolCall.function.arguments
85
  }
86
  }
87
  })) : [];
88
+
89
  if (lastMessage?.role === "model" && hasToolCalls && !hasContent){
90
  lastMessage.parts.push(...antigravityTools)
91
  }else{
92
  const parts = [];
93
+
94
+ // 对于启用思考的模型,在历史 assistant 消息中补一个思考块 + 签名块
95
+ // 结构示例:
96
+ // {
97
+ // "role": "model",
98
+ // "parts": [
99
+ // { "text": "␈", "thought": true },
100
+ // { "text": "␈", "thoughtSignature": "..." },
101
+ // { "text": "正常回复..." }
102
+ // ]
103
+ // }
104
+ if (enableThinking) {
105
+ // 默认思考内容不能是完全空字符串,否则上游会要求 thinking 字段
106
+ // 这里用一个不可见的退格符作为占位,实际展示时等价于“空思考块”
107
+ let reasoningText = '';
108
+ if (typeof message.reasoning_content === 'string' && message.reasoning_content.length > 0) {
109
+ reasoningText = message.reasoning_content;
110
+ } else {
111
+ reasoningText = ' '; // 退格符占位
112
+ }
113
+ parts.push({ text: reasoningText, thought: true });
114
+ // 思维链签名占位,避免上游校验缺少签名字段
115
+ parts.push({ text: ' ', thoughtSignature: DEFAULT_THOUGHT_SIGNATURE });
116
+ }
117
+
118
  if (hasContent) parts.push({ text: message.content.trimEnd() });
119
  parts.push(...antigravityTools);
120
 
 
161
  });
162
  }
163
  }
164
+ function openaiMessageToAntigravity(openaiMessages, enableThinking){
165
  const antigravityMessages = [];
166
  for (const message of openaiMessages) {
167
  if (message.role === "user") {
 
172
  const extracted = extractImagesFromContent(message.content);
173
  handleUserMessage(extracted, antigravityMessages);
174
  } else if (message.role === "assistant") {
175
+ handleAssistantMessage(message, antigravityMessages, enableThinking);
176
  } else if (message.role === "tool") {
177
  handleToolCall(message, antigravityMessages);
178
  }
 
325
  cleanedParams.properties = {};
326
  }
327
 
328
+ const safeName = sanitizeToolName(tool.function?.name);
329
+
330
  return {
331
  functionDeclarations: [
332
  {
333
+ name: safeName,
334
  description: tool.function.description,
335
  parameters: cleanedParams
336
  }
 
382
  project: token.projectId,
383
  requestId: generateRequestId(),
384
  request: {
385
+ contents: openaiMessageToAntigravity(filteredMessages, enableThinking),
386
  tools: convertOpenAIToolsToAntigravity(openaiTools),
387
  toolConfig: {
388
  functionCallingConfig: {