liuw15 commited on
Commit
ddd87f2
·
1 Parent(s): 2e9246b

更新生图功能

Browse files
.gitignore CHANGED
@@ -17,3 +17,5 @@ logs/
17
  Thumbs.db
18
 
19
  data/
 
 
 
17
  Thumbs.db
18
 
19
  data/
20
+ test/*.png
21
+ test/*.jpeg
src/api/client.js CHANGED
@@ -59,7 +59,7 @@ function buildRequesterConfig(headers, body = null) {
59
 
60
  // 统一错误处理
61
  async function handleApiError(error, token) {
62
- const status = error.response?.status || error.status;
63
  let errorBody = error.message;
64
 
65
  if (error.response?.data?.readable) {
@@ -160,8 +160,7 @@ export async function generateAssistantResponse(requestBody, callback) {
160
  try {
161
  const axiosConfig = { ...buildAxiosConfig(config.api.url, headers, requestBody), responseType: 'stream' };
162
  const response = await axios(axiosConfig);
163
- if (response.status === 403) tokenManager.disableCurrentToken(token);
164
-
165
  response.data.on('data', chunk => processChunk(chunk.toString()));
166
  await new Promise((resolve, reject) => {
167
  response.data.on('end', resolve);
@@ -171,20 +170,21 @@ export async function generateAssistantResponse(requestBody, callback) {
171
  await handleApiError(error, token);
172
  }
173
  } else {
174
- const streamResponse = requester.antigravity_fetchStream(config.api.url, buildRequesterConfig(headers, requestBody));
175
- let errorBody = '';
176
- let statusCode = null;
 
177
 
178
- await new Promise((resolve, reject) => {
179
- streamResponse
180
- .onStart(({ status }) => {
181
- statusCode = status;
182
- if (status === 403) tokenManager.disableCurrentToken(token);
183
- })
184
- .onData((chunk) => statusCode !== 200 ? errorBody += chunk : processChunk(chunk))
185
- .onEnd(() => statusCode !== 200 ? reject(new Error(`API请求失败 (${statusCode}): ${errorBody}`)) : resolve())
186
- .onError(reject);
187
- });
188
  }
189
  }
190
 
@@ -195,9 +195,17 @@ export async function getAvailableModels() {
195
  const headers = buildHeaders(token);
196
 
197
  try {
198
- const data = useAxios
199
- ? (await axios(buildAxiosConfig(config.api.modelsUrl, headers, {}))).data
200
- : await (await requester.antigravity_fetch(config.api.modelsUrl, buildRequesterConfig(headers, {}))).json();
 
 
 
 
 
 
 
 
201
 
202
  return {
203
  object: 'list',
@@ -227,8 +235,7 @@ export async function generateAssistantResponseNoStream(requestBody) {
227
  const response = await requester.antigravity_fetch(config.api.noStreamUrl, buildRequesterConfig(headers, requestBody));
228
  if (response.status !== 200) {
229
  const errorBody = await response.text();
230
- if (response.status === 403) tokenManager.disableCurrentToken(token);
231
- throw new Error(response.status === 403 ? `该账号没有使用权限,已自动禁用。错误详情: ${errorBody}` : `API请求失败 (${response.status}): ${errorBody}`);
232
  }
233
  data = await response.json();
234
  }
@@ -241,6 +248,7 @@ export async function generateAssistantResponseNoStream(requestBody) {
241
  let content = '';
242
  let thinkingContent = '';
243
  const toolCalls = [];
 
244
 
245
  for (const part of parts) {
246
  if (part.thought === true) {
@@ -249,6 +257,11 @@ export async function generateAssistantResponseNoStream(requestBody) {
249
  content += part.text;
250
  } else if (part.functionCall) {
251
  toolCalls.push(convertToToolCall(part.functionCall));
 
 
 
 
 
252
  }
253
  }
254
 
@@ -257,6 +270,13 @@ export async function generateAssistantResponseNoStream(requestBody) {
257
  content = `<think>\n${thinkingContent}\n</think>\n${content}`;
258
  }
259
 
 
 
 
 
 
 
 
260
  return { content, toolCalls };
261
  }
262
 
 
59
 
60
  // 统一错误处理
61
  async function handleApiError(error, token) {
62
+ const status = error.response?.status || error.status || 'Unknown';
63
  let errorBody = error.message;
64
 
65
  if (error.response?.data?.readable) {
 
160
  try {
161
  const axiosConfig = { ...buildAxiosConfig(config.api.url, headers, requestBody), responseType: 'stream' };
162
  const response = await axios(axiosConfig);
163
+
 
164
  response.data.on('data', chunk => processChunk(chunk.toString()));
165
  await new Promise((resolve, reject) => {
166
  response.data.on('end', resolve);
 
170
  await handleApiError(error, token);
171
  }
172
  } else {
173
+ try {
174
+ const streamResponse = requester.antigravity_fetchStream(config.api.url, buildRequesterConfig(headers, requestBody));
175
+ let errorBody = '';
176
+ let statusCode = null;
177
 
178
+ await new Promise((resolve, reject) => {
179
+ streamResponse
180
+ .onStart(({ status }) => { statusCode = status; })
181
+ .onData((chunk) => statusCode !== 200 ? errorBody += chunk : processChunk(chunk))
182
+ .onEnd(() => statusCode !== 200 ? reject({ status: statusCode, message: errorBody }) : resolve())
183
+ .onError(reject);
184
+ });
185
+ } catch (error) {
186
+ await handleApiError(error, token);
187
+ }
188
  }
189
  }
190
 
 
195
  const headers = buildHeaders(token);
196
 
197
  try {
198
+ let data;
199
+ if (useAxios) {
200
+ data = (await axios(buildAxiosConfig(config.api.modelsUrl, headers, {}))).data;
201
+ } else {
202
+ const response = await requester.antigravity_fetch(config.api.modelsUrl, buildRequesterConfig(headers, {}));
203
+ if (response.status !== 200) {
204
+ const errorBody = await response.text();
205
+ throw { status: response.status, message: errorBody };
206
+ }
207
+ data = await response.json();
208
+ }
209
 
210
  return {
211
  object: 'list',
 
235
  const response = await requester.antigravity_fetch(config.api.noStreamUrl, buildRequesterConfig(headers, requestBody));
236
  if (response.status !== 200) {
237
  const errorBody = await response.text();
238
+ throw { status: response.status, message: errorBody };
 
239
  }
240
  data = await response.json();
241
  }
 
248
  let content = '';
249
  let thinkingContent = '';
250
  const toolCalls = [];
251
+ const imageContents = [];
252
 
253
  for (const part of parts) {
254
  if (part.thought === true) {
 
257
  content += part.text;
258
  } else if (part.functionCall) {
259
  toolCalls.push(convertToToolCall(part.functionCall));
260
+ } else if (part.inlineData) {
261
+ imageContents.push({
262
+ type: 'image_url',
263
+ image_url: { url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}` }
264
+ });
265
  }
266
  }
267
 
 
270
  content = `<think>\n${thinkingContent}\n</think>\n${content}`;
271
  }
272
 
273
+ // 生图模型:转换为markdown格式
274
+ if (imageContents.length > 0) {
275
+ let markdown = content ? content + '\n\n' : '';
276
+ markdown += imageContents.map(img => `![image](${img.image_url.url})`).join('\n\n');
277
+ return { content: markdown, toolCalls };
278
+ }
279
+
280
  return { content, toolCalls };
281
  }
282
 
src/config/config.js CHANGED
@@ -26,7 +26,7 @@ API_KEY=sk-text
26
 
27
  # 其他配置
28
  USE_NATIVE_AXIOS=false
29
- TIMEOUT=30000
30
  # PROXY=http://127.0.0.1:7897
31
 
32
  # 系统提示词
 
26
 
27
  # 其他配置
28
  USE_NATIVE_AXIOS=false
29
+ TIMEOUT=180000
30
  # PROXY=http://127.0.0.1:7897
31
 
32
  # 系统提示词
src/server/index.js CHANGED
@@ -6,6 +6,40 @@ import config from '../config/config.js';
6
 
7
  const app = express();
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  app.use(express.json({ limit: config.security.maxRequestSize }));
10
 
11
  app.use((err, req, res, next) => {
@@ -51,65 +85,55 @@ app.get('/v1/models', async (req, res) => {
51
  app.post('/v1/chat/completions', async (req, res) => {
52
  const { messages, model, stream = true, tools, ...params} = req.body;
53
  try {
54
-
55
  if (!messages) {
56
  return res.status(400).json({ error: 'messages is required' });
57
  }
58
 
 
59
  const requestBody = await generateRequestBody(messages, model, params, tools);
60
- // console.log(JSON.stringify(requestBody,null,2));
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
- if (stream) {
63
- res.setHeader('Content-Type', 'text/event-stream');
64
- res.setHeader('Cache-Control', 'no-cache');
65
- res.setHeader('Connection', 'keep-alive');
66
-
67
- const id = `chatcmpl-${Date.now()}`;
68
- const created = Math.floor(Date.now() / 1000);
69
  let hasToolCall = false;
70
 
71
  await generateAssistantResponse(requestBody, (data) => {
72
- if (data.type === 'tool_calls') {
73
- hasToolCall = true;
74
- res.write(`data: ${JSON.stringify({
75
- id,
76
- object: 'chat.completion.chunk',
77
- created,
78
- model,
79
- choices: [{ index: 0, delta: { tool_calls: data.tool_calls }, finish_reason: null }]
80
- })}\n\n`);
81
- } else {
82
- res.write(`data: ${JSON.stringify({
83
- id,
84
- object: 'chat.completion.chunk',
85
- created,
86
- model,
87
- choices: [{ index: 0, delta: { content: data.content }, finish_reason: null }]
88
- })}\n\n`);
89
- }
90
  });
91
 
92
- res.write(`data: ${JSON.stringify({
93
- id,
94
- object: 'chat.completion.chunk',
95
- created,
96
- model,
97
- choices: [{ index: 0, delta: {}, finish_reason: hasToolCall ? 'tool_calls' : 'stop' }]
98
- })}\n\n`);
99
- res.write('data: [DONE]\n\n');
100
- res.end();
101
  } else {
102
  const { content, toolCalls } = await generateAssistantResponseNoStream(requestBody);
103
-
104
  const message = { role: 'assistant', content };
105
- if (toolCalls.length > 0) {
106
- message.tool_calls = toolCalls;
107
- }
108
 
109
  res.json({
110
- id: `chatcmpl-${Date.now()}`,
111
  object: 'chat.completion',
112
- created: Math.floor(Date.now() / 1000),
113
  model,
114
  choices: [{
115
  index: 0,
@@ -121,37 +145,22 @@ app.post('/v1/chat/completions', async (req, res) => {
121
  } catch (error) {
122
  logger.error('生成响应失败:', error.message);
123
  if (!res.headersSent) {
 
 
 
124
  if (stream) {
125
- res.setHeader('Content-Type', 'text/event-stream');
126
- res.setHeader('Cache-Control', 'no-cache');
127
- res.setHeader('Connection', 'keep-alive');
128
- const id = `chatcmpl-${Date.now()}`;
129
- const created = Math.floor(Date.now() / 1000);
130
- res.write(`data: ${JSON.stringify({
131
- id,
132
- object: 'chat.completion.chunk',
133
- created,
134
- model,
135
- choices: [{ index: 0, delta: { content: `错误: ${error.message}` }, finish_reason: null }]
136
- })}\n\n`);
137
- res.write(`data: ${JSON.stringify({
138
- id,
139
- object: 'chat.completion.chunk',
140
- created,
141
- model,
142
- choices: [{ index: 0, delta: {}, finish_reason: 'stop' }]
143
- })}\n\n`);
144
- res.write('data: [DONE]\n\n');
145
- res.end();
146
  } else {
147
  res.json({
148
- id: `chatcmpl-${Date.now()}`,
149
  object: 'chat.completion',
150
- created: Math.floor(Date.now() / 1000),
151
  model,
152
  choices: [{
153
  index: 0,
154
- message: { role: 'assistant', content: `错误: ${error.message}` },
155
  finish_reason: 'stop'
156
  }]
157
  });
 
6
 
7
  const app = express();
8
 
9
// Helper: build the shared response metadata (request id + creation timestamp).
const createResponseMeta = () => {
  return {
    id: `chatcmpl-${Date.now()}`,
    created: Math.floor(Date.now() / 1000),
  };
};
14
+
15
// Helper: apply the standard SSE (text/event-stream) response headers.
const setStreamHeaders = (res) => {
  const sseHeaders = [
    ['Content-Type', 'text/event-stream'],
    ['Cache-Control', 'no-cache'],
    ['Connection', 'keep-alive'],
  ];
  for (const [name, value] of sseHeaders) {
    res.setHeader(name, value);
  }
};
21
+
22
// Helper: assemble one OpenAI-compatible `chat.completion.chunk` payload.
const createStreamChunk = (id, created, model, delta, finish_reason = null) => {
  const choice = { index: 0, delta, finish_reason };
  return {
    id,
    object: 'chat.completion.chunk',
    created,
    model,
    choices: [choice],
  };
};
30
+
31
// Helper: serialize a payload as a single SSE `data:` frame and write it out.
const writeStreamData = (res, data) => {
  const frame = `data: ${JSON.stringify(data)}\n\n`;
  res.write(frame);
};
35
+
36
// Helper: emit the final finish_reason chunk plus the [DONE] sentinel, then close.
const endStream = (res, id, created, model, finish_reason) => {
  const finalChunk = createStreamChunk(id, created, model, {}, finish_reason);
  writeStreamData(res, finalChunk);
  res.write('data: [DONE]\n\n');
  res.end();
};
42
+
43
  app.use(express.json({ limit: config.security.maxRequestSize }));
44
 
45
  app.use((err, req, res, next) => {
 
85
  app.post('/v1/chat/completions', async (req, res) => {
86
  const { messages, model, stream = true, tools, ...params} = req.body;
87
  try {
 
88
  if (!messages) {
89
  return res.status(400).json({ error: 'messages is required' });
90
  }
91
 
92
+ const isImageModel = model.includes('-image');
93
  const requestBody = await generateRequestBody(messages, model, params, tools);
94
+ if (isImageModel) {
95
+ requestBody.request.generationConfig={
96
+ candidateCount: 1,
97
+ // imageConfig:{
98
+ // aspectRatio: "1:1"
99
+ // }
100
+ }
101
+ requestBody.requestType="image_gen";
102
+ //delete requestBody.request.systemInstruction;
103
+ delete requestBody.request.tools;
104
+ delete requestBody.request.toolConfig;
105
+ }
106
+ //console.log(JSON.stringify(requestBody,null,2))
107
 
108
+ const { id, created } = createResponseMeta();
109
+
110
+ if (stream && !isImageModel) {
111
+ setStreamHeaders(res);
 
 
 
112
  let hasToolCall = false;
113
 
114
  await generateAssistantResponse(requestBody, (data) => {
115
+ const delta = data.type === 'tool_calls'
116
+ ? { tool_calls: data.tool_calls }
117
+ : { content: data.content };
118
+ if (data.type === 'tool_calls') hasToolCall = true;
119
+ writeStreamData(res, createStreamChunk(id, created, model, delta));
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  });
121
 
122
+ endStream(res, id, created, model, hasToolCall ? 'tool_calls' : 'stop');
123
+ } else if (stream && isImageModel) {
124
+ setStreamHeaders(res);
125
+ const { content } = await generateAssistantResponseNoStream(requestBody);
126
+ writeStreamData(res, createStreamChunk(id, created, model, { content }));
127
+ endStream(res, id, created, model, 'stop');
 
 
 
128
  } else {
129
  const { content, toolCalls } = await generateAssistantResponseNoStream(requestBody);
 
130
  const message = { role: 'assistant', content };
131
+ if (toolCalls.length > 0) message.tool_calls = toolCalls;
 
 
132
 
133
  res.json({
134
+ id,
135
  object: 'chat.completion',
136
+ created,
137
  model,
138
  choices: [{
139
  index: 0,
 
145
  } catch (error) {
146
  logger.error('生成响应失败:', error.message);
147
  if (!res.headersSent) {
148
+ const { id, created } = createResponseMeta();
149
+ const errorContent = `错误: ${error.message}`;
150
+
151
  if (stream) {
152
+ setStreamHeaders(res);
153
+ writeStreamData(res, createStreamChunk(id, created, model, { content: errorContent }));
154
+ endStream(res, id, created, model, 'stop');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
  } else {
156
  res.json({
157
+ id,
158
  object: 'chat.completion',
159
+ created,
160
  model,
161
  choices: [{
162
  index: 0,
163
+ message: { role: 'assistant', content: errorContent },
164
  finish_reason: 'stop'
165
  }]
166
  });
test/test-image-generation.js ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import fs from 'fs';
import path from 'path';

// Endpoint and key for the local OpenAI-compatible proxy under test.
const API_URL = 'http://localhost:8045/v1/chat/completions';
const API_KEY = 'sk-text';

/**
 * Exercise the image-generation model through the chat completions endpoint,
 * then extract any base64 data-URL images from the markdown answer and save
 * them under `test/`.
 * @param {boolean} stream - request an SSE streaming response when true.
 * @throws {Error} when the HTTP response status is not 2xx.
 */
async function testImageGeneration(stream = true) {
  console.log(`测试生图模型 (${stream ? '流式' : '非流式'})...\n`);

  const response = await fetch(API_URL, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${API_KEY}`
    },
    body: JSON.stringify({
      model: 'gemini-2.5-flash-image',
      messages: [{ role: 'user', content: '画一个二次元美少女' }],
      stream
    })
  });

  // Fail fast on HTTP errors instead of mis-parsing an error body below.
  if (!response.ok) {
    throw new Error(`API request failed (${response.status}): ${await response.text()}`);
  }

  let fullContent = '';

  if (stream) {
    let buffer = '';
    const reader = response.body.getReader();
    const decoder = new TextDecoder();

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop(); // keep the trailing partial line for the next read

      for (const line of lines) {
        if (!line.startsWith('data: ') || line.includes('[DONE]')) continue;
        try {
          const data = JSON.parse(line.slice(6));
          const content = data.choices[0]?.delta?.content;
          // BUGFIX: accumulate deltas. The previous `fullContent = content`
          // kept only the last chunk and lost everything if the server split
          // the content across multiple SSE events.
          if (content) fullContent += content;
        } catch (e) {
          // Best-effort: skip malformed or partial SSE lines.
        }
      }
    }
  } else {
    const data = await response.json();
    fullContent = data.choices[0]?.message?.content || '';
  }

  console.log('响应内容:\n', fullContent.substring(0, 200), '...\n');

  // Extract base64 data-URL images embedded in the markdown answer.
  const imageRegex = /!\[.*?\]\((data:image\/(.*?);base64,([^)]+))\)/g;
  let match;
  let imageCount = 0;

  // Make sure the output directory exists before writing any files.
  fs.mkdirSync('test', { recursive: true });

  while ((match = imageRegex.exec(fullContent)) !== null) {
    imageCount++;
    const base64Data = match[3];
    const ext = match[2];
    const filename = `generated_${Date.now()}_${imageCount}.${ext}`;
    const filepath = path.join('test', filename);

    fs.writeFileSync(filepath, Buffer.from(base64Data, 'base64'));
    console.log(`✓ 图片已保存: ${filepath}`);
  }

  if (imageCount === 0) {
    console.log('✗ 未找到图片');
  } else {
    console.log(`\n✓ 共保存 ${imageCount} 张图片`);
  }
}

(async () => {
  // await testImageGeneration(true);
  // console.log('\n' + '='.repeat(50) + '\n');
  await testImageGeneration(false);
})().catch(console.error);