timigogo committed
Commit e1db8b8 · verified · 1 Parent(s): 09dbae5

Upload 14 files

Files changed (1)
  1. src/routes/chat.js +434 -430
src/routes/chat.js CHANGED
@@ -1,430 +1,434 @@
- const express = require('express')
- const axios = require('axios')
- const WebSocket = require('ws')
- const router = express.Router()
- const { v4: uuidv4 } = require('uuid')
- const { uploadFileBuffer } = require('../lib/upload')
- const verify = require('./verify')
- const modelMap = require('../lib/model-map')
-
-
- async function parseMessages(req, res, next) {
-   const messages = req.body.messages
-   if (!Array.isArray(messages)) {
-     req.processedMessages = []
-     return next()
-   }
-
-   try {
-     const transformedMessages = await Promise.all(messages.map(async (msg) => {
-       const message = {
-         role: msg.role,
-         tool_calls: [],
-         template_format: "f-string"
-       }
-
-       if (Array.isArray(msg.content)) {
-         const contentItems = await Promise.all(msg.content.map(async (item) => {
-           if (item.type === "text") {
-             return {
-               type: "text",
-               text: item.text
-             }
-           }
-           else if (item.type === "image_url") {
-             try {
-               const base64Match = item.image_url.url.match(/^data:image\/\w+;base64,(.+)$/)
-               if (base64Match) {
-                 const base64 = base64Match[1]
-                 const data = Buffer.from(base64, 'base64')
-                 const uploadResult = await uploadFileBuffer(data)
-
-                 return {
-                   type: "media",
-                   media: {
-                     "type": "image",
-                     "url": uploadResult.file_url,
-                     "title": `image_${Date.now()}.png`
-                   }
-                 }
-               } else {
-                 return {
-                   type: "media",
-                   media: {
-                     "type": "image",
-                     "url": item.image_url.url,
-                     "title": "external_image"
-                   }
-                 }
-               }
-             } catch (error) {
-               console.error("Error while processing image:", error)
-               return {
-                 type: "text",
-                 text: "[image processing failed]"
-               }
-             }
-           } else {
-             return {
-               type: "text",
-               text: JSON.stringify(item)
-             }
-           }
-         }))
-
-         message.content = contentItems
-       } else {
-         message.content = [
-           {
-             type: "text",
-             text: msg.content || ""
-           }
-         ]
-       }
-
-       return message
-     }))
-
-     req.body.messages = transformedMessages
-     return next()
-   } catch (error) {
-     console.error("Error while processing messages:", error.status)
-     req.body.messages = []
-     return next(error)
-   }
- }
-
- async function getChatID(req, res) {
-   try {
-     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/playground_sessions'
-     const headers = { Authorization: "Bearer " + req.account.token }
-     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
-     let data = {
-       "id": uuidv4(),
-       "name": "Not implemented",
-       "prompt_blueprint": {
-         "inference_client_name": null,
-         "metadata": {
-           "model": model_data
-         },
-         "prompt_template": {
-           "type": "chat",
-           "messages": req.body.messages,
-           "tools": req.body.tools || [],
-           "tool_choice": req.body.tool_choice || "none",
-           "input_variables": [],
-           "functions": [],
-           "function_call": null
-         },
-         "provider_base_url_name": null
-       },
-       "input_variables": []
-     }
-
-     for (const item in req.body) {
-       if (item === "messages" || item === "model" || item === "stream") {
-         continue
-       } else if (model_data.parameters[item]) {
-         model_data.parameters[item] = req.body[item]
-       }
-     }
-     data.prompt_blueprint.metadata.model = model_data
-     console.log(`Model parameters: ${JSON.stringify(data.prompt_blueprint.metadata.model)}`)
-
-     const response = await axios.put(url, data, { headers })
-     if (response.data.success) {
-       console.log(`Session ID created: ${response.data.playground_session.id}`)
-       req.chatID = response.data.playground_session.id
-       return response.data.playground_session.id
-     } else {
-       return false
-     }
-   } catch (error) {
-     // console.error("Error:", error.response?.data)
-     res.status(500).json({
-       "error": {
-         "message": error.message || "Internal server error",
-         "type": "server_error",
-         "param": null,
-         "code": "server_error"
-       }
-     })
-     return false
-   }
- }
-
- async function sentRequest(req, res) {
-   try {
-     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/run_groups'
-     const headers = { Authorization: "Bearer " + req.account.token }
-     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
-     let data = {
-       "id": uuidv4(),
-       "playground_session_id": req.chatID,
-       "shared_prompt_blueprint": {
-         "inference_client_name": null,
-         "metadata": {
-           "model": model_data
-         },
-         "prompt_template": {
-           "type": "chat",
-           "messages": req.body.messages,
-           "tools": req.body.tools || [],
-           "tool_choice": req.body.tool_choice || "none",
-           "input_variables": [],
-           "functions": [],
-           "function_call": null
-         },
-         "provider_base_url_name": null
-       },
-       "individual_run_requests": [
-         {
-           "input_variables": {},
-           "run_group_position": 1
-         }
-       ]
-     }
-
-     for (const item in req.body) {
-       if (item === "messages" || item === "model" || item === "stream") {
-         continue
-       } else if (model_data.parameters[item]) {
-         model_data.parameters[item] = req.body[item]
-       }
-     }
-     data.shared_prompt_blueprint.metadata.model = model_data
-
-     const response = await axios.post(url, data, { headers })
-     if (response.data.success) {
-       return response.data.run_group.individual_run_requests[0].id
-     } else {
-       return false
-     }
-   } catch (error) {
-     // console.error("Error:", error.response?.data)
-     res.status(500).json({
-       "error": {
-         "message": error.message || "Internal server error",
-         "type": "server_error",
-         "param": null,
-         "code": "server_error"
-       }
-     })
-   }
- }
-
- // Chat completions route
- router.post('/v1/chat/completions', verify, parseMessages, async (req, res) => {
-   // console.log(JSON.stringify(req.body))
-
-   try {
-
-     const setHeader = () => {
-       try {
-         if (req.body.stream === true) {
-           res.setHeader('Content-Type', 'text/event-stream')
-           res.setHeader('Cache-Control', 'no-cache')
-           res.setHeader('Connection', 'keep-alive')
-         } else {
-           res.setHeader('Content-Type', 'application/json')
-         }
-       } catch (error) {
-         // console.error("Error while setting response headers:", error)
-       }
-     }
-
-     const { access_token, clientId } = req.account
-     // Create the session ID
-     await getChatID(req, res)
-
-     // Data to send
-     const sendAction = `{"action":10,"channel":"user:${clientId}","params":{"agent":"react-hooks/2.0.2"}}`
-     // Build the WebSocket URL
-     const wsUrl = `wss://realtime.ably.io/?access_token=${encodeURIComponent(access_token)}&clientId=${clientId}&format=json&heartbeats=true&v=3&agent=ably-js%2F2.0.2%20browser`
-     // Open the WebSocket connection
-     const ws = new WebSocket(wsUrl)
-
-     // Streaming state
-     let ThinkingLastContent = ""
-     let TextLastContent = ""
-     let ThinkingStart = false
-     let ThinkingEnd = false
-     let RequestID = ""
-     let MessageID = "chatcmpl-" + uuidv4()
-     let streamChunk = {
-       "id": MessageID,
-       "object": "chat.completion.chunk",
-       "system_fingerprint": "fp_44709d6fcb",
-       "created": Math.floor(Date.now() / 1000),
-       "model": req.body.model,
-       "choices": [
-         {
-           "index": 0,
-           "delta": {
-             "content": null
-           },
-           "finish_reason": null
-         }
-       ]
-     }
-
-     ws.on('open', async () => {
-       ws.send(sendAction)
-       RequestID = await sentRequest(req, res)
-       setHeader()
-     })
-
-     ws.on('message', async (data) => {
-       try {
-         data = data.toString()
-         // Log the raw WebSocket message here
-         console.log("Raw WebSocket message from PromptLayer:", data);
-         // console.log(JSON.parse(data))
-         let ContentText = JSON.parse(data)?.messages?.[0]
-         let ContentData = JSON.parse(ContentText?.data)
-         const isRequestID = ContentData?.individual_run_request_id
-         if (isRequestID != RequestID || !isRequestID) return
-
-         let output = ""
-
-         if (ContentText?.name === "UPDATE_LAST_MESSAGE") {
-           const MessageArray = ContentData?.payload?.message?.content
-           for (const item of MessageArray) {
-
-             if (item.type === "text") {
-               output = item.text.replace(TextLastContent, "")
-               if (ThinkingStart && !ThinkingEnd) {
-                 ThinkingEnd = true
-                 output = `${output}\n\n</think>`
-               }
-               TextLastContent = item.text
-             }
-             else if (item.type === "thinking" && MessageArray.length === 1) {
-               output = item.thinking.replace(ThinkingLastContent, "")
-               if (!ThinkingStart) {
-                 ThinkingStart = true
-                 output = `<think>\n\n${output}`
-               }
-               ThinkingLastContent = item.thinking
-             }
-
-           }
-
-           if (req.body.stream === true) {
-             streamChunk.choices[0].delta.content = output
-             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
-           }
-
-         }
-         else if (ContentText?.name === "INDIVIDUAL_RUN_COMPLETE") {
-
-           if (req.body.stream !== true) {
-             output = ThinkingLastContent ? `<think>\n\n${ThinkingLastContent}\n\n</think>\n\n${TextLastContent}` : TextLastContent
-           }
-
-           if (ThinkingLastContent === "" && TextLastContent === "") {
-             output = "The model hit an error while handling this request: \n1. Check the request parameters; each model's supported and default parameters are listed under /v1/models\n2. Check whether any parameter value exceeds the model's limits\n3. The upstream provider may currently be overloaded for this model; switching to another model is a normal workaround\n4. For Anthropic models, temperature must be between 0 and 1; do not set values above 1\n5. Support group: https://t.me/nodejs_project"
-             streamChunk.choices[0].delta.content = output
-             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
-           }
-
-           if (!req.body.stream || req.body.stream !== true) {
-             let responseJson = {
-               "id": MessageID,
-               "object": "chat.completion",
-               "created": Math.floor(Date.now() / 1000),
-               "system_fingerprint": "fp_44709d6fcb",
-               "model": req.body.model,
-               "choices": [
-                 {
-                   "index": 0,
-                   "message": {
-                     "role": "assistant",
-                     "content": output
-                   },
-                   "finish_reason": "stop"
-                 }
-               ],
-               "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": 0,
-                 "total_tokens": 0
-               }
-             }
-
-             res.json(responseJson)
-             ws.close()
-             return
-           } else {
-             // Streaming response: send the end-of-stream marker
-             let finalChunk = {
-               "id": MessageID,
-               "object": "chat.completion.chunk",
-               "system_fingerprint": "fp_44709d6fcb",
-               "created": Math.floor(Date.now() / 1000),
-               "model": req.body.model,
-               "choices": [
-                 {
-                   "index": 0,
-                   "delta": {},
-                   "finish_reason": "stop"
-                 }
-               ]
-             }
-
-             res.write(`data: ${JSON.stringify(finalChunk)}\n\n`)
-             res.write(`data: [DONE]\n\n`)
-             res.end()
-           }
-           ws.close()
-         }
-
-       } catch (err) {
-         // console.error("Error while handling WebSocket message:", err)
-       }
-     })
-
-     ws.on('error', (err) => {
-       // Standard OpenAI error response format
-       res.status(500).json({
-         "error": {
-           "message": err.message,
-           "type": "server_error",
-           "param": null,
-           "code": "server_error"
-         }
-       })
-     })
-
-     setTimeout(() => {
-       if (ws.readyState === WebSocket.OPEN) {
-         ws.close()
-         if (!res.headersSent) {
-           // Standard OpenAI timeout error response format
-           res.status(504).json({
-             "error": {
-               "message": "Request timed out",
-               "type": "timeout",
-               "param": null,
-               "code": "timeout_error"
-             }
-           })
-         }
-       }
-     }, 300 * 1000)
-
-   } catch (error) {
-     console.error("Error:", error)
-     // Standard OpenAI generic error response format
-     res.status(500).json({
-       "error": {
-         "message": error.message || "Internal server error",
-         "type": "server_error",
-         "param": null,
-         "code": "server_error"
-       }
-     })
-   }
- })
-
- module.exports = router

+ const express = require('express')
+ const axios = require('axios')
+ const WebSocket = require('ws')
+ const router = express.Router()
+ const { v4: uuidv4 } = require('uuid')
+ const { uploadFileBuffer } = require('../lib/upload')
+ const verify = require('./verify')
+ const modelMap = require('../lib/model-map')
+
+
+ async function parseMessages(req, res, next) {
+   const messages = req.body.messages
+   if (!Array.isArray(messages)) {
+     req.processedMessages = []
+     return next()
+   }
+
+   try {
+     const transformedMessages = await Promise.all(messages.map(async (msg) => {
+       const message = {
+         role: msg.role,
+         tool_calls: [],
+         template_format: "f-string"
+       }
+
+       if (Array.isArray(msg.content)) {
+         const contentItems = await Promise.all(msg.content.map(async (item) => {
+           if (item.type === "text") {
+             return {
+               type: "text",
+               text: item.text
+             }
+           }
+           else if (item.type === "image_url") {
+             try {
+               const base64Match = item.image_url.url.match(/^data:image\/\w+;base64,(.+)$/)
+               if (base64Match) {
+                 const base64 = base64Match[1]
+                 const data = Buffer.from(base64, 'base64')
+                 const uploadResult = await uploadFileBuffer(data)
+
+                 return {
+                   type: "media",
+                   media: {
+                     "type": "image",
+                     "url": uploadResult.file_url,
+                     "title": `image_${Date.now()}.png`
+                   }
+                 }
+               } else {
+                 return {
+                   type: "media",
+                   media: {
+                     "type": "image",
+                     "url": item.image_url.url,
+                     "title": "external_image"
+                   }
+                 }
+               }
+             } catch (error) {
+               console.error("Error while processing image:", error)
+               return {
+                 type: "text",
+                 text: "[image processing failed]"
+               }
+             }
+           } else {
+             return {
+               type: "text",
+               text: JSON.stringify(item)
+             }
+           }
+         }))
+
+         message.content = contentItems
+       } else {
+         message.content = [
+           {
+             type: "text",
+             text: msg.content || ""
+           }
+         ]
+       }
+
+       return message
+     }))
+
+     req.body.messages = transformedMessages
+     return next()
+   } catch (error) {
+     console.error("Error while processing messages:", error.status)
+     req.body.messages = []
+     return next(error)
+   }
+ }
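+
+ // Illustrative sketch of the transformation above (shapes inferred from this
+ // code, not from a published PromptLayer spec): an OpenAI-style message such as
+ //   { role: "user", content: [{ type: "text", text: "hi" }] }
+ // becomes roughly
+ //   { role: "user", tool_calls: [], template_format: "f-string",
+ //     content: [{ type: "text", text: "hi" }] }
+ // with image_url parts re-uploaded (for data: URLs) and rewritten as
+ //   { type: "media", media: { type: "image", url: ..., title: ... } }.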
+
+ async function getChatID(req, res) {
+   try {
+     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/playground_sessions'
+     const headers = { Authorization: "Bearer " + req.account.token }
+     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
+     let data = {
+       "id": uuidv4(),
+       "name": "Not implemented",
+       "prompt_blueprint": {
+         "inference_client_name": null,
+         "metadata": {
+           "model": model_data
+         },
+         "prompt_template": {
+           "type": "chat",
+           "messages": req.body.messages,
+           "tools": req.body.tools || [],
+           "tool_choice": req.body.tool_choice || "none",
+           "input_variables": [],
+           "functions": [],
+           "function_call": null
+         },
+         "provider_base_url_name": null
+       },
+       "input_variables": []
+     }
+
+     for (const item in req.body) {
+       if (item === "messages" || item === "model" || item === "stream") {
+         continue
+       } else if (model_data.parameters[item]) {
+         model_data.parameters[item] = req.body[item]
+       }
+     }
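+     // Note: the truthy check above silently drops falsy client values such as
+     // temperature: 0 or top_p: 0; a key-existence test like
+     // Object.hasOwn(model_data.parameters, item) would forward them as well.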
+     data.prompt_blueprint.metadata.model = model_data
+     console.log(`Model parameters: ${JSON.stringify(data.prompt_blueprint.metadata.model)}`)
+
+     // Log the raw JSON sent to PromptLayer for getChatID
+     console.log('Raw JSON sent to PromptLayer (getChatID):\n', JSON.stringify(data, null, 2));
+
+     const response = await axios.put(url, data, { headers })
+     if (response.data.success) {
+       console.log(`Session ID created: ${response.data.playground_session.id}`)
+       req.chatID = response.data.playground_session.id
+       return response.data.playground_session.id
+     } else {
+       return false
+     }
+   } catch (error) {
+     // console.error("Error:", error.response?.data)
+     res.status(500).json({
+       "error": {
+         "message": error.message || "Internal server error",
+         "type": "server_error",
+         "param": null,
+         "code": "server_error"
+       }
+     })
+     return false
+   }
+ }
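+
+ // sentRequest enqueues a single run against the session created by getChatID;
+ // the id it returns is the individual_run_request id that the WebSocket
+ // handler below uses to match incoming events to this request.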
+
+ async function sentRequest(req, res) {
+   try {
+     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/run_groups'
+     const headers = { Authorization: "Bearer " + req.account.token }
+     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
+     let data = {
+       "id": uuidv4(),
+       "playground_session_id": req.chatID,
+       "shared_prompt_blueprint": {
+         "inference_client_name": null,
+         "metadata": {
+           "model": model_data
+         },
+         "prompt_template": {
+           "type": "chat",
+           "messages": req.body.messages,
+           "tools": req.body.tools || [],
+           "tool_choice": req.body.tool_choice || "none",
+           "input_variables": [],
+           "functions": [],
+           "function_call": null
+         },
+         "provider_base_url_name": null
+       },
+       "individual_run_requests": [
+         {
+           "input_variables": {},
+           "run_group_position": 1
+         }
+       ]
+     }
+
+     for (const item in req.body) {
+       if (item === "messages" || item === "model" || item === "stream") {
+         continue
+       } else if (model_data.parameters[item]) {
+         model_data.parameters[item] = req.body[item]
+       }
+     }
+     data.shared_prompt_blueprint.metadata.model = model_data
+
+     // Log the raw JSON sent to PromptLayer for sentRequest
+     console.log('Raw JSON sent to PromptLayer (sentRequest):\n', JSON.stringify(data, null, 2));
+
+     const response = await axios.post(url, data, { headers })
+     if (response.data.success) {
+       return response.data.run_group.individual_run_requests[0].id
+     } else {
+       return false
+     }
+   } catch (error) {
+     // console.error("Error:", error.response?.data)
+     res.status(500).json({
+       "error": {
+         "message": error.message || "Internal server error",
+         "type": "server_error",
+         "param": null,
+         "code": "server_error"
+       }
+     })
+   }
+ }
+
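+ // Flow of the route below: create a playground session (getChatID), attach to
+ // the user's Ably channel over WebSocket, enqueue the run (sentRequest), relay
+ // UPDATE_LAST_MESSAGE deltas as OpenAI-style chunks, and finish the response
+ // on INDIVIDUAL_RUN_COMPLETE.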
+ // Chat completions route
+ router.post('/v1/chat/completions', verify, parseMessages, async (req, res) => {
+   // console.log(JSON.stringify(req.body))
+
+   try {
+
+     const setHeader = () => {
+       try {
+         if (req.body.stream === true) {
+           res.setHeader('Content-Type', 'text/event-stream')
+           res.setHeader('Cache-Control', 'no-cache')
+           res.setHeader('Connection', 'keep-alive')
+         } else {
+           res.setHeader('Content-Type', 'application/json')
+         }
+       } catch (error) {
+         // console.error("Error while setting response headers:", error)
+       }
+     }
+
+     const { access_token, clientId } = req.account
+     // Create the session ID
+     await getChatID(req, res)
+
+     // Data to send
+     const sendAction = `{"action":10,"channel":"user:${clientId}","params":{"agent":"react-hooks/2.0.2"}}`
+     // Build the WebSocket URL
+     const wsUrl = `wss://realtime.ably.io/?access_token=${encodeURIComponent(access_token)}&clientId=${clientId}&format=json&heartbeats=true&v=3&agent=ably-js%2F2.0.2%20browser`
+     // Open the WebSocket connection
+     const ws = new WebSocket(wsUrl)
+
+     // Streaming state
+     let ThinkingLastContent = ""
+     let TextLastContent = ""
+     let ThinkingStart = false
+     let ThinkingEnd = false
+     let RequestID = ""
+     let MessageID = "chatcmpl-" + uuidv4()
+     let streamChunk = {
+       "id": MessageID,
+       "object": "chat.completion.chunk",
+       "system_fingerprint": "fp_44709d6fcb",
+       "created": Math.floor(Date.now() / 1000),
+       "model": req.body.model,
+       "choices": [
+         {
+           "index": 0,
+           "delta": {
+             "content": null
+           },
+           "finish_reason": null
+         }
+       ]
+     }
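+
+     // Each delta below is written in OpenAI's SSE framing; an illustrative frame:
+     //   data: {"id":"chatcmpl-...","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"Hel"},"finish_reason":null}]}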
+
+     ws.on('open', async () => {
+       ws.send(sendAction)
+       RequestID = await sentRequest(req, res)
+       setHeader()
+     })
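+     // In Ably's realtime protocol, action 10 appears to be ATTACH, so sendAction
+     // subscribes this connection to the user's channel before the run starts.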
+
+     ws.on('message', async (data) => {
+       try {
+         data = data.toString()
+         // console.log(JSON.parse(data))
+         let ContentText = JSON.parse(data)?.messages?.[0]
+         let ContentData = JSON.parse(ContentText?.data)
+         const isRequestID = ContentData?.individual_run_request_id
+         if (isRequestID != RequestID || !isRequestID) return
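+         // Each Ably envelope carries the individual_run_request_id it belongs to;
+         // events for other runs on the same channel are ignored here.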
+
+         let output = ""
+
+         if (ContentText?.name === "UPDATE_LAST_MESSAGE") {
+           const MessageArray = ContentData?.payload?.message?.content
+           for (const item of MessageArray) {
+
+             if (item.type === "text") {
+               output = item.text.replace(TextLastContent, "")
+               if (ThinkingStart && !ThinkingEnd) {
+                 ThinkingEnd = true
+                 output = `${output}\n\n</think>`
+               }
+               TextLastContent = item.text
+             }
+             else if (item.type === "thinking" && MessageArray.length === 1) {
+               output = item.thinking.replace(ThinkingLastContent, "")
+               if (!ThinkingStart) {
+                 ThinkingStart = true
+                 output = `<think>\n\n${output}`
+               }
+               ThinkingLastContent = item.thinking
+             }
+
+           }
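+           // Note: the delta is computed by deleting the previously seen text with
+           // String.prototype.replace, which removes the first occurrence rather
+           // than a strict prefix; if the model repeats earlier text verbatim the
+           // delta can come out wrong. Slicing by the last length, e.g.
+           // item.text.slice(TextLastContent.length), would be stricter.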
+
+           if (req.body.stream === true) {
+             streamChunk.choices[0].delta.content = output
+             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
+           }
+
+         }
+         else if (ContentText?.name === "INDIVIDUAL_RUN_COMPLETE") {
+
+           if (req.body.stream !== true) {
+             output = ThinkingLastContent ? `<think>\n\n${ThinkingLastContent}\n\n</think>\n\n${TextLastContent}` : TextLastContent
+           }
+
+           if (ThinkingLastContent === "" && TextLastContent === "") {
+             output = "The model hit an error while handling this request: \n1. Check the request parameters; each model's supported and default parameters are listed under /v1/models\n2. Check whether any parameter value exceeds the model's limits\n3. The upstream provider may currently be overloaded for this model; switching to another model is a normal workaround\n4. For Anthropic models, temperature must be between 0 and 1; do not set values above 1\n5. Support group: https://t.me/nodejs_project"
+             streamChunk.choices[0].delta.content = output
+             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
+           }
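+           // Caveat: this fallback always writes an SSE chunk, even when the client
+           // did not ask for streaming; non-streaming callers still receive the JSON
+           // body below, so the write is better guarded on req.body.stream === true.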
+
+           if (!req.body.stream || req.body.stream !== true) {
+             let responseJson = {
+               "id": MessageID,
+               "object": "chat.completion",
+               "created": Math.floor(Date.now() / 1000),
+               "system_fingerprint": "fp_44709d6fcb",
+               "model": req.body.model,
+               "choices": [
+                 {
+                   "index": 0,
+                   "message": {
+                     "role": "assistant",
+                     "content": output
+                   },
+                   "finish_reason": "stop"
+                 }
+               ],
+               "usage": {
+                 "prompt_tokens": 0,
+                 "completion_tokens": 0,
+                 "total_tokens": 0
+               }
+             }
+
+             res.json(responseJson)
+             ws.close()
+             return
+           } else {
+             // Streaming response: send the end-of-stream marker
+             let finalChunk = {
+               "id": MessageID,
+               "object": "chat.completion.chunk",
+               "system_fingerprint": "fp_44709d6fcb",
+               "created": Math.floor(Date.now() / 1000),
+               "model": req.body.model,
+               "choices": [
+                 {
+                   "index": 0,
+                   "delta": {},
+                   "finish_reason": "stop"
+                 }
+               ]
+             }
+
+             res.write(`data: ${JSON.stringify(finalChunk)}\n\n`)
+             res.write(`data: [DONE]\n\n`)
+             res.end()
+           }
+           ws.close()
+         }
+
+       } catch (err) {
+         // console.error("Error while handling WebSocket message:", err)
+       }
+     })
+
+     ws.on('error', (err) => {
+       // Standard OpenAI error response format
+       res.status(500).json({
+         "error": {
+           "message": err.message,
+           "type": "server_error",
+           "param": null,
+           "code": "server_error"
+         }
+       })
+     })
+
+     setTimeout(() => {
+       if (ws.readyState === WebSocket.OPEN) {
+         ws.close()
+         if (!res.headersSent) {
+           // Standard OpenAI timeout error response format
+           res.status(504).json({
+             "error": {
+               "message": "Request timed out",
+               "type": "timeout",
+               "param": null,
+               "code": "timeout_error"
+             }
+           })
+         }
+       }
+     }, 300 * 1000)
+
+   } catch (error) {
+     console.error("Error:", error)
+     // Standard OpenAI generic error response format
+     res.status(500).json({
+       "error": {
+         "message": error.message || "Internal server error",
+         "type": "server_error",
+         "param": null,
+         "code": "server_error"
+       }
+     })
+   }
+ })
+
+ module.exports = router
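+
+ // Usage sketch (illustrative; the entry-point file name, body-size limit, and
+ // port are assumptions, not part of this repo's confirmed layout):
+ //   const express = require('express')
+ //   const chatRouter = require('./routes/chat')
+ //   const app = express()
+ //   app.use(express.json({ limit: '50mb' }))
+ //   app.use(chatRouter)
+ //   app.listen(3000)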