devme committed on
Commit
b99c846
·
verified ·
1 Parent(s): 12ed549

Delete transformers

Browse files
transformers/request-common.js DELETED
@@ -1,88 +0,0 @@
1
- import { logDebug } from '../logger.js';
2
- import { getSystemPrompt, getUserAgent } from '../config.js';
3
-
4
- export function transformToCommon(openaiRequest) {
5
- logDebug('将 OpenAI 请求转换为通用格式');
6
-
7
- // 基本保持 OpenAI 格式,只在 messages 前面插入 system 消息
8
- const commonRequest = {
9
- ...openaiRequest
10
- };
11
-
12
- const systemPrompt = getSystemPrompt();
13
-
14
- if (systemPrompt) {
15
- // 检查是否已有 system 消息
16
- const hasSystemMessage = commonRequest.messages?.some(m => m.role === 'system');
17
-
18
- if (hasSystemMessage) {
19
- // 如果已有 system 消息,在第一个 system 消息前插入我们的 system prompt
20
- commonRequest.messages = commonRequest.messages.map((msg, index) => {
21
- if (msg.role === 'system' && index === commonRequest.messages.findIndex(m => m.role === 'system')) {
22
- // 找到第一个 system 消息,前置我们的 prompt
23
- return {
24
- role: 'system',
25
- content: systemPrompt + (typeof msg.content === 'string' ? msg.content?.replace("You are Claude Code, Anthropic's official CLI for Claude.", 'you are bot.') : '')
26
- };
27
- }
28
- return msg;
29
- });
30
- } else {
31
- // 如果没有 system 消息,在 messages 数组最前面插入
32
- commonRequest.messages = [
33
- {
34
- role: 'system',
35
- content: systemPrompt
36
- },
37
- ...(commonRequest.messages || [])
38
- ];
39
- }
40
- }
41
-
42
- logDebug('已转换的通用请求', commonRequest);
43
- return commonRequest;
44
- }
45
-
46
/**
 * Build the HTTP headers for the common (baseten) upstream.
 *
 * Session/message IDs from the client are reused when present; otherwise
 * fresh UUIDs are minted. Stainless SDK telemetry headers are copied from
 * the client or filled with fixed defaults.
 *
 * @param {string} authHeader - Authorization value to forward ('' when absent).
 * @param {Object} [clientHeaders] - Incoming client headers.
 * @returns {Object} Header map ready to send upstream.
 */
export function getCommonHeaders(authHeader, clientHeaders = {}) {
  const headers = {
    'accept': 'application/json',
    'content-type': 'application/json',
    'authorization': authHeader || '',
    'x-api-provider': 'baseten',
    'x-factory-client': 'cli',
    // Reuse client-supplied IDs when present; otherwise generate new ones.
    'x-session-id': clientHeaders['x-session-id'] || generateUUID(),
    'x-assistant-message-id': clientHeaders['x-assistant-message-id'] || generateUUID(),
    'user-agent': getUserAgent(),
    'connection': 'keep-alive'
  };

  // Stainless SDK headers: prefer the client's values, fall back to defaults.
  const stainlessDefaults = {
    'x-stainless-arch': 'x64',
    'x-stainless-lang': 'js',
    'x-stainless-os': 'MacOS',
    'x-stainless-runtime': 'node',
    'x-stainless-retry-count': '0',
    'x-stainless-package-version': '5.23.2',
    'x-stainless-runtime-version': 'v24.3.0'
  };
  for (const [name, fallback] of Object.entries(stainlessDefaults)) {
    headers[name] = clientHeaders[name] || fallback;
  }

  return headers;
}
81
-
82
- function generateUUID() {
83
- return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
84
- const r = Math.random() * 16 | 0;
85
- const v = c == 'x' ? r : (r & 0x3 | 0x8);
86
- return v.toString(16);
87
- });
88
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
transformers/request-openai.js DELETED
@@ -1,177 +0,0 @@
1
- import { logDebug } from '../logger.js';
2
- import { getSystemPrompt, getModelReasoning, getUserAgent } from '../config.js';
3
-
4
/**
 * Transform an OpenAI Chat Completions request into the target OpenAI
 * Responses-style request (`input` items plus `instructions`).
 *
 * - `stream` is forwarded only when the client set it explicitly.
 * - `max_tokens` / `max_completion_tokens` map to `max_output_tokens`.
 * - user/system content becomes `input_text`/`input_image`; assistant
 *   content becomes `output_text`/`output_image`.
 * - ALL system messages are folded into `instructions` (prefixed with the
 *   configured system prompt) and removed from `input`.
 *
 * Fix over the original: every system message is already filtered out of
 * `input`, but only the FIRST one's text was kept as instructions — any
 * additional system messages were silently dropped. They are now all
 * concatenated with '\n'.
 *
 * @param {Object} openaiRequest - Incoming OpenAI-format chat request.
 * @returns {Object} Request body for the target OpenAI endpoint.
 */
export function transformToOpenAI(openaiRequest) {
  logDebug('将 OpenAI 请求转换为目标 OpenAI 格式');

  const targetRequest = {
    model: openaiRequest.model,
    input: [],
    store: false
  };

  // Only forward `stream` when the client provided it explicitly.
  if (openaiRequest.stream !== undefined) {
    targetRequest.stream = openaiRequest.stream;
  }

  // max_tokens (or the newer max_completion_tokens) -> max_output_tokens.
  if (openaiRequest.max_tokens) {
    targetRequest.max_output_tokens = openaiRequest.max_tokens;
  } else if (openaiRequest.max_completion_tokens) {
    targetRequest.max_output_tokens = openaiRequest.max_completion_tokens;
  }

  // messages -> input items (system items are removed again below once
  // they have been folded into `instructions`).
  if (Array.isArray(openaiRequest.messages)) {
    for (const msg of openaiRequest.messages) {
      targetRequest.input.push(messageToInputItem(msg));
    }
  }

  // Tools pass through with strict mode disabled.
  if (Array.isArray(openaiRequest.tools)) {
    targetRequest.tools = openaiRequest.tools.map(tool => ({
      ...tool,
      strict: false
    }));
  }

  // Fold every system message into `instructions`, prefixed by the
  // configured system prompt.
  const systemPrompt = getSystemPrompt();
  const systemMessages = (openaiRequest.messages || []).filter(m => m.role === 'system');

  if (systemMessages.length > 0) {
    const userInstructions = systemMessages
      .map(m => extractText(m.content))
      .join('\n');
    targetRequest.instructions = systemPrompt + userInstructions;
    targetRequest.input = targetRequest.input.filter(m => m.role !== 'system');
  } else if (systemPrompt) {
    // No client system message: instructions are just our prompt.
    targetRequest.instructions = systemPrompt;
  }

  // Reasoning handling is driven by per-model configuration.
  const reasoningLevel = getModelReasoning(openaiRequest.model);
  if (reasoningLevel === 'auto') {
    // auto: preserve the client's reasoning field verbatim (if any).
    if (openaiRequest.reasoning !== undefined) {
      targetRequest.reasoning = openaiRequest.reasoning;
    }
  } else if (['low', 'medium', 'high'].includes(reasoningLevel)) {
    // Fixed level: override whatever the client sent.
    targetRequest.reasoning = {
      effort: reasoningLevel,
      summary: 'auto'
    };
  } else {
    // Off/invalid: make sure no reasoning field goes upstream.
    delete targetRequest.reasoning;
  }

  // Sampling/behavior parameters pass through untouched when present.
  for (const key of ['temperature', 'top_p', 'presence_penalty', 'frequency_penalty', 'parallel_tool_calls']) {
    if (openaiRequest[key] !== undefined) {
      targetRequest[key] = openaiRequest[key];
    }
  }

  logDebug('已转换的目标 OpenAI 请求', targetRequest);
  return targetRequest;
}

// Convert one chat message into a Responses-API input item.
// user/system content uses input_text/input_image; assistant uses output_*.
function messageToInputItem(msg) {
  const textType = msg.role === 'assistant' ? 'output_text' : 'input_text';
  const imageType = msg.role === 'assistant' ? 'output_image' : 'input_image';
  const content = [];

  if (typeof msg.content === 'string') {
    content.push({ type: textType, text: scrubText(msg.content) });
  } else if (Array.isArray(msg.content)) {
    for (const part of msg.content) {
      if (part.type === 'text') {
        content.push({ type: textType, text: scrubText(part.text) });
      } else if (part.type === 'image_url') {
        content.push({ type: imageType, image_url: part.image_url });
      } else {
        // Unknown part types pass through as-is.
        content.push(part);
      }
    }
  }

  return { role: msg.role, content };
}

// Replace the Claude Code identity banner in outbound text.
function scrubText(text) {
  return text?.replace("You are Claude Code, Anthropic's official CLI for Claude.", 'you are bot.');
}

// Flatten message content (string or parts array) to plain text.
// Note: instructions text is intentionally NOT scrubbed, matching the
// original extraction behavior.
function extractText(content) {
  if (typeof content === 'string') {
    return content;
  }
  if (Array.isArray(content)) {
    return content
      .filter(p => p.type === 'text')
      .map(p => p.text)
      .join('\n');
  }
  return '';
}
135
-
136
/**
 * Build the HTTP headers for the target OpenAI (azure_openai) upstream.
 *
 * Session/message IDs from the client are reused when present; otherwise
 * fresh UUIDs are minted. Stainless SDK telemetry headers are copied from
 * the client or filled with fixed defaults.
 *
 * @param {string} authHeader - Authorization value to forward ('' when absent).
 * @param {Object} [clientHeaders] - Incoming client headers.
 * @returns {Object} Header map ready to send upstream.
 */
export function getOpenAIHeaders(authHeader, clientHeaders = {}) {
  const headers = {
    'content-type': 'application/json',
    'authorization': authHeader || '',
    'x-api-provider': 'azure_openai',
    'x-factory-client': 'cli',
    // Reuse client-supplied IDs when present; otherwise generate new ones.
    'x-session-id': clientHeaders['x-session-id'] || generateUUID(),
    'x-assistant-message-id': clientHeaders['x-assistant-message-id'] || generateUUID(),
    'user-agent': getUserAgent(),
    'connection': 'keep-alive'
  };

  // Stainless SDK headers: prefer the client's values, fall back to defaults.
  const stainlessDefaults = {
    'x-stainless-arch': 'x64',
    'x-stainless-lang': 'js',
    'x-stainless-os': 'MacOS',
    'x-stainless-runtime': 'node',
    'x-stainless-retry-count': '0',
    'x-stainless-package-version': '5.23.2',
    'x-stainless-runtime-version': 'v24.3.0'
  };
  for (const [name, fallback] of Object.entries(stainlessDefaults)) {
    headers[name] = clientHeaders[name] || fallback;
  }

  return headers;
}
170
-
171
- function generateUUID() {
172
- return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
173
- const r = Math.random() * 16 | 0;
174
- const v = c == 'x' ? r : (r & 0x3 | 0x8);
175
- return v.toString(16);
176
- });
177
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
transformers/response-anthropic.js DELETED
@@ -1,138 +0,0 @@
1
- import { logDebug } from '../logger.js';
2
-
3
/**
 * Converts an Anthropic Messages SSE stream into OpenAI
 * `chat.completion.chunk` SSE lines (terminated with `data: [DONE]`).
 */
export class AnthropicResponseTransformer {
  /**
   * @param {string} model - Model name echoed into every emitted chunk.
   * @param {string} [requestId] - Chunk id; defaults to a timestamped chatcmpl id.
   */
  constructor(model, requestId) {
    this.model = model;
    this.requestId = requestId || `chatcmpl-${Date.now()}`;
    this.created = Math.floor(Date.now() / 1000);
    this.messageId = null;
    this.currentIndex = 0;
  }

  // Parse one SSE line into { type: 'event'|'data', value } or null.
  parseSSELine(line) {
    if (line.startsWith('event:')) {
      return { type: 'event', value: line.slice(6).trim() };
    }
    if (!line.startsWith('data:')) {
      return null;
    }
    const payload = line.slice(5).trim();
    try {
      return { type: 'data', value: JSON.parse(payload) };
    } catch (e) {
      // Not JSON — forward the raw string.
      return { type: 'data', value: payload };
    }
  }

  /**
   * Map one Anthropic event to an OpenAI SSE string, or null when the event
   * produces no client-visible output (block boundaries, pings, unknowns).
   */
  transformEvent(eventType, eventData) {
    logDebug(`Anthropic 事件: ${eventType}`);

    switch (eventType) {
      case 'message_start':
        this.messageId = eventData.message?.id || this.requestId;
        // Opening chunk carries the assistant role.
        return this.createOpenAIChunk('', 'assistant', false);

      case 'content_block_delta':
        return this.createOpenAIChunk(eventData.delta?.text || '', null, false);

      case 'message_delta': {
        const stopReason = eventData.delta?.stop_reason;
        return stopReason
          ? this.createOpenAIChunk('', null, true, this.mapStopReason(stopReason))
          : null;
      }

      case 'message_stop':
        return this.createDoneSignal();

      default:
        // content_block_start/stop, ping, and unknown events are silent.
        return null;
    }
  }

  // Build one `data: {...}\n\n` chat.completion.chunk SSE line.
  createOpenAIChunk(content, role = null, finish = false, finishReason = null) {
    const delta = {};
    if (role) {
      delta.role = role;
    }
    if (content) {
      delta.content = content;
    }

    const payload = {
      id: this.requestId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [
        {
          index: 0,
          delta,
          finish_reason: finish ? finishReason : null
        }
      ]
    };

    return `data: ${JSON.stringify(payload)}\n\n`;
  }

  // OpenAI SSE termination sentinel.
  createDoneSignal() {
    return 'data: [DONE]\n\n';
  }

  // Translate Anthropic stop reasons to OpenAI finish reasons.
  mapStopReason(anthropicReason) {
    switch (anthropicReason) {
      case 'max_tokens':
        return 'length';
      case 'tool_use':
        return 'tool_calls';
      case 'end_turn':
      case 'stop_sequence':
      default:
        return 'stop';
    }
  }

  /**
   * Async generator: consumes the upstream SSE stream chunk by chunk and
   * yields transformed OpenAI SSE strings. A partial trailing line is kept
   * in the buffer between chunks.
   */
  async *transformStream(sourceStream) {
    let pending = '';
    let eventName = null;

    try {
      for await (const raw of sourceStream) {
        pending += raw.toString();
        const lines = pending.split('\n');
        pending = lines.pop() || '';

        for (const line of lines) {
          if (!line.trim()) continue;

          const parsed = this.parseSSELine(line);
          if (!parsed) continue;

          if (parsed.type === 'event') {
            eventName = parsed.value;
            continue;
          }
          // data line: only meaningful once an event name has been seen.
          if (eventName) {
            const out = this.transformEvent(eventName, parsed.value);
            eventName = null;
            if (out) {
              yield out;
            }
          }
        }
      }
    } catch (error) {
      logDebug('Anthropic 流转换错误', error);
      throw error;
    }
  }
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
transformers/response-openai.js DELETED
@@ -1,127 +0,0 @@
1
- import { logDebug } from '../logger.js';
2
-
3
/**
 * Converts a target-OpenAI (Responses API) SSE stream into OpenAI
 * `chat.completion.chunk` SSE lines (terminated with `data: [DONE]`).
 *
 * Fix over the original: `transformEvent` only recognized 'response.done'
 * as the terminal event, even though the stream-tail check in
 * `transformStream` already expected 'response.completed' as well — so a
 * stream terminated by 'response.completed' never got its finish chunk or
 * [DONE]. Both terminal event names are now handled identically.
 */
export class OpenAIResponseTransformer {
  /**
   * @param {string} model - Model name echoed into every emitted chunk.
   * @param {string} [requestId] - Chunk id; defaults to a timestamped chatcmpl id.
   */
  constructor(model, requestId) {
    this.model = model;
    this.requestId = requestId || `chatcmpl-${Date.now()}`;
    this.created = Math.floor(Date.now() / 1000);
  }

  // Parse one SSE line into { type: 'event'|'data', value } or null.
  parseSSELine(line) {
    if (line.startsWith('event:')) {
      return { type: 'event', value: line.slice(6).trim() };
    }
    if (line.startsWith('data:')) {
      const dataStr = line.slice(5).trim();
      try {
        return { type: 'data', value: JSON.parse(dataStr) };
      } catch (e) {
        // Not JSON — forward the raw string.
        return { type: 'data', value: dataStr };
      }
    }
    return null;
  }

  /**
   * Map one upstream event to an OpenAI SSE string (possibly several lines
   * concatenated), or null when the event produces no client-visible output.
   */
  transformEvent(eventType, eventData) {
    logDebug(`目标 OpenAI 事件: ${eventType}`);

    if (eventType === 'response.created') {
      // Opening chunk carries the assistant role.
      return this.createOpenAIChunk('', 'assistant', false);
    }

    if (eventType === 'response.in_progress') {
      return null;
    }

    if (eventType === 'response.output_text.delta') {
      const text = eventData.delta || eventData.text || '';
      return this.createOpenAIChunk(text, null, false);
    }

    if (eventType === 'response.output_text.done') {
      return null;
    }

    // Terminal event: emit the finish chunk followed by [DONE]. Both
    // 'response.done' and 'response.completed' are accepted (see class doc).
    if (eventType === 'response.done' || eventType === 'response.completed') {
      const status = eventData.response?.status;
      // 'incomplete' means the output was truncated; everything else -> 'stop'.
      const finishReason = status === 'incomplete' ? 'length' : 'stop';

      const finalChunk = this.createOpenAIChunk('', null, true, finishReason);
      return finalChunk + this.createDoneSignal();
    }

    return null;
  }

  // Build one `data: {...}\n\n` chat.completion.chunk SSE line.
  createOpenAIChunk(content, role = null, finish = false, finishReason = null) {
    const chunk = {
      id: this.requestId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [
        {
          index: 0,
          delta: {},
          finish_reason: finish ? finishReason : null
        }
      ]
    };

    if (role) {
      chunk.choices[0].delta.role = role;
    }
    if (content) {
      chunk.choices[0].delta.content = content;
    }

    return `data: ${JSON.stringify(chunk)}\n\n`;
  }

  // OpenAI SSE termination sentinel.
  createDoneSignal() {
    return 'data: [DONE]\n\n';
  }

  /**
   * Async generator: consumes the upstream SSE stream chunk by chunk and
   * yields transformed OpenAI SSE strings. A partial trailing line is kept
   * in the buffer between chunks.
   */
  async *transformStream(sourceStream) {
    let buffer = '';
    let currentEvent = null;

    try {
      for await (const chunk of sourceStream) {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (!line.trim()) continue;

          const parsed = this.parseSSELine(line);
          if (!parsed) continue;

          if (parsed.type === 'event') {
            currentEvent = parsed.value;
          } else if (parsed.type === 'data' && currentEvent) {
            const transformed = this.transformEvent(currentEvent, parsed.value);
            if (transformed) {
              yield transformed;
            }
            currentEvent = null;
          }
        }
      }

      // Safety net: if the stream ended right after a terminal event line
      // (its data line never arrived), still emit [DONE].
      if (currentEvent === 'response.done' || currentEvent === 'response.completed') {
        yield this.createDoneSignal();
      }
    } catch (error) {
      logDebug('OpenAI 流转换错误', error);
      throw error;
    }
  }
}