| |
| |
| |
| |
| |
|
|
|
|
| import type { Request, Response } from 'express';
|
| import { v4 as uuidv4 } from 'uuid';
|
| import type {
|
| OpenAIChatRequest,
|
| OpenAIMessage,
|
| OpenAIChatCompletion,
|
| OpenAIChatCompletionChunk,
|
| OpenAIToolCall,
|
| OpenAIContentPart,
|
| OpenAITool,
|
| } from './openai-types.js';
|
| import type {
|
| AnthropicRequest,
|
| AnthropicMessage,
|
| AnthropicContentBlock,
|
| AnthropicTool,
|
| CursorChatRequest,
|
| CursorSSEEvent,
|
| } from './types.js';
|
| import { convertToCursorRequest, parseToolCalls, hasToolCalls } from './converter.js';
|
| import { sendCursorRequest, sendCursorRequestFull } from './cursor-client.js';
|
| import { getConfig } from './config.js';
|
| import { extractThinking } from './thinking.js';
|
| import {
|
| isRefusal,
|
| sanitizeResponse,
|
| isIdentityProbe,
|
| isToolCapabilityQuestion,
|
| buildRetryRequest,
|
| CLAUDE_IDENTITY_RESPONSE,
|
| CLAUDE_TOOLS_RESPONSE,
|
| MAX_REFUSAL_RETRIES,
|
| estimateInputTokens,
|
| } from './handler.js';
|
|
|
| function chatId(): string {
|
| return 'chatcmpl-' + uuidv4().replace(/-/g, '').substring(0, 24);
|
| }
|
|
|
| function toolCallId(): string {
|
| return 'call_' + uuidv4().replace(/-/g, '').substring(0, 24);
|
| }
|
|
|
|
|
|
|
| |
| |
| |
|
|
/**
 * Converts an OpenAI Chat Completions request into an Anthropic Messages
 * request.
 *
 * Mapping rules:
 * - `system` messages are concatenated (blank-line separated) into the
 *   Anthropic top-level `system` prompt.
 * - `assistant` messages carry text blocks plus one `tool_use` block per
 *   OpenAI `tool_call` (arguments parsed as JSON; unparseable argument
 *   strings are wrapped as `{ input: raw }`).
 * - `tool` messages become `user` messages containing a single `tool_result`
 *   block, since Anthropic has no dedicated tool role.
 * - Consecutive same-role messages are merged afterwards (Anthropic requires
 *   alternating roles).
 * - Tools are accepted in either nested OpenAI shape (`{ function: {...} }`)
 *   or already-flat Anthropic shape (`{ name, input_schema }`).
 *
 * NOTE(review): `max_tokens` is clamped to a minimum of 8192 regardless of
 * what the client requested — presumably deliberate for this proxy; confirm.
 */
function convertToAnthropicRequest(body: OpenAIChatRequest): AnthropicRequest {
  const rawMessages: AnthropicMessage[] = [];
  let systemPrompt: string | undefined;

  for (const msg of body.messages) {
    switch (msg.role) {
      case 'system':
        // Accumulate multiple system messages into one prompt string.
        systemPrompt = (systemPrompt ? systemPrompt + '\n\n' : '') + extractOpenAIContent(msg);
        break;

      case 'user': {
        // Content may come back as a plain string or structured blocks.
        const contentBlocks = extractOpenAIContentBlocks(msg);
        if (Array.isArray(contentBlocks)) {
          rawMessages.push({ role: 'user', content: contentBlocks });
        } else {
          rawMessages.push({ role: 'user', content: contentBlocks || '' });
        }
        break;
      }

      case 'assistant': {
        const blocks: AnthropicContentBlock[] = [];
        const contentBlocks = extractOpenAIContentBlocks(msg);
        if (typeof contentBlocks === 'string' && contentBlocks) {
          blocks.push({ type: 'text', text: contentBlocks });
        } else if (Array.isArray(contentBlocks)) {
          blocks.push(...contentBlocks);
        }

        // Each OpenAI tool_call becomes an Anthropic tool_use block.
        if (msg.tool_calls && msg.tool_calls.length > 0) {
          for (const tc of msg.tool_calls) {
            let args: Record<string, unknown> = {};
            try {
              args = JSON.parse(tc.function.arguments);
            } catch {
              // Arguments were not valid JSON; preserve the raw string.
              args = { input: tc.function.arguments };
            }
            blocks.push({
              type: 'tool_use',
              id: tc.id,
              name: tc.function.name,
              input: args,
            });
          }
        }

        // Fall back to the raw string (or '') when no blocks were produced.
        rawMessages.push({
          role: 'assistant',
          content: blocks.length > 0 ? blocks : (typeof contentBlocks === 'string' ? contentBlocks : ''),
        });
        break;
      }

      case 'tool': {
        // Anthropic expects tool results inside a user turn.
        rawMessages.push({
          role: 'user',
          content: [{
            type: 'tool_result',
            tool_use_id: msg.tool_call_id,
            content: extractOpenAIContent(msg),
          }] as AnthropicContentBlock[],
        });
        break;
      }
    }
  }

  // Anthropic rejects consecutive messages with the same role.
  const messages = mergeConsecutiveRoles(rawMessages);

  // Accept both nested-OpenAI and flat-Anthropic tool definitions.
  const tools: AnthropicTool[] | undefined = body.tools?.map((t: OpenAITool | Record<string, unknown>) => {
    if ('function' in t && t.function) {
      const fn = (t as OpenAITool).function;
      return {
        name: fn.name,
        description: fn.description,
        input_schema: fn.parameters || { type: 'object', properties: {} },
      };
    }
    const flat = t as Record<string, unknown>;
    return {
      name: (flat.name as string) || '',
      description: flat.description as string | undefined,
      input_schema: (flat.input_schema as Record<string, unknown>) || { type: 'object', properties: {} },
    };
  });

  return {
    model: body.model,
    messages,
    max_tokens: Math.max(body.max_tokens || body.max_completion_tokens || 8192, 8192),
    stream: body.stream,
    system: systemPrompt,
    tools,
    temperature: body.temperature,
    top_p: body.top_p,
    stop_sequences: body.stop
      ? (Array.isArray(body.stop) ? body.stop : [body.stop])
      : undefined,
  };
}
|
|
|
| |
| |
|
|
| function mergeConsecutiveRoles(messages: AnthropicMessage[]): AnthropicMessage[] {
|
| if (messages.length <= 1) return messages;
|
|
|
| const merged: AnthropicMessage[] = [];
|
| for (const msg of messages) {
|
| const last = merged[merged.length - 1];
|
| if (last && last.role === msg.role) {
|
|
|
| const lastBlocks = toBlocks(last.content);
|
| const newBlocks = toBlocks(msg.content);
|
| last.content = [...lastBlocks, ...newBlocks];
|
| } else {
|
| merged.push({ ...msg });
|
| }
|
| }
|
| return merged;
|
| }
|
|
|
| |
| |
|
|
| function toBlocks(content: string | AnthropicContentBlock[]): AnthropicContentBlock[] {
|
| if (typeof content === 'string') {
|
| return content ? [{ type: 'text', text: content }] : [];
|
| }
|
| return content || [];
|
| }
|
|
|
| |
| |
|
|
| function extractOpenAIContentBlocks(msg: OpenAIMessage): string | AnthropicContentBlock[] {
|
| if (msg.content === null || msg.content === undefined) return '';
|
| if (typeof msg.content === 'string') return msg.content;
|
| if (Array.isArray(msg.content)) {
|
| const blocks: AnthropicContentBlock[] = [];
|
| for (const p of msg.content as (OpenAIContentPart | Record<string, unknown>)[]) {
|
| if (p.type === 'text' && (p as OpenAIContentPart).text) {
|
| blocks.push({ type: 'text', text: (p as OpenAIContentPart).text! });
|
| } else if (p.type === 'image_url' && (p as OpenAIContentPart).image_url?.url) {
|
| const url = (p as OpenAIContentPart).image_url!.url;
|
| if (url.startsWith('data:')) {
|
| const match = url.match(/^data:([^;]+);base64,(.+)$/);
|
| if (match) {
|
| blocks.push({
|
| type: 'image',
|
| source: { type: 'base64', media_type: match[1], data: match[2] }
|
| });
|
| }
|
| } else {
|
| blocks.push({
|
| type: 'image',
|
| source: { type: 'url', media_type: 'image/jpeg', data: url }
|
| });
|
| }
|
| } else if (p.type === 'tool_use') {
|
|
|
| blocks.push(p as unknown as AnthropicContentBlock);
|
| } else if (p.type === 'tool_result') {
|
|
|
| blocks.push(p as unknown as AnthropicContentBlock);
|
| }
|
| }
|
| return blocks.length > 0 ? blocks : '';
|
| }
|
| return String(msg.content);
|
| }
|
|
|
| |
| |
|
|
| function extractOpenAIContent(msg: OpenAIMessage): string {
|
| const blocks = extractOpenAIContentBlocks(msg);
|
| if (typeof blocks === 'string') return blocks;
|
| return blocks.filter(b => b.type === 'text').map(b => b.text).join('\n');
|
| }
|
|
|
|
|
|
|
| export async function handleOpenAIChatCompletions(req: Request, res: Response): Promise<void> {
|
| const body = req.body as OpenAIChatRequest;
|
|
|
| console.log(`[OpenAI] 收到请求: model=${body.model}, messages=${body.messages?.length}, stream=${body.stream}, tools=${body.tools?.length ?? 0}`);
|
|
|
| try {
|
|
|
| const anthropicReq = convertToAnthropicRequest(body);
|
|
|
|
|
|
|
|
|
| if (isIdentityProbe(anthropicReq)) {
|
| console.log(`[OpenAI] 拦截到身份探针,返回模拟响应`);
|
| const mockText = "I am Claude, an advanced AI programming assistant created by Anthropic. I am ready to help you write code, debug, and answer your technical questions. Please let me know what we should work on!";
|
| if (body.stream) {
|
| return handleOpenAIMockStream(res, body, mockText);
|
| } else {
|
| return handleOpenAIMockNonStream(res, body, mockText);
|
| }
|
| }
|
|
|
|
|
| const cursorReq = await convertToCursorRequest(anthropicReq);
|
|
|
| if (body.stream) {
|
| await handleOpenAIStream(res, cursorReq, body, anthropicReq);
|
| } else {
|
| await handleOpenAINonStream(res, cursorReq, body, anthropicReq);
|
| }
|
| } catch (err: unknown) {
|
| const message = err instanceof Error ? err.message : String(err);
|
| console.error(`[OpenAI] 请求处理失败:`, message);
|
| res.status(500).json({
|
| error: {
|
| message,
|
| type: 'server_error',
|
| code: 'internal_error',
|
| },
|
| });
|
| }
|
| }
|
|
|
|
|
|
|
| function handleOpenAIMockStream(res: Response, body: OpenAIChatRequest, mockText: string): void {
|
| res.writeHead(200, {
|
| 'Content-Type': 'text/event-stream',
|
| 'Cache-Control': 'no-cache',
|
| 'Connection': 'keep-alive',
|
| 'X-Accel-Buffering': 'no',
|
| });
|
| const id = chatId();
|
| const created = Math.floor(Date.now() / 1000);
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model: body.model,
|
| choices: [{ index: 0, delta: { role: 'assistant', content: mockText }, finish_reason: null }],
|
| });
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model: body.model,
|
| choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
|
| });
|
| res.write('data: [DONE]\n\n');
|
| res.end();
|
| }
|
|
|
| function handleOpenAIMockNonStream(res: Response, body: OpenAIChatRequest, mockText: string): void {
|
| res.json({
|
| id: chatId(),
|
| object: 'chat.completion',
|
| created: Math.floor(Date.now() / 1000),
|
| model: body.model,
|
| choices: [{
|
| index: 0,
|
| message: { role: 'assistant', content: mockText },
|
| finish_reason: 'stop',
|
| }],
|
| usage: { prompt_tokens: 15, completion_tokens: 35, total_tokens: 50 },
|
| });
|
| }
|
|
|
|
|
|
|
| async function handleOpenAIStream(
|
| res: Response,
|
| cursorReq: CursorChatRequest,
|
| body: OpenAIChatRequest,
|
| anthropicReq: AnthropicRequest,
|
| ): Promise<void> {
|
| res.writeHead(200, {
|
| 'Content-Type': 'text/event-stream',
|
| 'Cache-Control': 'no-cache',
|
| 'Connection': 'keep-alive',
|
| 'X-Accel-Buffering': 'no',
|
| });
|
|
|
| const id = chatId();
|
| const created = Math.floor(Date.now() / 1000);
|
| const model = body.model;
|
| const hasTools = (body.tools?.length ?? 0) > 0;
|
|
|
|
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: { role: 'assistant', content: '' },
|
| finish_reason: null,
|
| }],
|
| });
|
|
|
| let fullResponse = '';
|
| let sentText = '';
|
| let activeCursorReq = cursorReq;
|
| let retryCount = 0;
|
|
|
|
|
| const executeStream = async () => {
|
| fullResponse = '';
|
| await sendCursorRequest(activeCursorReq, (event: CursorSSEEvent) => {
|
| if (event.type !== 'text-delta' || !event.delta) return;
|
| fullResponse += event.delta;
|
| });
|
| };
|
|
|
| try {
|
| await executeStream();
|
|
|
| console.log(`[OpenAI] 原始响应 (${fullResponse.length} chars, tools=${hasTools}): ${fullResponse.substring(0, 200)}${fullResponse.length > 200 ? '...' : ''}`);
|
|
|
|
|
| const shouldRetryRefusal = () => {
|
| if (!isRefusal(fullResponse)) return false;
|
| if (hasTools && hasToolCalls(fullResponse)) return false;
|
| return true;
|
| };
|
|
|
| while (shouldRetryRefusal() && retryCount < MAX_REFUSAL_RETRIES) {
|
| retryCount++;
|
| console.log(`[OpenAI] 检测到拒绝(第${retryCount}次),自动重试...原始: ${fullResponse.substring(0, 100)}`);
|
| const retryBody = buildRetryRequest(anthropicReq, retryCount - 1);
|
| activeCursorReq = await convertToCursorRequest(retryBody);
|
| await executeStream();
|
| }
|
| if (shouldRetryRefusal()) {
|
| if (!hasTools) {
|
| if (isToolCapabilityQuestion(anthropicReq)) {
|
| console.log(`[OpenAI] 工具能力询问被拒绝,返回 Claude 能力描述`);
|
| fullResponse = CLAUDE_TOOLS_RESPONSE;
|
| } else {
|
| console.log(`[OpenAI] 重试${MAX_REFUSAL_RETRIES}次后仍被拒绝,返回 Claude 身份回复`);
|
| fullResponse = CLAUDE_IDENTITY_RESPONSE;
|
| }
|
| } else {
|
| console.log(`[OpenAI] 工具模式下拒绝且无工具调用,引导模型输出`);
|
| fullResponse = 'I understand the request. Let me analyze the information and proceed with the appropriate action.';
|
| }
|
| }
|
|
|
|
|
| if (hasTools && fullResponse.trim().length < 10 && retryCount < MAX_REFUSAL_RETRIES) {
|
| retryCount++;
|
| console.log(`[OpenAI] 响应过短 (${fullResponse.length} chars),重试第${retryCount}次`);
|
| activeCursorReq = await convertToCursorRequest(anthropicReq);
|
| await executeStream();
|
| }
|
|
|
| let finishReason: 'stop' | 'tool_calls' = 'stop';
|
|
|
|
|
| const config = getConfig();
|
| if (config.enableThinking && fullResponse.includes('<thinking>')) {
|
| const extracted = extractThinking(fullResponse);
|
| if (extracted.thinkingBlocks.length > 0) {
|
| const reasoningContent = extracted.thinkingBlocks.map(b => b.thinking).join('\n\n');
|
| fullResponse = extracted.cleanText;
|
|
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: { reasoning_content: reasoningContent },
|
| finish_reason: null,
|
| }],
|
| });
|
| }
|
| }
|
|
|
| if (hasTools && hasToolCalls(fullResponse)) {
|
| const { toolCalls, cleanText } = parseToolCalls(fullResponse);
|
|
|
| if (toolCalls.length > 0) {
|
| finishReason = 'tool_calls';
|
|
|
|
|
| let cleanOutput = isRefusal(cleanText) ? '' : cleanText;
|
| cleanOutput = sanitizeResponse(cleanOutput);
|
| if (cleanOutput) {
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: { content: cleanOutput },
|
| finish_reason: null,
|
| }],
|
| });
|
| }
|
|
|
|
|
| for (let i = 0; i < toolCalls.length; i++) {
|
| const tc = toolCalls[i];
|
| const tcId = toolCallId();
|
| const argsStr = JSON.stringify(tc.arguments);
|
|
|
|
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: {
|
| ...(i === 0 ? { content: null } : {}),
|
| tool_calls: [{
|
| index: i,
|
| id: tcId,
|
| type: 'function',
|
| function: { name: tc.name, arguments: '' },
|
| }],
|
| },
|
| finish_reason: null,
|
| }],
|
| });
|
|
|
|
|
| const CHUNK_SIZE = 128;
|
| for (let j = 0; j < argsStr.length; j += CHUNK_SIZE) {
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: {
|
| tool_calls: [{
|
| index: i,
|
| function: { arguments: argsStr.slice(j, j + CHUNK_SIZE) },
|
| }],
|
| },
|
| finish_reason: null,
|
| }],
|
| });
|
| }
|
| }
|
| } else {
|
|
|
| let textToSend = fullResponse;
|
| if (isRefusal(fullResponse)) {
|
| textToSend = 'The previous action is unavailable. Continue using other available actions to complete the task.';
|
| } else {
|
| textToSend = sanitizeResponse(fullResponse);
|
| }
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: { content: textToSend },
|
| finish_reason: null,
|
| }],
|
| });
|
| }
|
| } else {
|
|
|
| const sanitized = sanitizeResponse(fullResponse);
|
| if (sanitized) {
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: { content: sanitized },
|
| finish_reason: null,
|
| }],
|
| });
|
| }
|
| }
|
|
|
|
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: {},
|
| finish_reason: finishReason,
|
| }],
|
| });
|
|
|
| res.write('data: [DONE]\n\n');
|
|
|
| } catch (err: unknown) {
|
| const message = err instanceof Error ? err.message : String(err);
|
| writeOpenAISSE(res, {
|
| id, object: 'chat.completion.chunk', created, model,
|
| choices: [{
|
| index: 0,
|
| delta: { content: `\n\n[Error: ${message}]` },
|
| finish_reason: 'stop',
|
| }],
|
| });
|
| res.write('data: [DONE]\n\n');
|
| }
|
|
|
| res.end();
|
| }
|
|
|
|
|
|
|
| async function handleOpenAINonStream(
|
| res: Response,
|
| cursorReq: CursorChatRequest,
|
| body: OpenAIChatRequest,
|
| anthropicReq: AnthropicRequest,
|
| ): Promise<void> {
|
| let fullText = await sendCursorRequestFull(cursorReq);
|
| const hasTools = (body.tools?.length ?? 0) > 0;
|
|
|
| console.log(`[OpenAI] 非流式原始响应 (${fullText.length} chars, tools=${hasTools}): ${fullText.substring(0, 300)}${fullText.length > 300 ? '...' : ''}`);
|
|
|
|
|
| const shouldRetry = () => isRefusal(fullText) && !(hasTools && hasToolCalls(fullText));
|
|
|
| if (shouldRetry()) {
|
| for (let attempt = 0; attempt < MAX_REFUSAL_RETRIES; attempt++) {
|
| console.log(`[OpenAI] 非流式:检测到拒绝(第${attempt + 1}次重试)...原始: ${fullText.substring(0, 100)}`);
|
| const retryBody = buildRetryRequest(anthropicReq, attempt);
|
| const retryCursorReq = await convertToCursorRequest(retryBody);
|
| fullText = await sendCursorRequestFull(retryCursorReq);
|
| if (!shouldRetry()) break;
|
| }
|
| if (shouldRetry()) {
|
| if (hasTools) {
|
| console.log(`[OpenAI] 非流式:工具模式下拒绝,引导模型输出`);
|
| fullText = 'I understand the request. Let me analyze the information and proceed with the appropriate action.';
|
| } else if (isToolCapabilityQuestion(anthropicReq)) {
|
| console.log(`[OpenAI] 非流式:工具能力询问被拒绝,返回 Claude 能力描述`);
|
| fullText = CLAUDE_TOOLS_RESPONSE;
|
| } else {
|
| console.log(`[OpenAI] 非流式:重试${MAX_REFUSAL_RETRIES}次后仍被拒绝,返回 Claude 身份回复`);
|
| fullText = CLAUDE_IDENTITY_RESPONSE;
|
| }
|
| }
|
| }
|
|
|
| let content: string | null = fullText;
|
| let toolCalls: OpenAIToolCall[] | undefined;
|
| let finishReason: 'stop' | 'tool_calls' = 'stop';
|
| let reasoningContent: string | undefined;
|
|
|
|
|
| const config = getConfig();
|
| if (config.enableThinking && fullText.includes('<thinking>')) {
|
| const extracted = extractThinking(fullText);
|
| if (extracted.thinkingBlocks.length > 0) {
|
| reasoningContent = extracted.thinkingBlocks.map(b => b.thinking).join('\n\n');
|
| fullText = extracted.cleanText;
|
| }
|
| }
|
|
|
| if (hasTools) {
|
| const parsed = parseToolCalls(fullText);
|
|
|
| if (parsed.toolCalls.length > 0) {
|
| finishReason = 'tool_calls';
|
|
|
| let cleanText = parsed.cleanText;
|
| if (isRefusal(cleanText)) {
|
| console.log(`[OpenAI] 抑制工具模式下的拒绝文本: ${cleanText.substring(0, 100)}...`);
|
| cleanText = '';
|
| }
|
| content = sanitizeResponse(cleanText) || null;
|
|
|
| toolCalls = parsed.toolCalls.map(tc => ({
|
| id: toolCallId(),
|
| type: 'function' as const,
|
| function: {
|
| name: tc.name,
|
| arguments: JSON.stringify(tc.arguments),
|
| },
|
| }));
|
| } else {
|
|
|
| if (isRefusal(fullText)) {
|
| content = 'The previous action is unavailable. Continue using other available actions to complete the task.';
|
| } else {
|
| content = sanitizeResponse(fullText);
|
| }
|
| }
|
| } else {
|
|
|
| content = sanitizeResponse(fullText);
|
| }
|
|
|
| const response: OpenAIChatCompletion = {
|
| id: chatId(),
|
| object: 'chat.completion',
|
| created: Math.floor(Date.now() / 1000),
|
| model: body.model,
|
| choices: [{
|
| index: 0,
|
| message: {
|
| role: 'assistant',
|
| content,
|
| ...(reasoningContent ? { reasoning_content: reasoningContent } : {}),
|
| ...(toolCalls ? { tool_calls: toolCalls } : {}),
|
| },
|
| finish_reason: finishReason,
|
| }],
|
| usage: {
|
| prompt_tokens: estimateInputTokens(anthropicReq).input_tokens,
|
| completion_tokens: Math.ceil(fullText.length / 3),
|
| total_tokens: estimateInputTokens(anthropicReq).input_tokens + Math.ceil(fullText.length / 3),
|
| ...estimateInputTokens(anthropicReq)
|
| },
|
| };
|
|
|
| res.json(response);
|
| }
|
|
|
|
|
|
|
| function writeOpenAISSE(res: Response, data: OpenAIChatCompletionChunk): void {
|
| res.write(`data: ${JSON.stringify(data)}\n\n`);
|
| if (typeof (res as unknown as { flush: () => void }).flush === 'function') {
|
| (res as unknown as { flush: () => void }).flush();
|
| }
|
| }
|
|
|
|
|
|
|
| |
| |
| |
| |
| |
|
|
| export async function handleOpenAIResponses(req: Request, res: Response): Promise<void> {
|
| try {
|
| const body = req.body;
|
| console.log(`[OpenAI] 收到 /v1/responses 请求: model=${body.model}`);
|
|
|
|
|
| const chatBody = responsesToChatCompletions(body);
|
|
|
|
|
| req.body = chatBody;
|
| return handleOpenAIChatCompletions(req, res);
|
| } catch (err: unknown) {
|
| const message = err instanceof Error ? err.message : String(err);
|
| console.error(`[OpenAI] /v1/responses 处理失败:`, message);
|
| res.status(500).json({
|
| error: { message, type: 'server_error', code: 'internal_error' },
|
| });
|
| }
|
| }
|
|
|
| |
| |
| |
| |
|
|
| export function responsesToChatCompletions(body: Record<string, unknown>): OpenAIChatRequest {
|
| const messages: OpenAIMessage[] = [];
|
|
|
|
|
| if (body.instructions && typeof body.instructions === 'string') {
|
| messages.push({ role: 'system', content: body.instructions });
|
| }
|
|
|
|
|
| const input = body.input;
|
| if (typeof input === 'string') {
|
| messages.push({ role: 'user', content: input });
|
| } else if (Array.isArray(input)) {
|
| for (const item of input as Record<string, unknown>[]) {
|
|
|
| if (item.type === 'function_call_output') {
|
| messages.push({
|
| role: 'tool',
|
| content: (item.output as string) || '',
|
| tool_call_id: (item.call_id as string) || '',
|
| });
|
| continue;
|
| }
|
| const role = (item.role as string) || 'user';
|
| if (role === 'system' || role === 'developer') {
|
| const text = typeof item.content === 'string'
|
| ? item.content
|
| : Array.isArray(item.content)
|
| ? (item.content as Array<Record<string, unknown>>).filter(b => b.type === 'input_text').map(b => b.text as string).join('\n')
|
| : String(item.content || '');
|
| messages.push({ role: 'system', content: text });
|
| } else if (role === 'user') {
|
| const content = typeof item.content === 'string'
|
| ? item.content
|
| : Array.isArray(item.content)
|
| ? (item.content as Array<Record<string, unknown>>).filter(b => b.type === 'input_text').map(b => b.text as string).join('\n')
|
| : String(item.content || '');
|
| messages.push({ role: 'user', content });
|
| } else if (role === 'assistant') {
|
| const blocks = Array.isArray(item.content) ? item.content as Array<Record<string, unknown>> : [];
|
| const text = blocks.filter(b => b.type === 'output_text').map(b => b.text as string).join('\n');
|
|
|
| const toolCallBlocks = blocks.filter(b => b.type === 'function_call');
|
| const toolCalls: OpenAIToolCall[] = toolCallBlocks.map(b => ({
|
| id: (b.call_id as string) || toolCallId(),
|
| type: 'function' as const,
|
| function: {
|
| name: (b.name as string) || '',
|
| arguments: (b.arguments as string) || '{}',
|
| },
|
| }));
|
| messages.push({
|
| role: 'assistant',
|
| content: text || null,
|
| ...(toolCalls.length > 0 ? { tool_calls: toolCalls } : {}),
|
| });
|
| }
|
| }
|
| }
|
|
|
|
|
| const tools: OpenAITool[] | undefined = Array.isArray(body.tools)
|
| ? (body.tools as Array<Record<string, unknown>>).map(t => {
|
| if (t.type === 'function') {
|
| return {
|
| type: 'function' as const,
|
| function: {
|
| name: (t.name as string) || '',
|
| description: t.description as string | undefined,
|
| parameters: t.parameters as Record<string, unknown> | undefined,
|
| },
|
| };
|
| }
|
| return {
|
| type: 'function' as const,
|
| function: {
|
| name: (t.name as string) || '',
|
| description: t.description as string | undefined,
|
| parameters: t.parameters as Record<string, unknown> | undefined,
|
| },
|
| };
|
| })
|
| : undefined;
|
|
|
| return {
|
| model: (body.model as string) || 'gpt-4',
|
| messages,
|
| stream: (body.stream as boolean) ?? true,
|
| temperature: body.temperature as number | undefined,
|
| max_tokens: (body.max_output_tokens as number) || 8192,
|
| tools,
|
| };
|
| }
|
|
|