import OpenAI from 'openai'
import { createLogger } from '../../utils/logger'
import { cleanManimCode } from '../../utils/manim-code-cleaner'
import { getClient } from './client'
import { extractCodeFromResponse } from './utils'
import type { CodeRetryContext } from './types'
import { buildRetryPrompt, getCodeRetrySystemPrompt } from './prompt-builder'
import { dedupeSharedBlocksInMessages } from '../prompt-dedup'
import { createChatCompletionText } from '../openai-stream'
// Module-scoped logger; tags every entry with the 'CodeRetryCodeGen' source.
const logger = createLogger('CodeRetryCodeGen')
// Default model used when the caller does not supply one via customApiConfig.
const OPENAI_MODEL = process.env.OPENAI_MODEL || 'glm-4-flash'

// Sampling temperature and completion token cap, overridable via env.
// parseFloat/parseInt return NaN on malformed values (e.g. AI_TEMPERATURE=abc),
// which the API would reject — fall back to the documented defaults instead.
const parsedTemperature = parseFloat(process.env.AI_TEMPERATURE || '0.7')
const AI_TEMPERATURE = Number.isFinite(parsedTemperature) ? parsedTemperature : 0.7
const parsedMaxTokens = parseInt(process.env.AI_MAX_TOKENS || '1200', 10)
const MAX_TOKENS = Number.isFinite(parsedMaxTokens) ? parsedMaxTokens : 1200

/**
 * Resolves the model name for a request: a non-empty, trimmed `model` on the
 * custom API config wins; otherwise the env/default model.
 */
function getModel(customApiConfig?: unknown): string {
  const override = (customApiConfig as { model?: string } | undefined)?.model
  return override?.trim() || OPENAI_MODEL
}
export async function generateInitialCode(
context: CodeRetryContext,
customApiConfig?: unknown
): Promise<string> {
const client = getClient(customApiConfig as any)
if (!client) {
throw new Error('OpenAI 客户端不可用')
}
try {
const requestMessages = dedupeSharedBlocksInMessages(
[
{ role: 'system', content: getCodeRetrySystemPrompt(context.promptOverrides) },
{ role: 'user', content: context.originalPrompt }
],
context.promptOverrides
)
const { content, mode } = await createChatCompletionText(
client,
{
model: getModel(customApiConfig),
messages: requestMessages,
temperature: AI_TEMPERATURE,
max_tokens: MAX_TOKENS
},
{ fallbackToNonStream: true }
)
if (!content) {
throw new Error('AI 返回空内容')
}
const code = extractCodeFromResponse(content, context.outputMode)
const cleaned = cleanManimCode(code)
logger.info('首次代码生成成功', {
concept: context.concept,
mode,
codeLength: cleaned.code.length
})
return cleaned.code
} catch (error) {
if (error instanceof OpenAI.APIError) {
logger.error('OpenAI API 错误', {
status: error.status,
message: error.message
})
}
throw error
}
}
export async function retryCodeGeneration(
context: CodeRetryContext,
errorMessage: string,
attempt: number,
currentCode: string,
customApiConfig?: unknown
): Promise<string> {
const client = getClient(customApiConfig as any)
if (!client) {
throw new Error('OpenAI 客户端不可用')
}
const retryPrompt = buildRetryPrompt(context, errorMessage, attempt, currentCode)
try {
const requestMessages = dedupeSharedBlocksInMessages(
[
{ role: 'system', content: getCodeRetrySystemPrompt(context.promptOverrides) },
{ role: 'user', content: retryPrompt }
],
context.promptOverrides
)
const { content, mode } = await createChatCompletionText(
client,
{
model: getModel(customApiConfig),
messages: requestMessages,
temperature: AI_TEMPERATURE,
max_tokens: MAX_TOKENS
},
{ fallbackToNonStream: true }
)
if (!content) {
throw new Error('AI 返回空内容')
}
const code = extractCodeFromResponse(content, context.outputMode)
const cleaned = cleanManimCode(code)
logger.info('代码重试生成成功', {
concept: context.concept,
attempt,
mode,
codeLength: cleaned.code.length
})
return cleaned.code
} catch (error) {
if (error instanceof OpenAI.APIError) {
logger.error('OpenAI API 错误(重试)', {
attempt,
status: error.status,
message: error.message
})
}
throw error
}
}