import OpenAI from 'openai'
import { createLogger } from '../../utils/logger'
import { cleanManimCode } from '../../utils/manim-code-cleaner'
import { getClient } from './client'
import type { CodeRetryContext } from './types'
import { buildRetryPrompt, getCodeRetrySystemPrompt } from './prompt-builder'
import { dedupeSharedBlocksInMessages } from '../prompt-dedup'
import { createChatCompletionText } from '../openai-stream'
import { buildTokenParams } from '../../utils/reasoning-model'
import { applyPatchSetToCode, extractTargetLine, parsePatchResponse } from './utils'
const logger = createLogger('CodeRetryCodeGen')

// Parse a numeric env var, falling back to `fallback` when the variable is
// unset, empty, or does not parse to a finite number. Without this guard,
// a malformed env value (e.g. AI_TEMPERATURE="abc") yields NaN, which would
// silently propagate into the API request parameters below.
function numericEnv(name: string, fallback: number, parse: (raw: string) => number): number {
  const raw = process.env[name]
  if (!raw) return fallback
  const parsed = parse(raw)
  return Number.isFinite(parsed) ? parsed : fallback
}

// Sampling temperature for retry completions (default 0.7).
const AI_TEMPERATURE = numericEnv('AI_TEMPERATURE', 0.7, parseFloat)
// Upper bound on completion tokens per retry request.
const MAX_TOKENS = numericEnv('AI_MAX_TOKENS', 12000, (raw) => parseInt(raw, 10))
// Token budget for reasoning/"thinking" on models that support it.
const THINKING_TOKENS = numericEnv('AI_THINKING_TOKENS', 20000, (raw) => parseInt(raw, 10))
/**
 * Resolve the model name from an optional per-request API config.
 *
 * @param customApiConfig - Opaque config object that may carry a `model` field.
 * @returns The trimmed, non-empty model identifier.
 * @throws Error when no usable model name is present after trimming.
 */
function getModel(customApiConfig?: unknown): string {
  const config = customApiConfig as { model?: string } | undefined
  const candidate = config?.model?.trim()
  if (candidate === undefined || candidate === '') {
    throw new Error('No model available')
  }
  return candidate
}
/**
 * Ask the model to repair previously generated Manim code after a failure.
 *
 * Builds a retry prompt from the error context, requests a patch response
 * from the configured AI client, applies the parsed patch set to
 * `currentCode`, and returns the cleaned result.
 *
 * @param context - Retry context (concept, prompt overrides, etc.).
 * @param errorMessage - The error message produced by the failing code.
 * @param attempt - Retry attempt number (used in prompts and usage labels).
 * @param currentCode - The full code that failed and should be patched.
 * @param codeSnippet - Optional snippet highlighting the failing region.
 * @param customApiConfig - Optional per-request API config (client + model).
 * @returns The patched and cleaned code.
 * @throws Error when no upstream AI is configured, when the model returns
 *   empty content, or when the patch response cannot be parsed or applied.
 */
export async function retryCodeGeneration(
  context: CodeRetryContext,
  errorMessage: string,
  attempt: number,
  currentCode: string,
  codeSnippet: string | undefined,
  customApiConfig?: unknown
): Promise<string> {
  // Derive the cast from getClient's actual parameter type instead of `as any`,
  // so it stays in sync if getClient's signature changes.
  const client = getClient(customApiConfig as Parameters<typeof getClient>[0])
  if (!client) {
    throw new Error('No upstream AI is configured for this request')
  }
  const retryPrompt = buildRetryPrompt(context, errorMessage, attempt, currentCode, codeSnippet)
  try {
    // Dedupe shared prompt blocks to keep the request within token budget.
    const requestMessages = dedupeSharedBlocksInMessages(
      [
        { role: 'system', content: getCodeRetrySystemPrompt(context.promptOverrides) },
        { role: 'user', content: retryPrompt }
      ],
      context.promptOverrides
    )
    const { content, mode } = await createChatCompletionText(
      client,
      {
        model: getModel(customApiConfig),
        messages: requestMessages,
        temperature: AI_TEMPERATURE,
        ...buildTokenParams(THINKING_TOKENS, MAX_TOKENS)
      },
      { fallbackToNonStream: true, usageLabel: `retry-${attempt}` }
    )
    if (!content) {
      throw new Error('AI returned empty content')
    }
    logger.info('Code retry model response received', {
      concept: context.concept,
      attempt,
      mode,
      contentLength: content.length,
      contentPreview: content.trim().slice(0, 500)
    })
    // Parse the model's patch response and apply it to the failing code,
    // hinting at the target line extracted from the error message.
    const patchSet = parsePatchResponse(content)
    const patchedCode = applyPatchSetToCode(currentCode, patchSet, extractTargetLine(errorMessage))
    const cleaned = cleanManimCode(patchedCode)
    logger.info('Code retry patch applied', {
      concept: context.concept,
      attempt,
      mode,
      patchCount: patchSet.patches.length,
      codeLength: cleaned.code.length,
      patchLengths: patchSet.patches.map((patch) => ({
        originalSnippetLength: patch.originalSnippet.length,
        replacementSnippetLength: patch.replacementSnippet.length
      })),
      codePreview: cleaned.code.slice(0, 500)
    })
    return cleaned.code
  } catch (error) {
    // Log API-level failures with context, then rethrow so the caller's
    // retry loop can decide whether to attempt again.
    if (error instanceof OpenAI.APIError) {
      logger.error('OpenAI API error during code retry', {
        attempt,
        status: error.status,
        message: error.message
      })
    }
    throw error
  }
}