|
|
|
|
|
|
|
|
|
|
|
import config from '../config/config.js'; |
|
|
import { REASONING_EFFORT_MAP } from '../constants/index.js'; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Normalize OpenAI-style request parameters into the internal shape.
 *
 * @param {object} [params={}] - Raw OpenAI request parameters.
 * @param {number} [params.max_tokens] - Legacy token cap.
 * @param {number} [params.max_completion_tokens] - Newer OpenAI token cap; used when max_tokens is absent.
 * @param {number} [params.temperature]
 * @param {number} [params.top_p]
 * @param {number} [params.top_k]
 * @param {number} [params.thinking_budget] - Explicit budget; wins over reasoning_effort.
 * @param {string} [params.reasoning_effort] - Mapped to a budget via REASONING_EFFORT_MAP.
 * @returns {{max_tokens: number, temperature: number, top_p: number, top_k: number, thinking_budget?: number}}
 */
export function normalizeOpenAIParameters(params = {}) {
  const normalized = {
    // Accept either token-cap field; toGenerationConfig expects max_tokens to be set.
    max_tokens: params.max_tokens ?? params.max_completion_tokens ?? config.defaults.max_tokens,
    temperature: params.temperature ?? config.defaults.temperature,
    top_p: params.top_p ?? config.defaults.top_p,
    top_k: params.top_k ?? config.defaults.top_k,
  };

  // An explicit thinking_budget (including 0, meaning "disabled") wins over reasoning_effort.
  if (params.thinking_budget !== undefined) {
    normalized.thinking_budget = params.thinking_budget;
  } else if (params.reasoning_effort !== undefined) {
    const mapped = REASONING_EFFORT_MAP[params.reasoning_effort];
    // Ignore unrecognized effort levels rather than storing `undefined`.
    if (mapped !== undefined) {
      normalized.thinking_budget = mapped;
    }
  }

  return normalized;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Normalize Claude-style request parameters into the internal shape.
 *
 * Known sampling fields get config defaults when absent; Claude's structured
 * `thinking` block is flattened into `thinking_budget`; every other field is
 * passed through unchanged (and takes precedence over derived values).
 *
 * @param {object} [params={}] - Raw Claude request parameters.
 * @returns {object} Normalized parameters plus passthrough fields.
 */
export function normalizeClaudeParameters(params = {}) {
  const { max_tokens, temperature, top_p, top_k, thinking, ...passthrough } = params;

  const result = {
    max_tokens: max_tokens ?? config.defaults.max_tokens,
    temperature: temperature ?? config.defaults.temperature,
    top_p: top_p ?? config.defaults.top_p,
    top_k: top_k ?? config.defaults.top_k,
  };

  // Flatten the structured thinking block into a single numeric budget.
  if (thinking !== null && typeof thinking === 'object') {
    const { type, budget_tokens } = thinking;
    if (type === 'disabled') {
      // Explicitly off: budget of 0.
      result.thinking_budget = 0;
    } else if (type === 'enabled' && budget_tokens !== undefined) {
      result.thinking_budget = budget_tokens;
    }
  }

  // Passthrough fields are spread last so caller-supplied keys win.
  return { ...result, ...passthrough };
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Normalize a Gemini `generationConfig` into the internal shape.
 *
 * Maps Gemini's camelCase fields to internal snake_case names, applying
 * config defaults for anything missing. `thinkingConfig.includeThoughts === false`
 * forces a zero budget and takes precedence over `thinkingBudget`.
 *
 * @param {object} [generationConfig={}] - Raw Gemini generation config.
 * @returns {{max_tokens: number, temperature: number, top_p: number, top_k: number, thinking_budget?: number}}
 */
export function normalizeGeminiParameters(generationConfig = {}) {
  const { maxOutputTokens, temperature, topP, topK, thinkingConfig } = generationConfig;

  const normalized = {
    max_tokens: maxOutputTokens ?? config.defaults.max_tokens,
    temperature: temperature ?? config.defaults.temperature,
    top_p: topP ?? config.defaults.top_p,
    top_k: topK ?? config.defaults.top_k,
  };

  if (thinkingConfig !== null && typeof thinkingConfig === 'object') {
    if (thinkingConfig.includeThoughts === false) {
      // Thoughts explicitly disabled: zero budget regardless of thinkingBudget.
      normalized.thinking_budget = 0;
    } else if (thinkingConfig.thinkingBudget !== undefined) {
      normalized.thinking_budget = thinkingConfig.thinkingBudget;
    }
  }

  return normalized;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Dispatch parameter normalization by API format.
 *
 * @param {object} params - Raw request parameters in the given format.
 * @param {string} format - One of 'openai', 'claude', 'gemini'.
 * @returns {object} Normalized internal parameters.
 */
export function normalizeParameters(params, format) {
  const byFormat = new Map([
    ['openai', normalizeOpenAIParameters],
    ['claude', normalizeClaudeParameters],
    ['gemini', normalizeGeminiParameters],
  ]);
  // Unknown formats fall back to OpenAI-style normalization.
  const normalize = byFormat.get(format) ?? normalizeOpenAIParameters;
  return normalize(params);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Build a Gemini-style generationConfig from normalized parameters.
 *
 * @param {object} normalized - Output of one of the normalize* functions.
 * @param {boolean} enableThinking - Whether thinking is requested at all.
 * @param {string} [actualModelName] - Backend model name; affects topP handling.
 * @returns {object} generationConfig ready to send upstream.
 */
export function toGenerationConfig(normalized, enableThinking, actualModelName) {
  // Resolve the effective budget; an explicit budget of 0 turns thinking off.
  let budget = 0;
  let thinkingOn = enableThinking;
  if (enableThinking) {
    const requested = normalized.thinking_budget;
    if (requested === undefined) {
      budget = config.defaults.thinking_budget ?? 1024;
    } else {
      budget = requested;
      if (budget === 0) {
        thinkingOn = false;
      }
    }
  }

  const generationConfig = {
    topP: normalized.top_p,
    topK: normalized.top_k,
    temperature: normalized.temperature,
    candidateCount: 1,
    maxOutputTokens: normalized.max_tokens || normalized.max_completion_tokens,
    thinkingConfig: {
      includeThoughts: thinkingOn,
      thinkingBudget: budget,
    },
  };

  // topP is removed for claude-family models while thinking stays enabled
  // (NOTE(review): presumably an upstream API restriction — confirm).
  if (thinkingOn && actualModelName && actualModelName.includes('claude')) {
    delete generationConfig.topP;
  }

  return generationConfig;
}
|
|
|
|
|
// Aggregate default export mirroring the named exports above, for callers
// that import the whole utility module as a single object.
export default {


normalizeOpenAIParameters,


normalizeClaudeParameters,


normalizeGeminiParameters,


normalizeParameters,


toGenerationConfig


};