|
|
const axios = require('axios') |
|
|
const ProxyHelper = require('../utils/proxyHelper') |
|
|
const logger = require('../utils/logger') |
|
|
const config = require('../../config/config') |
|
|
const apiKeyService = require('./apiKeyService') |
|
|
|
|
|
|
|
|
// Base endpoint for Google's Cloud Code Gemini API (v1). Note that countTokens
// below uses the generativelanguage endpoint instead.
const GEMINI_API_BASE = 'https://cloudcode.googleapis.com/v1'

// Model used when the caller does not specify one ('models/' prefix included).
const DEFAULT_MODEL = 'models/gemini-2.0-flash-exp'
|
|
|
|
|
|
|
|
/**
 * Build a proxy agent for outbound Gemini requests.
 * Thin delegation so all proxy-agent construction stays in ProxyHelper.
 *
 * @param {object|null|undefined} proxyConfig - proxy settings, passed through as-is
 * @returns {*} whatever ProxyHelper.createProxyAgent returns (falsy when no proxy applies)
 */
const createProxyAgent = (proxyConfig) => ProxyHelper.createProxyAgent(proxyConfig)
|
|
|
|
|
|
|
|
/**
 * Translate OpenAI-style chat messages into Gemini request shape.
 *
 * System messages are concatenated (separated by blank lines) into a single
 * system instruction; user messages map to role 'user', assistant messages to
 * role 'model'. Messages with any other role are dropped.
 *
 * @param {Array<{role: string, content: string}>} messages - OpenAI-style messages
 * @returns {{contents: Array<object>, systemInstruction: string}}
 */
function convertMessagesToGemini(messages) {
  const contents = []
  let systemInstruction = ''

  for (const { role, content } of messages) {
    switch (role) {
      case 'system':
        // Accumulate multiple system messages, blank-line separated.
        systemInstruction =
          systemInstruction === '' ? `${content}` : `${systemInstruction}\n\n${content}`
        break
      case 'user':
        contents.push({ role: 'user', parts: [{ text: content }] })
        break
      case 'assistant':
        // Gemini calls the assistant role 'model'.
        contents.push({ role: 'model', parts: [{ text: content }] })
        break
      default:
        // Unknown roles are intentionally ignored.
        break
    }
  }

  return { contents, systemInstruction }
}
|
|
|
|
|
|
|
|
/**
 * Convert a Gemini API response payload into OpenAI chat-completion format.
 *
 * @param {object} geminiResponse - raw Gemini response (or one streamed chunk)
 * @param {string} model - model name echoed back in the OpenAI payload
 * @param {boolean} [stream=false] - true to emit a chat.completion.chunk delta
 * @returns {object|null} OpenAI-format object; null for a streaming chunk with no candidate
 * @throws {Error} when a non-streaming response contains no candidates
 */
function convertGeminiResponse(geminiResponse, model, stream = false) {
  // Map Gemini finish reasons onto OpenAI's finish_reason vocabulary.
  // FIX: previously the raw reason was only lowercased, producing values such
  // as 'max_tokens' that OpenAI clients do not recognize ('length' is the
  // OpenAI term), and streaming chunks reported finish_reason only for STOP.
  const FINISH_REASON_MAP = {
    STOP: 'stop',
    MAX_TOKENS: 'length',
    SAFETY: 'content_filter',
    RECITATION: 'content_filter'
  }
  const mapFinishReason = (reason) =>
    reason ? FINISH_REASON_MAP[reason] || reason.toLowerCase() : null

  const candidate = geminiResponse.candidates?.[0]

  if (stream) {
    // Streaming chunk: a missing candidate simply means "nothing to emit".
    if (!candidate) {
      return null
    }

    const content = candidate.content?.parts?.[0]?.text || ''

    return {
      id: `chatcmpl-${Date.now()}`,
      object: 'chat.completion.chunk',
      created: Math.floor(Date.now() / 1000),
      model,
      choices: [
        {
          index: 0,
          delta: {
            content
          },
          // Intermediate chunks carry no finishReason and yield null here;
          // the terminal chunk reports the mapped reason (stop/length/...).
          finish_reason: mapFinishReason(candidate.finishReason)
        }
      ]
    }
  }

  // Non-streaming: a response with no candidates is an upstream failure.
  if (!candidate) {
    throw new Error('No response from Gemini')
  }

  const content = candidate.content?.parts?.[0]?.text || ''
  const finishReason = mapFinishReason(candidate.finishReason) || 'stop'

  // usageMetadata can be absent; default all counters to zero.
  const usage = geminiResponse.usageMetadata || {
    promptTokenCount: 0,
    candidatesTokenCount: 0,
    totalTokenCount: 0
  }

  return {
    id: `chatcmpl-${Date.now()}`,
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000),
    model,
    choices: [
      {
        index: 0,
        message: {
          role: 'assistant',
          content
        },
        finish_reason: finishReason
      }
    ],
    usage: {
      prompt_tokens: usage.promptTokenCount,
      completion_tokens: usage.candidatesTokenCount,
      total_tokens: usage.totalTokenCount
    }
  }
}
|
|
|
|
|
|
|
|
/**
 * Consume a streaming Gemini response and re-emit it as OpenAI-style SSE
 * chunks (`data: {...}\n\n`, terminated by `data: [DONE]\n\n`).
 *
 * Accepts both SSE-framed ("data: {...}") and bare NDJSON lines from upstream.
 * Also records token usage via apiKeyService when the stream completes with
 * finishReason STOP.
 *
 * @param {object} response - axios response with `responseType: 'stream'`; response.data is the byte stream
 * @param {string} model - model name echoed into each converted chunk
 * @param {string|null} apiKeyId - internal key id for usage accounting (skipped when falsy)
 * @param {string|null} accountId - account attribution forwarded to recordUsage
 * @yields {string} SSE-formatted strings ready to write to the client
 */
async function* handleStreamResponse(response, model, apiKeyId, accountId = null) {
  let buffer = ''
  // Most recently seen usageMetadata; upstream repeats/updates it per chunk.
  let totalUsage = {
    promptTokenCount: 0,
    candidatesTokenCount: 0,
    totalTokenCount: 0
  }

  try {
    for await (const chunk of response.data) {
      buffer += chunk.toString()

      // Process only complete lines; keep the trailing partial line buffered.
      const lines = buffer.split('\n')
      buffer = lines.pop() || ''

      for (const line of lines) {
        if (!line.trim()) {
          continue
        }

        // Strip SSE framing if present; otherwise treat the line as raw JSON.
        let jsonData = line
        if (line.startsWith('data: ')) {
          jsonData = line.substring(6).trim()
        }

        if (!jsonData || jsonData === '[DONE]') {
          continue
        }

        try {
          const data = JSON.parse(jsonData)

          // Track the latest usage numbers so they can be recorded at stream end.
          if (data.usageMetadata) {
            totalUsage = data.usageMetadata
          }

          const openaiResponse = convertGeminiResponse(data, model, true)
          if (openaiResponse) {
            yield `data: ${JSON.stringify(openaiResponse)}\n\n`
          }

          // Gemini marks normal completion with finishReason STOP: record
          // usage, emit the terminator, and end the generator early.
          if (data.candidates?.[0]?.finishReason === 'STOP') {
            if (apiKeyId && totalUsage.totalTokenCount > 0) {
              // Best-effort accounting: failures are logged, never break the stream.
              await apiKeyService
                .recordUsage(
                  apiKeyId,
                  totalUsage.promptTokenCount || 0,
                  totalUsage.candidatesTokenCount || 0,
                  0,
                  0,
                  model,
                  accountId
                )
                .catch((error) => {
                  logger.error('❌ Failed to record Gemini usage:', error)
                })
            }

            yield 'data: [DONE]\n\n'
            return
          }
        } catch (e) {
          // Malformed line — skip it rather than killing the whole stream.
          logger.debug('Error parsing JSON line:', e.message, 'Line:', jsonData)
        }
      }
    }

    // Upstream ended without STOP: flush any complete JSON left in the buffer.
    if (buffer.trim()) {
      try {
        let jsonData = buffer.trim()
        if (jsonData.startsWith('data: ')) {
          jsonData = jsonData.substring(6).trim()
        }

        if (jsonData && jsonData !== '[DONE]') {
          const data = JSON.parse(jsonData)
          const openaiResponse = convertGeminiResponse(data, model, true)
          if (openaiResponse) {
            yield `data: ${JSON.stringify(openaiResponse)}\n\n`
          }
        }
      } catch (e) {
        logger.debug('Error parsing final buffer:', e.message)
      }
    }

    // NOTE(review): when the stream ends without a STOP chunk, usage is not
    // recorded on this path — confirm whether that is intentional.
    yield 'data: [DONE]\n\n'
  } catch (error) {
    // Client aborts are expected; anything else is surfaced as an SSE error event.
    if (error.name === 'CanceledError' || error.code === 'ECONNABORTED') {
      logger.info('Stream request was aborted by client')
    } else {
      logger.error('Stream processing error:', error)
      yield `data: ${JSON.stringify({
        error: {
          message: error.message,
          type: 'stream_error'
        }
      })}\n\n`
    }
  }
}
|
|
|
|
|
|
|
|
/**
 * Send a chat-completion request to the Gemini (Cloud Code) API and return
 * the result in OpenAI chat-completion format.
 *
 * @param {object} options
 * @param {Array<{role: string, content: string}>} options.messages - OpenAI-style messages
 * @param {string} [options.model] - model name; 'models/' prefix added if missing
 * @param {number} [options.temperature]
 * @param {number} [options.maxTokens] - mapped to generationConfig.maxOutputTokens
 * @param {boolean} [options.stream] - when true, resolves to an async generator of SSE strings
 * @param {string} options.accessToken - OAuth bearer token for the API
 * @param {object} [options.proxy] - proxy config understood by ProxyHelper
 * @param {string} [options.apiKeyId] - internal key id for usage accounting
 * @param {AbortSignal} [options.signal] - client abort signal
 * @param {string} [options.projectId] - GCP project id; switches to the project-scoped URL
 * @param {string} [options.location] - GCP location for project-scoped requests
 * @param {string|null} [options.accountId] - account attribution for usage accounting
 * @returns {Promise<object|AsyncGenerator<string>>} OpenAI-format response, or SSE generator when stream=true
 * @throws {Error} with `status` and `error` fields: 499 on client cancel, upstream
 *   status for API errors, 500 for network failures
 */
async function sendGeminiRequest({
  messages,
  model = DEFAULT_MODEL,
  temperature = 0.7,
  maxTokens = 4096,
  stream = false,
  accessToken,
  proxy,
  apiKeyId,
  signal,
  projectId,
  location = 'us-central1',
  accountId = null
}) {
  // Gemini expects the fully-qualified 'models/<name>' form.
  if (!model.startsWith('models/')) {
    model = `models/${model}`
  }

  // Translate OpenAI-style messages into Gemini contents + system instruction.
  const { contents, systemInstruction } = convertMessagesToGemini(messages)

  const requestBody = {
    contents,
    generationConfig: {
      temperature,
      maxOutputTokens: maxTokens,
      candidateCount: 1
    }
  }

  if (systemInstruction) {
    requestBody.systemInstruction = { parts: [{ text: systemInstruction }] }
  }

  // Project-scoped URL when a projectId is supplied; bare model path otherwise.
  // NOTE(review): '?alt=sse' is appended even for non-streaming requests —
  // confirm the cloudcode endpoint returns plain JSON in that case.
  let apiUrl
  if (projectId) {
    apiUrl = `${GEMINI_API_BASE}/projects/${projectId}/locations/${location}/${model}:${stream ? 'streamGenerateContent' : 'generateContent'}?alt=sse`
    logger.debug(`Using project-specific URL with projectId: ${projectId}, location: ${location}`)
  } else {
    apiUrl = `${GEMINI_API_BASE}/${model}:${stream ? 'streamGenerateContent' : 'generateContent'}?alt=sse`
    logger.debug('Using standard URL without projectId')
  }

  const axiosConfig = {
    method: 'POST',
    url: apiUrl,
    headers: {
      Authorization: `Bearer ${accessToken}`,
      'Content-Type': 'application/json'
    },
    data: requestBody,
    timeout: config.requestTimeout || 600000
  }

  // Route through a proxy agent when configured; disable axios's built-in
  // proxy handling so the agent is the single source of truth.
  const proxyAgent = createProxyAgent(proxy)
  if (proxyAgent) {
    axiosConfig.httpAgent = proxyAgent
    axiosConfig.httpsAgent = proxyAgent
    axiosConfig.proxy = false
    logger.info(`🌐 Using proxy for Gemini API request: ${ProxyHelper.getProxyDescription(proxy)}`)
  } else {
    logger.debug('🌐 No proxy configured for Gemini API request')
  }

  // Let the caller cancel the upstream request mid-flight.
  if (signal) {
    axiosConfig.signal = signal
    logger.debug('AbortController signal attached to request')
  }

  if (stream) {
    axiosConfig.responseType = 'stream'
  }

  try {
    logger.debug('Sending request to Gemini API')
    const response = await axios(axiosConfig)

    if (stream) {
      // Hand the raw stream to the SSE translator; it records usage itself.
      return handleStreamResponse(response, model, apiKeyId, accountId)
    } else {
      const openaiResponse = convertGeminiResponse(response.data, model, false)

      // Best-effort usage accounting; failures are logged, never fail the request.
      if (apiKeyId && openaiResponse.usage) {
        await apiKeyService
          .recordUsage(
            apiKeyId,
            openaiResponse.usage.prompt_tokens || 0,
            openaiResponse.usage.completion_tokens || 0,
            0,
            0,
            model,
            accountId
          )
          .catch((error) => {
            logger.error('❌ Failed to record Gemini usage:', error)
          })
      }

      return openaiResponse
    }
  } catch (error) {
    // Client-side cancellation is reported as HTTP 499 (client closed request).
    if (error.name === 'CanceledError' || error.code === 'ECONNABORTED') {
      logger.info('Gemini request was aborted by client')
      const err = new Error('Request canceled by client')
      err.status = 499
      err.error = {
        message: 'Request canceled by client',
        type: 'canceled',
        code: 'request_canceled'
      }
      throw err
    }

    logger.error('Gemini API request failed:', error.response?.data || error.message)

    // Upstream returned an HTTP error: surface Gemini's own message and status.
    if (error.response) {
      const geminiError = error.response.data?.error
      const err = new Error(geminiError?.message || 'Gemini API request failed')
      err.status = error.response.status
      err.error = {
        message: geminiError?.message || 'Gemini API request failed',
        type: geminiError?.code || 'api_error',
        code: geminiError?.code
      }
      throw err
    }

    // No response at all: treat as a network-level failure.
    const err = new Error(error.message)
    err.status = 500
    err.error = {
      message: error.message,
      type: 'network_error'
    }
    throw err
  }
}
|
|
|
|
|
|
|
|
/**
 * List the Gemini models available to the given credentials, normalized to
 * OpenAI's /v1/models entry shape.
 *
 * Only models that support generateContent are returned. On any failure a
 * static single-model fallback list is returned instead of throwing.
 *
 * @param {string} accessToken - OAuth bearer token
 * @param {object} [proxy] - proxy config understood by ProxyHelper
 * @param {string} [projectId] - GCP project id; switches to the project-scoped URL
 * @param {string} [location] - GCP location for project-scoped requests
 * @returns {Promise<Array<{id: string, object: string, created: number, owned_by: string}>>}
 */
async function getAvailableModels(accessToken, proxy, projectId, location = 'us-central1') {
  const scopedPath = projectId
    ? `projects/${projectId}/locations/${location}/models`
    : 'models'
  const apiUrl = `${GEMINI_API_BASE}/${scopedPath}`
  if (projectId) {
    logger.debug(`Fetching models with projectId: ${projectId}, location: ${location}`)
  } else {
    logger.debug('Fetching models without projectId')
  }

  const axiosConfig = {
    method: 'GET',
    url: apiUrl,
    headers: {
      Authorization: `Bearer ${accessToken}`
    },
    timeout: config.requestTimeout || 600000
  }

  // Route through a proxy agent when configured; disable axios's own proxying.
  const agent = createProxyAgent(proxy)
  if (agent) {
    axiosConfig.httpAgent = agent
    axiosConfig.httpsAgent = agent
    axiosConfig.proxy = false
    logger.info(
      `🌐 Using proxy for Gemini models request: ${ProxyHelper.getProxyDescription(proxy)}`
    )
  } else {
    logger.debug('🌐 No proxy configured for Gemini models request')
  }

  try {
    const { data } = await axios(axiosConfig)
    const modelList = data.models || []

    // Keep only chat-capable models and reshape them into OpenAI model entries.
    return modelList
      .filter((entry) => entry.supportedGenerationMethods?.includes('generateContent'))
      .map((entry) => ({
        id: entry.name.replace('models/', ''),
        object: 'model',
        created: Date.now() / 1000,
        owned_by: 'google'
      }))
  } catch (error) {
    logger.error('Failed to get Gemini models:', error)

    // Degrade gracefully with a known-good default model entry.
    return [
      {
        id: 'gemini-2.0-flash-exp',
        object: 'model',
        created: Date.now() / 1000,
        owned_by: 'google'
      }
    ]
  }
}
|
|
|
|
|
|
|
|
/**
 * Count tokens for the given content via the Gemini countTokens endpoint.
 *
 * Note this deliberately targets the generativelanguage.googleapis.com
 * endpoint (v1beta) rather than the cloudcode base used elsewhere in this
 * module.
 *
 * @param {object} options
 * @param {string} options.model - model name; 'models/' prefix added if missing
 * @param {string|object|Array} options.content - a plain string, a single
 *   Gemini content object ({ parts } / { role }), an array of content
 *   objects, or any other value passed through as `contents`
 * @param {string} options.accessToken - OAuth bearer token
 * @param {object} [options.proxy] - proxy config understood by ProxyHelper
 * @param {string} [options.projectId] - GCP project id; switches to the project-scoped URL
 * @param {string} [options.location] - GCP location for project-scoped requests
 * @returns {Promise<object>} `{ totalTokens, totalBillableCharacters, ...raw response }`
 * @throws {Error} with `status` and `error` fields mirroring the upstream failure
 */
async function countTokens({
  model,
  content,
  accessToken,
  proxy,
  projectId,
  location = 'us-central1'
}) {
  // Gemini expects the fully-qualified 'models/<name>' form.
  if (!model.startsWith('models/')) {
    model = `models/${model}`
  }

  // Normalize the various accepted content shapes into { contents: [...] }.
  let requestBody
  if (Array.isArray(content)) {
    // Already an array of Gemini content objects.
    requestBody = { contents: content }
  } else if (typeof content === 'string') {
    // Bare string: wrap in a single text part.
    requestBody = {
      contents: [
        {
          parts: [{ text: content }]
        }
      ]
    }
  } else if (content.parts || content.role) {
    // A single Gemini content object.
    requestBody = { contents: [content] }
  } else {
    // Unknown shape: pass through and let the API validate it.
    requestBody = { contents: content }
  }

  // countTokens lives on the public Generative Language API, not cloudcode.
  const GENERATIVE_API_BASE = 'https://generativelanguage.googleapis.com/v1beta'
  let apiUrl
  if (projectId) {
    apiUrl = `${GENERATIVE_API_BASE}/projects/${projectId}/locations/${location}/${model}:countTokens`
    logger.debug(
      `Using project-specific countTokens URL with projectId: ${projectId}, location: ${location}`
    )
  } else {
    apiUrl = `${GENERATIVE_API_BASE}/${model}:countTokens`
    logger.debug('Using standard countTokens URL without projectId')
  }

  const axiosConfig = {
    method: 'POST',
    url: apiUrl,
    headers: {
      Authorization: `Bearer ${accessToken}`,
      'Content-Type': 'application/json',
      // NOTE(review): assumes axios drops headers whose value is undefined
      // when projectId is absent — verify no literal 'undefined' is sent.
      'X-Goog-User-Project': projectId || undefined
    },
    data: requestBody,
    timeout: config.requestTimeout || 600000
  }

  // Route through a proxy agent when configured; disable axios's own proxying.
  const proxyAgent = createProxyAgent(proxy)
  if (proxyAgent) {
    axiosConfig.httpAgent = proxyAgent
    axiosConfig.httpsAgent = proxyAgent
    axiosConfig.proxy = false
    logger.info(
      `🌐 Using proxy for Gemini countTokens request: ${ProxyHelper.getProxyDescription(proxy)}`
    )
  } else {
    logger.debug('🌐 No proxy configured for Gemini countTokens request')
  }

  try {
    logger.debug(`Sending countTokens request to: ${apiUrl}`)
    logger.debug(`Request body: ${JSON.stringify(requestBody, null, 2)}`)
    const response = await axios(axiosConfig)

    // Guarantee the two headline fields exist, then spread the raw response.
    return {
      totalTokens: response.data.totalTokens || 0,
      totalBillableCharacters: response.data.totalBillableCharacters || 0,
      ...response.data
    }
  } catch (error) {
    // Log the full request context to make upstream failures diagnosable.
    logger.error(`Gemini countTokens API request failed for URL: ${apiUrl}`)
    logger.error(
      'Request config:',
      JSON.stringify(
        {
          url: apiUrl,
          headers: axiosConfig.headers,
          data: requestBody
        },
        null,
        2
      )
    )
    logger.error('Error details:', error.response?.data || error.message)

    // Upstream returned an HTTP error: surface Gemini's message and status.
    if (error.response) {
      const geminiError = error.response.data?.error
      const errorObj = new Error(
        geminiError?.message ||
          `Gemini countTokens API request failed (Status: ${error.response.status})`
      )
      errorObj.status = error.response.status
      errorObj.error = {
        message:
          geminiError?.message ||
          `Gemini countTokens API request failed (Status: ${error.response.status})`,
        type: geminiError?.code || 'api_error',
        code: geminiError?.code
      }
      throw errorObj
    }

    // No response at all: treat as a network-level failure.
    const errorObj = new Error(error.message)
    errorObj.status = 500
    errorObj.error = {
      message: error.message,
      type: 'network_error'
    }
    throw errorObj
  }
}
|
|
|
|
|
// Public API: OpenAI-compatible Gemini relay helpers plus the pure
// message/response converters (exported for reuse and testing).
module.exports = {
  sendGeminiRequest,
  getAvailableModels,
  convertMessagesToGemini,
  convertGeminiResponse,
  countTokens
}
|
|
|