import { TRANSCRIPTION_PROMPT } from '../../../prompts/transcription.js';
import { tryModels, getPrompt, DEFAULT_SAFETY_SETTINGS } from '@/backend/services/ai/utils';

/**
 * Transcribes a media payload by sending it inline to a Gemini model,
 * falling back through a fixed list of candidate models via `tryModels`.
 *
 * @param {string} media - Base64-encoded media data sent as `inlineData`
 *   (presumably base64 per the Gemini inlineData contract — TODO confirm at call sites).
 * @param {string} mimeType - MIME type of the media payload.
 * @param {string} apiKey - API key forwarded to `tryModels`.
 * @param {boolean} [isOwnApi=false] - Currently unread in this function;
 *   kept for signature compatibility with callers. NOTE(review): confirm
 *   whether it should influence model choice or quota handling.
 * @returns {Promise<string>} The raw transcript text from the first model
 *   that produces a non-trivial response.
 * @throws {Error} "EMPTY_RESPONSE_FROM_MODEL" when a model returns no text
 *   (or fewer than 2 non-whitespace characters), signalling `tryModels`
 *   to try the next candidate.
 */
export async function transcribe(media, mimeType, apiKey, isOwnApi = false) {
  // Ordered by preference; `tryModels` is expected to fall through on failure.
  const candidateModels = ['gemini-3-flash-preview', 'gemini-flash-lite-latest'];

  return await tryModels(apiKey, candidateModels, async (ai, model) => {
    const response = await ai.models.generateContent({
      model,
      contents: {
        parts: [
          { inlineData: { data: media, mimeType } },
          { text: "Transcribe accurately and completely. Do not skip any dialogue." },
        ],
      },
      config: {
        // Low temperature: transcription should be deterministic, not creative.
        temperature: 0.1,
        systemInstruction: TRANSCRIPTION_PROMPT,
        safetySettings: DEFAULT_SAFETY_SETTINGS,
        // Disable the model's thinking budget for this task.
        thinkingConfig: { thinkingBudget: 0 },
      },
    });

    const transcript = response.text;
    // Treat a blank or near-empty reply as a failure so the fallback kicks in.
    if (!transcript || transcript.trim().length < 2) {
      throw new Error("EMPTY_RESPONSE_FROM_MODEL");
    }
    return transcript;
  });
}