// services/geminiService.ts — Gemini AI service layer for aibanking.dev
// (uploaded by admin08077, revision e253bc0, 7.34 kB)
import { GoogleGenAI, Type, Modality } from "@google/genai";
import { SimulationResult, AIInsight } from "../types/index";
// Build a fresh GoogleGenAI client on each call so the API key is read from
// the environment at invocation time rather than at module load.
function getAI() {
  return new GoogleGenAI({ apiKey: process.env.API_KEY as string });
}
// Re-export SDK enums so consumers don't have to import @google/genai directly.
export { Type, Modality };
// Languages offered for text-to-speech, with their ISO 639-1 codes.
export const TTS_LANGUAGES = [
  { name: 'English', code: 'en' },
  { name: 'French', code: 'fr' },
  { name: 'German', code: 'de' },
  { name: 'Spanish', code: 'es' },
  { name: 'Portuguese', code: 'pt' },
  { name: 'Chinese', code: 'zh' },
  { name: 'Japanese', code: 'ja' },
  { name: 'Korean', code: 'ko' },
  { name: 'Hindi', code: 'hi' },
];
// Prebuilt TTS voice names with a one-word description of each voice's style.
export const TTS_VOICES = [
  { name: 'Zephyr', style: 'Bright' },
  { name: 'Puck', style: 'Upbeat' },
  { name: 'Charon', style: 'Informative' },
  { name: 'Kore', style: 'Firm' },
  { name: 'Fenrir', style: 'Excitable' },
  { name: 'Leda', style: 'Youthful' }
];
// Decode a base64 string into raw bytes using the platform atob decoder.
// Each character of the resulting binary string is one byte (code points
// are guaranteed to be <= 0xFF by atob).
function decodeBase64(base64: string) {
  const binary = atob(base64);
  return Uint8Array.from(binary, (ch) => ch.charCodeAt(0));
}
/**
 * Convert raw 16-bit PCM bytes into an AudioBuffer for playback.
 *
 * @param data        Raw PCM bytes (interleaved signed 16-bit samples).
 * @param ctx         AudioContext used to allocate the output buffer.
 * @param sampleRate  Sample rate of the PCM stream in Hz.
 * @param numChannels Number of interleaved channels in the stream.
 * @returns           AudioBuffer with samples normalized to [-1, 1).
 */
async function decodeAudioData(data: Uint8Array, ctx: AudioContext, sampleRate: number, numChannels: number): Promise<AudioBuffer> {
  // Drop a trailing odd byte so the length is a whole number of samples.
  const byteLen = data.byteLength - (data.byteLength % 2);
  // fix: honor data.byteOffset — 'data' may be a view into a larger
  // ArrayBuffer, and indexing its buffer from offset 0 would read the
  // wrong bytes. (The original hard-coded offset 0.)
  const dataInt16 = new Int16Array(data.buffer, data.byteOffset, byteLen / 2);
  // fix: floor so an incomplete trailing frame is ignored instead of
  // yielding a fractional frame count when samples % numChannels !== 0.
  const frameCount = Math.floor(dataInt16.length / numChannels);
  const buffer = ctx.createBuffer(numChannels, frameCount, sampleRate);
  // De-interleave: sample i of channel c sits at index i*numChannels + c.
  for (let channel = 0; channel < numChannels; channel++) {
    const channelData = buffer.getChannelData(channel);
    for (let i = 0; i < frameCount; i++) {
      // Normalize signed 16-bit range [-32768, 32767] into [-1, 1).
      channelData[i] = dataInt16[i * numChannels + channel] / 32768.0;
    }
  }
  return buffer;
}
// Lazily-created shared AudioContext, pinned to the 24 kHz rate the TTS
// model's PCM output uses. Falls back to the prefixed webkit constructor
// for older Safari.
let audioContext: AudioContext | null = null;
export const getAudioContext = () => {
  if (audioContext === null) {
    const Ctor = window.AudioContext || (window as any).webkitAudioContext;
    audioContext = new Ctor({ sampleRate: 24000 });
  }
  return audioContext;
};
/**
 * Synthesize speech for `config.text` via the Gemini TTS model and play it
 * through the shared AudioContext.
 *
 * Supports a single prebuilt voice (`voiceName`) or, when `multiSpeaker` is
 * given, two named speakers each mapped to their own prebuilt voice.
 * Optional `directorNotes` are prepended to the text as a style prompt.
 *
 * @returns true once playback has started, false on any failure (errors are
 *          logged, never thrown).
 */
export const synthesizeSpeech = async (config: {
text: string,
voiceName: string,
directorNotes?: string,
multiSpeaker?: { speaker1: string, voice1: string, speaker2: string, voice2: string }
}) => {
try {
const ai = getAI();
// Director notes (if any) lead the prompt so they steer the delivery.
const promptText = config.directorNotes ? `${config.directorNotes} ${config.text}` : config.text;
// Pick the speech config shape by mode: multi-speaker maps each named
// speaker to a prebuilt voice; otherwise a single prebuilt voice is used.
// NOTE(review): typed 'any' because the exact SDK config type isn't
// imported here — confirm against @google/genai's SpeechConfig.
const speechConfig: any = config.multiSpeaker ? {
multiSpeakerVoiceConfig: {
speakerVoiceConfigs: [
{
speaker: config.multiSpeaker.speaker1,
voiceConfig: { prebuiltVoiceConfig: { voiceName: config.multiSpeaker.voice1 } }
},
{
speaker: config.multiSpeaker.speaker2,
voiceConfig: { prebuiltVoiceConfig: { voiceName: config.multiSpeaker.voice2 } }
}
]
}
} : {
voiceConfig: { prebuiltVoiceConfig: { voiceName: config.voiceName } }
};
const response = await ai.models.generateContent({
model: "gemini-2.5-flash-preview-tts",
contents: [{ parts: [{ text: promptText }] }],
config: {
responseModalities: [Modality.AUDIO],
speechConfig
}
});
// The TTS model returns base64-encoded PCM in the first part's inlineData.
const base64Audio = response.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
if (base64Audio) {
const ctx = getAudioContext();
// Autoplay policies can leave the context suspended; resume before playing.
if (ctx.state === 'suspended') await ctx.resume();
// 24000 Hz mono matches the context's sample rate set in getAudioContext.
const audioBuffer = await decodeAudioData(decodeBase64(base64Audio), ctx, 24000, 1);
const source = ctx.createBufferSource();
source.buffer = audioBuffer;
source.connect(ctx.destination);
source.start();
// Returns as soon as playback starts; does not wait for it to finish.
return true;
}
} catch (error) {
console.error("Advanced Synthesis failure:", error);
}
return false;
};
export const speakText = async (text: string) => synthesizeSpeech({ text, voiceName: 'Zephyr' });
/**
 * Thin wrapper around ai.models.generateContent that accepts loosely-shaped
 * `contents` — a plain string, a single content object, or an array — and
 * normalizes it to the array form the SDK expects. Falls back to the
 * default flash model when `model` is empty.
 */
export const callGemini = async (model: string, contents: any, config: any = {}) => {
  const ai = getAI();
  let normalized;
  if (typeof contents === 'string') {
    normalized = [{ parts: [{ text: contents }] }];
  } else if (Array.isArray(contents)) {
    normalized = contents;
  } else {
    normalized = [contents];
  }
  return await ai.models.generateContent({
    model: model || 'gemini-3-flash-preview',
    contents: normalized,
    config
  });
};
/**
 * Parse a natural-language payment command into a structured action via
 * Gemini. Returns the parsed JSON object, or an ERROR action object when
 * the call or parse fails (never throws).
 */
export const processVoiceCommand = async (command: string) => {
  const prompt = `You are the Lumina Neural Parser. Analyze: "${command}". Extract amount, recipient, category. Return ONLY JSON: { "action": "SEND_MONEY", "amount": number, "recipient": string, "category": string, "narration": "Confirming dispatch..." }`;
  try {
    const reply = await callGemini('gemini-3-flash-preview', prompt, { responseMimeType: "application/json" });
    return JSON.parse(reply.text || '{}');
  } catch {
    return { action: "ERROR", narration: "Communication link unstable." };
  }
};
/**
 * Stream financial-advisor responses for `query`, with `context` serialized
 * into the prompt. Returns the SDK's streaming iterator; errors propagate
 * to the caller.
 */
export const getFinancialAdviceStream = async (query: string, context: any) => {
  const prompt = `Context: ${JSON.stringify(context)}. User Query: ${query}`;
  return await getAI().models.generateContentStream({
    model: 'gemini-3-flash-preview',
    contents: [{ parts: [{ text: prompt }] }],
    config: { systemInstruction: "You are the Lumina Quantum Financial Advisor. Be professional, concise, and technically accurate." }
  });
};
/**
 * Fetch a short feed of AI-generated intelligence alerts.
 *
 * @returns Parsed AIInsight[] from the model, or a single static INFO
 *          insight when the call or parse fails (errors are logged).
 */
export const getSystemIntelligenceFeed = async (): Promise<AIInsight[]> => {
  try {
    const ai = getAI();
    const response = await ai.models.generateContent({
      model: 'gemini-3-flash-preview',
      contents: [{ parts: [{ text: "Generate 4 brief institutional financial intelligence alerts for a quantum ledger. Format as JSON array: [{title, description, severity: 'INFO'|'CRITICAL'}]" }] }],
      config: { responseMimeType: "application/json" }
    });
    const parsed = JSON.parse(response.text || '[]');
    // fix: the prompt's schema omits 'id', but AIInsight consumers expect
    // one (see the fallback below). Synthesize sequential ids; the spread
    // keeps any id the model did return. Also guard against the model
    // emitting a non-array payload despite the JSON mime type.
    if (!Array.isArray(parsed)) return [];
    return parsed.map((item, i) => ({ id: String(i + 1), ...item }));
  } catch (error) {
    console.error("Intelligence feed failure:", error);
    return [
      { id: '1', title: "Node Sync Active", description: "All global registry nodes reporting stable parity.", severity: "INFO" }
    ];
  }
};
/**
 * Run a model-driven financial simulation for `prompt`.
 *
 * @returns The parsed simulation result, or a static ERROR result when the
 *          call or parse fails (never throws).
 */
export const runSimulationForecast = async (prompt: string): Promise<SimulationResult> => {
  try {
    const response = await getAI().models.generateContent({
      model: 'gemini-3-flash-preview',
      contents: [{ parts: [{ text: `Perform financial simulation for: ${prompt}. Return JSON.` }] }],
      config: { responseMimeType: "application/json" }
    });
    return JSON.parse(response.text || '{}');
  } catch {
    return { outcomeNarrative: "Simulation failed.", projectedValue: 0, confidenceScore: 0, status: "ERROR", simulationId: "ERR_A1" };
  }
};
/**
 * Request three portfolio strategies for the given context.
 *
 * @returns The model's strategy array, or [] when the call fails or the
 *          payload is not an array.
 */
export const getPortfolioSuggestions = async (context: any) => {
  try {
    const ai = getAI();
    const response = await ai.models.generateContent({
      model: 'gemini-3-flash-preview',
      contents: [{ parts: [{ text: `Strategize for: ${JSON.stringify(context)}. Return 3 strategies as JSON array.` }] }],
      config: { responseMimeType: "application/json" }
    });
    const parsed = JSON.parse(response.text || '[]');
    // fix: validate the parsed shape — the model can return a bare object
    // despite the JSON mime type, and callers expect an array.
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return [];
  }
};