| | import { GoogleGenAI } from '@google/genai'; |
| | import fs from 'fs'; |
| | import path from 'path'; |
| | import mime from 'mime'; |
| |
|
| | |
// Load system prompts once at module load; failures here fail fast at startup.
// NOTE(review): path.resolve('./prompts.json') resolves against the process
// CWD, not this module's directory — confirm the app is always launched from
// the project root, or switch to new URL('./prompts.json', import.meta.url).
const promptsPath = path.resolve('./prompts.json');
const prompts = JSON.parse(fs.readFileSync(promptsPath, 'utf8'));

// Single shared GenAI client; expects GEMINI_API_KEY in the environment.
const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
| |
|
/**
 * Shared text-generation helper for the chat-style calls.
 *
 * Sends `history` plus one new user turn to `modelId` with search grounding,
 * HIGH thinking, and the given system prompt.
 *
 * @param {string} modelId - model identifier to call.
 * @param {string} systemPrompt - system instruction text (from prompts.json).
 * @param {Array<object>} history - prior SDK Content turns.
 * @param {string} input - new user message text.
 * @param {Array<object>} extraParts - extra parts (e.g. inline images) appended to the user turn.
 * @param {string} label - prefix for error logs ("PM", "Worker").
 * @returns {Promise<{text: string, usage: object}>}
 * @throws rethrows any SDK error after logging.
 */
const callChatModel = async (modelId, systemPrompt, history, input, extraParts, label) => {
  const config = {
    thinkingConfig: { thinkingLevel: 'HIGH' },
    tools: [{ googleSearch: {} }],
    systemInstruction: {
      parts: [{ text: systemPrompt }],
    },
  };

  const contents = [
    ...history,
    { role: 'user', parts: [{ text: input }, ...extraParts] },
  ];

  try {
    const response = await genAI.models.generateContent({
      model: modelId,
      config,
      contents,
    });

    // SDK response fields are camelCase: `total_token_count` does not exist
    // on usageMetadata and always logged `undefined` before.
    console.log(response.usageMetadata?.totalTokenCount);

    return {
      text: response.text,
      usage: response.usageMetadata,
    };
  } catch (error) {
    console.error(`${label} AI Error:`, error);
    throw error;
  }
};

/**
 * Shared helper for the analyst JSON modes: one user turn, JSON response
 * mime type, analyst system prompt; parses the reply and attaches usage.
 *
 * @param {string} input - fully formatted mode prompt.
 * @param {string} label - prefix for error logs ("Analyst", "Grading").
 * @returns {Promise<object>} parsed JSON payload spread with `usage`.
 * @throws rethrows SDK or JSON.parse errors after logging.
 */
const callAnalyst = async (input, label) => {
  try {
    const response = await genAI.models.generateContent({
      model: 'gemini-3-flash-preview',
      config: {
        responseMimeType: 'application/json',
        systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] },
      },
      contents: [{ role: 'user', parts: [{ text: input }] }],
    });

    // Throws if the model returned malformed JSON; caller handles it.
    const parsed = JSON.parse(response.text);

    console.log(response.usageMetadata?.totalTokenCount);

    return {
      ...parsed,
      usage: response.usageMetadata,
    };
  } catch (e) {
    console.error(`${label} Error:`, e);
    throw e;
  }
};

/**
 * AIEngine — thin wrapper around the Google GenAI SDK exposing the model
 * calls this app needs: PM chat (Pro), worker chat with optional images
 * (Flash), analyst question/grading JSON modes, and image generation.
 */
export const AIEngine = {
  /**
   * One PM conversation turn on the Pro model.
   * @param {Array<object>} history @param {string} input
   */
  callPM: async (history, input) =>
    callChatModel('gemini-3-pro-preview', prompts.pm_system_prompt, history, input, [], 'PM'),

  /**
   * One worker turn on the Flash model, optionally attaching base64 PNGs.
   * @param {Array<object>} history @param {string} input
   * @param {Array<string>} [images] - base64 strings, with or without a data-URL prefix.
   */
  callWorker: async (history, input, images = []) => {
    const imageParts = (images ?? []).map((base64String) => ({
      inlineData: {
        mimeType: 'image/png',
        // Strip a data-URL prefix if the caller passed one.
        data: base64String.replace(/^data:image\/\w+;base64,/, ''),
      },
    }));
    return callChatModel(
      'gemini-3-flash-preview',
      prompts.worker_system_prompt,
      history,
      input,
      imageParts,
      'Worker',
    );
  },

  /**
   * Analyst MODE 1: vet a game idea and produce 3 follow-up questions as JSON.
   * @param {string} description
   */
  generateEntryQuestions: async (description) =>
    callAnalyst(
      `[MODE 1: QUESTIONS]\nAnalyze this game idea: "${description}". Check for TOS violations or nonsense. If good, ask 3 questions. Output ONLY raw JSON.`,
      'Analyst',
    ),

  /**
   * Analyst MODE 2: grade feasibility of the idea given the user's answers.
   * @param {string} description @param {object} answers
   */
  gradeProject: async (description, answers) =>
    callAnalyst(
      `[MODE 2: GRADING]\nIdea: "${description}"\nUser Answers: ${JSON.stringify(answers)}\n\nAssess feasibility. Output JSON with title and rating.`,
      'Grading',
    ),

  /**
   * Generate a single image for `prompt` via the image model's stream API.
   * Best-effort by design: returns null on any error instead of throwing.
   * @param {string} prompt - substituted into prompts.image_gen_prompt.
   * @returns {Promise<{image: string|null, usage: number}|null>}
   */
  generateImage: async (prompt) => {
    const finalPrompt = prompts.image_gen_prompt.replace('{{DESCRIPTION}}', prompt);

    try {
      // generateContentStream returns an async iterable of chunks; there is
      // no aggregated `.response` promise in this SDK (that was the old
      // @google/generative-ai API), so we just consume the stream.
      const stream = await genAI.models.generateContentStream({
        model: 'gemini-2.5-flash-image',
        config: { responseModalities: ['IMAGE', 'TEXT'] },
        contents: [{ role: 'user', parts: [{ text: finalPrompt }] }],
      });

      let finalDataUrl = null;

      for await (const chunk of stream) {
        const parts = chunk.candidates?.[0]?.content?.parts ?? [];
        // Scan every part (the image is not guaranteed to be parts[0]);
        // the last image part seen wins.
        for (const part of parts) {
          if (part.inlineData?.data) {
            const data = part.inlineData.data.replace(/\s+/g, '');
            const mimeType = part.inlineData.mimeType || 'image/png';
            finalDataUrl = `data:${mimeType};base64,${data}`;
          }
        }
      }

      return {
        image: finalDataUrl,
        // TODO(review): flat estimate — replace with real token usage from
        // the stream's usageMetadata once confirmed against billing needs.
        usage: 2000,
      };
    } catch (error) {
      console.error('Image Gen Error:', error);
      // Deliberate best-effort: callers treat null as "no image available".
      return null;
    }
  },
};