// aiEngine.js — Gemini model wrappers: Project Manager, Worker, Onboarding
// Analyst, Project Grader, and Image Generator.
import { GoogleGenAI } from '@google/genai';
import fs from 'fs';
import path from 'path';
import mime from 'mime';

// Prompt templates load once at module start-up; a missing or malformed
// prompts.json fails fast with the underlying fs/JSON error.
const prompts = JSON.parse(
  fs.readFileSync(path.resolve('./prompts.json'), 'utf8')
);

// Gemini SDK client. NOTE(review): GEMINI_API_KEY is not validated here —
// a missing key surfaces later as an auth error on the first API call.
const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
export const AIEngine = {
  /**
   * 1. PROJECT MANAGER (Reasoning & Delegation)
   * Uses the high-reasoning model with HIGH thinking and Google Search.
   *
   * @param {Array<object>} history - Prior turns in SDK `contents` format.
   * @param {string} input - New user message text.
   * @returns {Promise<{text: (string|undefined), usage: (object|undefined)}>}
   * @throws Re-throws any SDK error after logging it.
   */
  callPM: async (history, input) => {
    const modelId = 'gemini-3-pro-preview';
    const config = {
      thinkingConfig: { thinkingLevel: 'HIGH' },
      tools: [{ googleSearch: {} }],
      systemInstruction: {
        parts: [{ text: prompts.pm_system_prompt }]
      }
    };
    const contents = [
      ...history,
      { role: 'user', parts: [{ text: input }] }
    ];
    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config,
        contents,
      });
      // BUG FIX: the JS SDK exposes camelCase fields; `total_token_count`
      // was always undefined. Optional chaining guards missing metadata.
      console.log(response.usageMetadata?.totalTokenCount);
      // Return both text and usage metadata.
      return {
        text: response.text,
        usage: response.usageMetadata
      };
    } catch (error) {
      console.error("PM AI Error:", error);
      throw error;
    }
  },
  /**
   * 2. WORKER (Coding & Execution)
   * Uses the fast Flash model and supports inline base64 images.
   *
   * @param {Array<object>} history - Prior turns in SDK `contents` format.
   * @param {string} input - New user message text.
   * @param {string[]} [images] - Base64 images, with or without a
   *   `data:image/...;base64,` prefix.
   * @returns {Promise<{text: (string|undefined), usage: (object|undefined)}>}
   * @throws Re-throws any SDK error after logging it.
   */
  callWorker: async (history, input, images = []) => {
    const modelId = "gemini-3-flash-preview"; // 'gemini-3-pro-preview';
    const config = {
      thinkingConfig: { thinkingLevel: 'HIGH' },
      // tools: [{ googleSearch: {} }],
      systemInstruction: {
        parts: [{ text: prompts.worker_system_prompt }]
      }
    };
    const currentParts = [{ text: input }];
    // Handle image injection (base64 data URLs or raw base64 strings).
    if (images && images.length > 0) {
      images.forEach((base64String) => {
        // BUG FIX: keep the MIME type declared in the data-URL prefix
        // instead of labelling every image "image/png". Raw base64 input
        // (no prefix) still defaults to image/png as before.
        const match = base64String.match(/^data:(image\/\w+);base64,/);
        const mimeType = match ? match[1] : "image/png";
        const cleanData = base64String.replace(/^data:image\/\w+;base64,/, "");
        currentParts.push({
          inlineData: {
            mimeType,
            data: cleanData
          }
        });
      });
    }
    const contents = [
      ...history,
      { role: 'user', parts: currentParts }
    ];
    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config,
        contents,
      });
      // camelCase SDK field (was `total_token_count`, always undefined).
      console.log(response.usageMetadata?.totalTokenCount);
      // Return both text and usage metadata.
      return {
        text: response.text,
        usage: response.usageMetadata
      };
    } catch (error) {
      console.error("Worker AI Error:", error);
      throw error;
    }
  },
  /**
   * 3. ONBOARDING ANALYST (Question Generation)
   * Returns STRICT JSON for the frontend (REJECTED/ACCEPTED + questions).
   *
   * @param {string} description - The user's game idea.
   * @returns {Promise<object>} Parsed analyst JSON with `usage` attached.
   * @throws On SDK failure or non-JSON model output (so no charge applies).
   */
  generateEntryQuestions: async (description) => {
    const modelId = "gemini-3-flash-preview"; // 'gemini-2.5-flash';
    // MODE 1 prompt handles TOS/nonsense rejection before asking questions.
    const input = `[MODE 1: QUESTIONS]\nAnalyze this game idea: "${description}". Check for TOS violations or nonsense. If good, ask 3 questions. Output ONLY raw JSON.`;
    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config: {
          responseMimeType: "application/json",
          systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
        },
        contents: [{ role: 'user', parts: [{ text: input }] }]
      });
      // Throws SyntaxError if the model violates the JSON-only contract;
      // the catch below logs and rethrows.
      const parsed = JSON.parse(response.text);
      // camelCase SDK field (was `total_token_count`, always undefined).
      console.log(response.usageMetadata?.totalTokenCount);
      // Attach usage to the JSON object.
      return {
        ...parsed,
        usage: response.usageMetadata
      };
    } catch (e) {
      console.error("Analyst Error:", e);
      // On failure we don't return usage, so no charge applies.
      throw e;
    }
  },
  /**
   * 4. PROJECT GRADER (Feasibility Check)
   * Returns STRICT JSON with a title and rating.
   *
   * @param {string} description - The user's game idea.
   * @param {object} answers - The user's answers to the entry questions.
   * @returns {Promise<object>} Parsed grading JSON with `usage` attached.
   * @throws On SDK failure or non-JSON model output (so no charge applies).
   */
  gradeProject: async (description, answers) => {
    const modelId = "gemini-3-flash-preview"; // 'gemini-2.5-flash';
    // MODE 2 prompt respects the user title and applies relaxed grading.
    const input = `[MODE 2: GRADING]\nIdea: "${description}"\nUser Answers: ${JSON.stringify(answers)}\n\nAssess feasibility. Output JSON with title and rating.`;
    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config: {
          responseMimeType: "application/json",
          systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
        },
        contents: [{ role: 'user', parts: [{ text: input }] }]
      });
      const parsed = JSON.parse(response.text);
      // camelCase SDK field (was `total_token_count`, always undefined).
      console.log(response.usageMetadata?.totalTokenCount);
      // Attach usage to the JSON object.
      return {
        ...parsed,
        usage: response.usageMetadata
      };
    } catch (e) {
      console.error("Grading Error:", e);
      // On failure, no usage returned.
      throw e;
    }
  },
  /**
   * 5. IMAGE GENERATOR (Visual Assets)
   * Streams from Gemini 2.5 Flash Image and assembles a data URL.
   *
   * @param {string} prompt - Asset description injected into the template.
   * @returns {Promise<{image: (string|null), usage: (object|null)}|null>}
   *   Data-URL + usage metadata, or null on failure (backend treats null
   *   as a no-op).
   */
  generateImage: async (prompt) => {
    // Inject the prompt template from JSON to ensure adherence to instructions.
    const finalPrompt = prompts.image_gen_prompt.replace('{{DESCRIPTION}}', prompt);
    const config = {
      responseModalities: ['IMAGE', 'TEXT'],
    };
    const model = 'gemini-2.5-flash-image';
    const contents = [
      {
        role: 'user',
        parts: [{ text: finalPrompt }],
      },
    ];
    try {
      const stream = await genAI.models.generateContentStream({
        model,
        config,
        contents,
      });
      let finalDataUrl = null;
      let usageMetadata = null;
      for await (const chunk of stream) {
        // BUG FIX: in @google/genai, generateContentStream returns an async
        // generator — there is no aggregated `.response` promise (that was
        // the legacy @google/generative-ai SDK). Usage metadata arrives on
        // the streamed chunks; keep the most recent (cumulative) value.
        if (chunk.usageMetadata) {
          usageMetadata = chunk.usageMetadata;
        }
        const parts = chunk.candidates?.[0]?.content?.parts;
        if (!parts) {
          continue;
        }
        // BUG FIX: scan every part, not just parts[0], so the image is
        // captured wherever it appears in the chunk.
        for (const part of parts) {
          if (part.inlineData?.data) {
            // Strip whitespace the stream may inject into the base64 payload.
            // (The old Buffer decode/re-encode round trip was a no-op.)
            const data = part.inlineData.data.replace(/\s+/g, "");
            const mimeType = part.inlineData.mimeType || "image/png";
            finalDataUrl = `data:${mimeType};base64,${data}`;
          }
        }
      }
      // BUG FIX: return the real usage metadata instead of the hard-coded
      // 2000 placeholder (the intent shown by the old commented-out code).
      // NOTE(review): callers doing arithmetic on the old flat `2000` must
      // read usage.totalTokenCount — verify against the billing logic.
      return {
        image: finalDataUrl,
        usage: usageMetadata
      };
    } catch (error) {
      console.error("Image Gen Error:", error);
      // On failure, return null (backend handles null as a no-op).
      return null;
    }
  }
};