// aiEngine.js
// NOTE(review): this file arrived as a mangled commit-diff rendering, not valid JS.
// The body below is the post-change ("+") version reconstructed from that diff.
// Lines the diff's context window elided are marked NOTE(review) — confirm them
// against the original commit.

import { GoogleGenAI } from '@google/genai';
import fs from 'fs';
import path from 'path';

// Load prompts safely (resolved relative to the process working directory).
const promptsPath = path.resolve('./prompts.json');
const prompts = JSON.parse(fs.readFileSync(promptsPath, 'utf8'));

// Initialize SDK.
// Make sure process.env.GEMINI_API_KEY is set in your environment.
const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

export const AIEngine = {
  /**
   * 1. PROJECT MANAGER (Reasoning & Delegation)
   * Uses High-Reasoning Model
   * @param {Array<object>} history - Prior turns in SDK `contents` format.
   * @param {string} input - New user message.
   * @returns {Promise<string|null>} Reply text, or null on failure.
   */
  callPM: async (history, input) => {
    const modelId = 'gemini-2.0-pro-exp-02-05';

    const config = {
      thinkingConfig: { thinkingLevel: 'HIGH' },
      tools: [{ googleSearch: {} }],
      systemInstruction: {
        parts: [{ text: prompts.pm_system_prompt }]
      }
    };

    const contents = [
      ...history,
      { role: 'user', parts: [{ text: input }] }
    ];

    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config,
        contents,
      });
      // FIX: in @google/genai, `text` is an accessor property, not a method;
      // `response.text()` throws "response.text is not a function".
      return response.text;
    } catch (error) {
      console.error("PM AI Error:", error);
      // NOTE(review): the diff elided the original fallback here — confirm.
      return null;
    }
  },

  /**
   * 2. WORKER (Coding & Execution)
   * Uses Flash Model (Fast) + Image Support
   * @param {Array<object>} history - Prior turns in SDK `contents` format.
   * @param {string} input - New user message.
   * @param {string[]} [images] - Base64 strings, with or without a data: prefix.
   * @returns {Promise<string|null>} Reply text, or null on failure.
   */
  callWorker: async (history, input, images = []) => {
    const modelId = 'gemini-2.0-flash';

    const config = {
      tools: [{ googleSearch: {} }],
      systemInstruction: {
        parts: [{ text: prompts.worker_system_prompt }]
      }
    };

    const currentParts = [{ text: input }];

    // Handle Image Injection (Base64)
    if (images && images.length > 0) {
      images.forEach(base64String => {
        // Strip prefix if present
        const cleanData = base64String.replace(/^data:image\/\w+;base64,/, "");
        currentParts.push({
          inlineData: {
            mimeType: "image/png",
            data: cleanData
          }
        });
      });
    }

    const contents = [
      ...history,
      { role: 'user', parts: currentParts }
    ];

    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config,
        contents,
      });
      // FIX: accessor property, not a method (see callPM).
      return response.text;
    } catch (error) {
      console.error("Worker AI Error:", error);
      // NOTE(review): the diff elided the original fallback here — confirm.
      return null;
    }
  },

  /**
   * 3. ONBOARDING ANALYST (Question Generation)
   * Returns STRICT JSON for the Frontend
   * @param {string} description - The user's game idea.
   * @returns {Promise<Array<object>>} Question descriptors; falls back to a
   *   single textarea question on any error so the frontend never crashes.
   */
  generateEntryQuestions: async (description) => {
    const modelId = 'gemini-2.0-flash';
    const input = `[MODE 1: QUESTIONS]\nAnalyze this game idea: "${description}".\nGenerate 3 crucial technical questions to clarify scope. Output ONLY raw JSON array.`;

    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config: {
          responseMimeType: "application/json",
          systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
        },
        contents: [{ role: 'user', parts: [{ text: input }] }]
      });

      // FIX: accessor property, not a method (see callPM).
      const text = response.text;
      return JSON.parse(text);
    } catch (e) {
      console.error("Analyst Error:", e);
      // Fallback to prevent frontend crash
      return [{ id: "fallback", label: "Please describe the core gameplay loop in detail.", type: "textarea" }];
    }
  },

  /**
   * 4. PROJECT GRADER (Feasibility Check)
   * Returns STRICT JSON
   * @param {string} description - The user's game idea.
   * @param {object} answers - The user's answers to the entry questions.
   * @returns {Promise<object>} Feasibility verdict; static fallback on error.
   */
  gradeProject: async (description, answers) => {
    const modelId = 'gemini-2.0-flash';
    const input = `[MODE 2: GRADING]\nIdea: "${description}"\nUser Answers: ${JSON.stringify(answers)}\n\nAssess feasibility for an AI Builder. Output ONLY raw JSON.`;

    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config: {
          responseMimeType: "application/json",
          systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
        },
        contents: [{ role: 'user', parts: [{ text: input }] }]
      });
      // FIX: accessor property, not a method (see callPM).
      return JSON.parse(response.text);
    } catch (e) {
      console.error("Grading Error:", e);
      return { feasibility: 80, rating: "B", summary: "Standard project structure detected." };
    }
  },

  /**
   * 5. IMAGE GENERATOR (Visual Assets)
   * Uses Imagen 3
   * @param {string} prompt - Text description of the image to generate.
   * @returns {Promise<string|null>} Base64 PNG data, or null on failure.
   */
  generateImage: async (prompt) => {
    const modelId = 'imagen-3.0-generate-001';

    try {
      console.log(`[AIEngine] Generating Image for: "${prompt}"...`);

      // NOTE(review): the diff elided the opening of this call; reconstructed
      // from the visible argument lines — confirm against the original.
      const response = await genAI.models.generateImages({
        model: modelId,
        prompt: prompt,
        config: {
          numberOfImages: 1,
          aspectRatio: "16:9", // Widescreen for thumbnails
          outputMimeType: "image/png"
        }
      });

      const image = response.generatedImages[0];
      if (!image || !image.image) throw new Error("No image data returned");

      // FIX: the SDK exposes base64 data as `imageBytes`; calling
      // .toString('base64') on the image object yields "[object Object]".
      return image.image.imageBytes;
    } catch (error) {
      console.error("Image Gen Error:", error);
      return null;
    }
  }
};