import { GoogleGenAI } from '@google/genai';
import fs from 'fs';
import path from 'path';
import mime from 'mime';
// Load prompts safely
const promptsPath = path.resolve('./prompts.json');
const prompts = JSON.parse(fs.readFileSync(promptsPath, 'utf8'));
// Initialize SDK
// Make sure process.env.GEMINI_API_KEY is set in your environment
const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
export const AIEngine = {
/**
* 1. PROJECT MANAGER (Reasoning & Delegation)
* Uses High-Reasoning Model
*/
callPM: async (history, input) => {
const modelId = 'gemini-3-pro-preview';
const config = {
thinkingConfig: { thinkingLevel: 'HIGH' },
tools: [{ googleSearch: {} }],
systemInstruction: {
parts: [{ text: prompts.pm_system_prompt }]
}
};
const contents = [
...history,
{ role: 'user', parts: [{ text: input }] }
];
try {
const response = await genAI.models.generateContent({
model: modelId,
config,
contents,
});
console.log(response.usageMetadata.total_token_count);
// Return both text and usage metadata
return {
text: response.text,
usage: response.usageMetadata
};
} catch (error) {
console.error("PM AI Error:", error);
throw error;
}
},
/**
* 2. WORKER (Coding & Execution)
* Uses Flash Model (Fast) + Image Support
*/
callWorker: async (history, input, images = []) => {
const modelId = "gemini-3-flash-preview"; // 'gemini-3-pro-preview';
const config = {
thinkingConfig: { thinkingLevel: 'HIGH' },
// tools: [{ googleSearch: {} }],
systemInstruction: {
parts: [{ text: prompts.worker_system_prompt }]
}
};
const currentParts = [{ text: input }];
// Handle Image Injection (Base64)
if (images && images.length > 0) {
images.forEach(base64String => {
// Strip prefix if present
const cleanData = base64String.replace(/^data:image\/\w+;base64,/, "");
currentParts.push({
inlineData: {
mimeType: "image/png",
data: cleanData
}
});
});
}
const contents = [
...history,
{ role: 'user', parts: currentParts }
];
try {
const response = await genAI.models.generateContent({
model: modelId,
config,
contents,
});
console.log(response.usageMetadata.total_token_count);
// Return both text and usage metadata
return {
text: response.text,
usage: response.usageMetadata
};
} catch (error) {
console.error("Worker AI Error:", error);
throw error;
}
},
/**
* 3. ONBOARDING ANALYST (Question Generation)
* Returns STRICT JSON for the Frontend
*/
generateEntryQuestions: async (description) => {
const modelId = "gemini-3-flash-preview"; // 'gemini-2.5-flash';
// Using the updated prompt which handles REJECTED/ACCEPTED logic
const input = `[MODE 1: QUESTIONS]\nAnalyze this game idea: "${description}". Check for TOS violations or nonsense. If good, ask 3 questions. Output ONLY raw JSON.`;
try {
const response = await genAI.models.generateContent({
model: modelId,
config: {
responseMimeType: "application/json",
systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
},
contents: [{ role: 'user', parts: [{ text: input }] }]
});
const text = response.text;
const parsed = JSON.parse(text);
console.log(response.usageMetadata.total_token_count)
// Attach usage to the JSON object
return {
...parsed,
usage: response.usageMetadata
};
} catch (e) {
console.error("Analyst Error:", e);
// On failure, we don't return usage, so no charge applies
// return { status: "ACCEPTED", questions: [{ id: "fallback", label: "Please describe the core gameplay loop in detail.", type: "textarea" }] };
throw e;
}
},
/**
* 4. PROJECT GRADER (Feasibility Check)
* Returns STRICT JSON
*/
gradeProject: async (description, answers) => {
const modelId = "gemini-3-flash-preview"; // 'gemini-2.5-flash';
// Using the updated prompt to respect Title and relaxed Grading
const input = `[MODE 2: GRADING]\nIdea: "${description}"\nUser Answers: ${JSON.stringify(answers)}\n\nAssess feasibility. Output JSON with title and rating.`;
try {
const response = await genAI.models.generateContent({
model: modelId,
config: {
responseMimeType: "application/json",
systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
},
contents: [{ role: 'user', parts: [{ text: input }] }]
});
const parsed = JSON.parse(response.text);
console.log(response.usageMetadata.total_token_count);
// Attach usage to the JSON object
return {
...parsed,
usage: response.usageMetadata
};
} catch (e) {
console.error("Grading Error:", e);
// On failure, no usage returned
// return { feasibility: 80, rating: "B", title: "Untitled Project", summary: "Standard project structure detected." };
throw e;
}
},
/**
 * 5. IMAGE GENERATOR (Visual Assets)
 * Streams an image from Gemini 2.5 Flash Image and returns it as a data URL.
 *
 * @param {string} prompt - Asset description; injected into the image_gen_prompt template.
 * @returns {Promise<{image: string|null, usage: object|number}|null>} Data URL plus usage
 *   metadata (or the legacy flat estimate of 2000 when the API sends none);
 *   `null` on any error — backend treats null as a no-op.
 */
generateImage: async (prompt) => {
  // Inject the prompt template from JSON to ensure adherence to instructions
  const finalPrompt = prompts.image_gen_prompt.replace('{{DESCRIPTION}}', prompt);
  const config = {
    responseModalities: ['IMAGE', 'TEXT'],
  };
  const model = 'gemini-2.5-flash-image';
  const contents = [
    {
      role: 'user',
      parts: [{ text: finalPrompt }],
    },
  ];
  try {
    const stream = await genAI.models.generateContentStream({
      model,
      config,
      contents,
    });
    let finalDataUrl = null;
    let usage = null;
    // We do NOT return from inside the loop: we drain the whole stream so the
    // final chunk's usage metadata is available at the end.
    for await (const chunk of stream) {
      // BUG FIX: the stream has no `.response` property to await afterwards
      // (that belongs to the legacy @google/generative-ai SDK). In
      // @google/genai each chunk carries usageMetadata; keep the latest.
      if (chunk.usageMetadata) {
        usage = chunk.usageMetadata;
      }
      const parts = chunk.candidates?.[0]?.content?.parts;
      if (!parts) {
        continue;
      }
      // FIX: scan every part, not just parts[0] — the image part may arrive
      // alongside a text part in the same chunk.
      for (const part of parts) {
        if (part.inlineData?.data) {
          // Strip whitespace the API may interleave, then round-trip through
          // Buffer to normalize/validate the base64 payload.
          const rawB64 = part.inlineData.data.replace(/\s+/g, "");
          const mimeType = part.inlineData.mimeType || "image/png";
          const base64 = Buffer.from(rawB64, "base64").toString("base64");
          finalDataUrl = `data:${mimeType};base64,${base64}`;
        }
      }
    }
    // NOTE(review): previously usage was hard-coded to 2000. Real metadata is
    // now returned when available; the flat 2000 remains the fallback.
    // Confirm the billing backend accepts a usageMetadata object here, as the
    // other AIEngine methods already return one.
    return {
      image: finalDataUrl,
      usage: usage ?? 2000
    };
  } catch (error) {
    console.error("Image Gen Error:", error);
    // On failure, return null (logic in backend handles null as no-op)
    return null;
  }
}
};