everydaycats committed on
Commit
09b65a7
·
verified ·
1 Parent(s): 557b22a

Update aiEngine.js

Browse files
Files changed (1) hide show
  1. aiEngine.js +40 -85
aiEngine.js CHANGED
@@ -95,26 +95,18 @@ export const AIEngine = {
95
  },
96
 
97
  /**
98
- * 3. ONBOARDING ANALYST (Question Generation + Gatekeeping)
99
- * Returns STRICT JSON for the Frontend.
100
- * Can return { status: "REJECTED", ... } or { status: "ACCEPTED", questions: ... }
101
  */
102
  generateEntryQuestions: async (description) => {
103
- const modelId = 'gemini-flash-latest';
104
- // Updated prompt to enforce Gatekeeping (TOS/Nonsense check)
105
  const input = `[MODE 1: QUESTIONS]\nAnalyze this game idea: "${description}". Check for TOS violations or nonsense. If good, ask 3 questions. Output ONLY raw JSON.`;
106
 
 
107
  const response = await genAI.models.generateContent({
108
  model: modelId,
109
-
110
-
111
-
112
- config: {
113
-
114
- /*thinkingConfig: {
115
- thinkingBudget: -1,
116
- },
117
- */
118
  responseMimeType: "application/json",
119
  systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
120
  },
@@ -125,32 +117,23 @@ export const AIEngine = {
125
  return JSON.parse(text);
126
  } catch (e) {
127
  console.error("Analyst Error:", e);
128
- // Fallback to prevent frontend crash
129
  return { status: "ACCEPTED", questions: [{ id: "fallback", label: "Please describe the core gameplay loop in detail.", type: "textarea" }] };
130
  }
131
  },
132
 
133
  /**
134
- * 4. PROJECT GRADER (Feasibility Check + Title)
135
  * Returns STRICT JSON
136
  */
137
  gradeProject: async (description, answers) => {
138
- const modelId = 'gemini-flash-latest';
139
- // Updated prompt to ask for Title and Rating
140
  const input = `[MODE 2: GRADING]\nIdea: "${description}"\nUser Answers: ${JSON.stringify(answers)}\n\nAssess feasibility. Output JSON with title and rating.`;
141
 
142
  try {
143
  const response = await genAI.models.generateContent({
144
  model: modelId,
145
-
146
-
147
-
148
  config: {
149
-
150
- /* thinkingConfig: {
151
- thinkingBudget: -1,
152
- },
153
- */
154
  responseMimeType: "application/json",
155
  systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
156
  },
@@ -165,70 +148,42 @@ export const AIEngine = {
165
 
166
  /**
167
  * 5. IMAGE GENERATOR (Visual Assets)
168
- * Uses Imagen 3
169
  */
170
  generateImage: async (prompt) => {
171
- const modelId = 'gemini-2.5-flash-image';
172
-
173
- try {
174
-
175
- const config = {
176
- responseModalities: [
177
- 'IMAGE',
178
- 'TEXT',
179
- ],
180
- };
181
- const model = 'gemini-2.5-flash-image';
182
- const contents = [
183
- {
184
- role: 'user',
185
- parts: [
186
- {
187
- text: prompt,
188
- },
189
- ],
190
- },
191
- ];
192
-
193
- const response = await genAI.models.generateContentStream({
194
- // const response = await genAI.models.generateContent({
195
-
196
- model,
197
- config,
198
- contents,
199
- });
200
- let fileIndex = 0;
201
- for await (const chunk of response) {
202
- if (!chunk.candidates || !chunk.candidates[0].content || !chunk.candidates[0].content.parts) {
203
- continue;
204
- }
205
- if (chunk.candidates?.[0]?.content?.parts?.[0]?.inlineData) {
206
-
207
- // inlineData is the object from the model: { mimeType: 'image/png', data: '...base64...' }
208
- const inlineData = chunk.candidates[0].content.parts[0].inlineData;
209
- const rawB64 = (inlineData.data || "").replace(/\s+/g, ""); // remove whitespace/newlines just in case
210
- const mimeType = inlineData.mimeType || "image/png";
211
-
212
- // ensure it's a Buffer (optional, for validation)
213
- const buffer = Buffer.from(rawB64, "base64");
214
 
215
- // produce a canonical base64 string (this also validates)
216
- const base64 = buffer.toString("base64");
217
-
218
- // build a browser-friendly data URL and return it
219
- const dataUrl = `data:${mimeType};base64,${base64}`;
220
- return dataUrl;
 
 
 
 
221
 
222
-
223
- // Here
 
 
 
 
224
 
225
- }
226
- else {
227
- console.log(chunk.text);
228
- }
229
- }
230
-
231
- // return image.image.toString('base64');
 
 
 
 
 
232
  } catch (error) {
233
  console.error("Image Gen Error:", error);
234
  return null;
 
95
  },
96
 
97
  /**
98
+ * 3. ONBOARDING ANALYST (Question Generation)
99
+ * Returns STRICT JSON for the Frontend
 
100
  */
101
  generateEntryQuestions: async (description) => {
102
+ const modelId = 'gemini-2.5-flash';
103
+ // Using the updated prompt which handles REJECTED/ACCEPTED logic
104
  const input = `[MODE 1: QUESTIONS]\nAnalyze this game idea: "${description}". Check for TOS violations or nonsense. If good, ask 3 questions. Output ONLY raw JSON.`;
105
 
106
+ try {
107
  const response = await genAI.models.generateContent({
108
  model: modelId,
109
+ config: {
 
 
 
 
 
 
 
 
110
  responseMimeType: "application/json",
111
  systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
112
  },
 
117
  return JSON.parse(text);
118
  } catch (e) {
119
  console.error("Analyst Error:", e);
 
120
  return { status: "ACCEPTED", questions: [{ id: "fallback", label: "Please describe the core gameplay loop in detail.", type: "textarea" }] };
121
  }
122
  },
123
 
124
  /**
125
+ * 4. PROJECT GRADER (Feasibility Check)
126
  * Returns STRICT JSON
127
  */
128
  gradeProject: async (description, answers) => {
129
+ const modelId = 'gemini-2.5-flash';
130
+ // Using the updated prompt to respect Title and relaxed Grading
131
  const input = `[MODE 2: GRADING]\nIdea: "${description}"\nUser Answers: ${JSON.stringify(answers)}\n\nAssess feasibility. Output JSON with title and rating.`;
132
 
133
  try {
134
  const response = await genAI.models.generateContent({
135
  model: modelId,
 
 
 
136
  config: {
 
 
 
 
 
137
  responseMimeType: "application/json",
138
  systemInstruction: { parts: [{ text: prompts.analyst_system_prompt }] }
139
  },
 
148
 
149
  /**
150
  * 5. IMAGE GENERATOR (Visual Assets)
151
+ * Uses Gemini 2.5 Flash Image with Stream (Correct Implementation)
152
  */
153
  generateImage: async (prompt) => {
154
+ // Inject the prompt template from JSON to ensure adherence to instructions
155
+ const finalPrompt = prompts.image_gen_prompt.replace('{{DESCRIPTION}}', prompt);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
 
157
+ const config = {
158
+ responseModalities: ['IMAGE', 'TEXT'],
159
+ };
160
+ const model = 'gemini-2.5-flash-image';
161
+ const contents = [
162
+ {
163
+ role: 'user',
164
+ parts: [{ text: finalPrompt }],
165
+ },
166
+ ];
167
 
168
+ try {
169
+ const response = await genAI.models.generateContentStream({
170
+ model,
171
+ config,
172
+ contents,
173
+ });
174
 
175
+ for await (const chunk of response) {
176
+ if (!chunk.candidates || !chunk.candidates[0].content || !chunk.candidates[0].content.parts) {
177
+ continue;
178
+ }
179
+ if (chunk.candidates?.[0]?.content?.parts?.[0]?.inlineData) {
180
+ const inlineData = chunk.candidates[0].content.parts[0].inlineData;
181
+ const rawB64 = (inlineData.data || "").replace(/\s+/g, "");
182
+ // Return raw Base64
183
+ return rawB64;
184
+ }
185
+ }
186
+ return null;
187
  } catch (error) {
188
  console.error("Image Gen Error:", error);
189
  return null;