Update aiEngine.js
Browse files- aiEngine.js +159 -53
aiEngine.js
CHANGED
|
@@ -2,20 +2,31 @@ import OpenAI from "openai";
|
|
| 2 |
import { BedrockRuntimeClient, ConverseCommand } from "@aws-sdk/client-bedrock-runtime";
|
| 3 |
import { NodeHttpHandler } from "@smithy/node-http-handler";
|
| 4 |
import fs from 'fs';
|
|
|
|
| 5 |
|
| 6 |
-
// Load prompts
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
// --- CLIENT INITIALIZATION ---
|
|
|
|
| 10 |
const bedrockClient = new BedrockRuntimeClient({
|
| 11 |
region: process.env.AWS_REGION || "us-east-1",
|
| 12 |
requestHandler: new NodeHttpHandler({ http2Handler: undefined }),
|
| 13 |
credentials: {
|
| 14 |
-
|
| 15 |
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
|
| 16 |
}
|
| 17 |
});
|
| 18 |
|
|
|
|
| 19 |
const azureOpenAI = new OpenAI({
|
| 20 |
apiKey: process.env.AZURE_OPENAI_API_KEY,
|
| 21 |
baseURL: `${process.env.AZURE_OPENAI_ENDPOINT}/openai/deployments/${process.env.AZURE_DEPLOYMENT_NAME}`,
|
|
@@ -23,82 +34,177 @@ const azureOpenAI = new OpenAI({
|
|
| 23 |
defaultHeaders: { "api-key": process.env.AZURE_OPENAI_API_KEY }
|
| 24 |
});
|
| 25 |
|
| 26 |
-
//
|
| 27 |
-
|
|
|
|
|
|
|
| 28 |
return history.map(h => ({
|
| 29 |
-
role: h.role === 'model' ? 'assistant' :
|
| 30 |
content: h.parts[0].text
|
| 31 |
}));
|
| 32 |
}
|
| 33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
export const AIEngine = {
|
| 35 |
-
/
|
|
|
|
|
|
|
|
|
|
| 36 |
callPM: async (history, input) => {
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
| 38 |
const command = new ConverseCommand({
|
| 39 |
modelId: "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6",
|
| 40 |
-
system: [{ text:
|
| 41 |
messages: [...chatHistory, { role: "user", content: [{ text: input }] }],
|
| 42 |
-
inferenceConfig: { maxTokens:
|
| 43 |
additionalModelRequestFields: {
|
| 44 |
thinking: { type: "adaptive" },
|
| 45 |
output_config: { effort: "high" }
|
| 46 |
}
|
| 47 |
});
|
| 48 |
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
},
|
| 57 |
|
| 58 |
-
/
|
|
|
|
|
|
|
|
|
|
| 59 |
callWorker: async (history, input, images = []) => {
|
| 60 |
-
const chatHistory =
|
|
|
|
| 61 |
|
| 62 |
-
//
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
},
|
| 78 |
|
| 79 |
-
/
|
|
|
|
|
|
|
|
|
|
| 80 |
generateEntryQuestions: async (description) => {
|
| 81 |
-
const
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
},
|
| 92 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
gradeProject: async (description, answers) => {
|
| 94 |
-
const
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
}
|
| 104 |
};
|
|
|
|
| 2 |
import { BedrockRuntimeClient, ConverseCommand } from "@aws-sdk/client-bedrock-runtime";
|
| 3 |
import { NodeHttpHandler } from "@smithy/node-http-handler";
|
| 4 |
import fs from 'fs';
|
| 5 |
+
import path from 'path';
|
| 6 |
|
| 7 |
+
// Load prompts safely: prompts.json is optional. A missing or malformed
// file must not crash startup — each AIEngine method falls back to a
// built-in default system prompt when a key is absent.
let prompts = {};
try {
  const promptsPath = path.resolve('./prompts.json');
  if (fs.existsSync(promptsPath)) {
    // Synchronous read is acceptable here: this runs once at module load.
    prompts = JSON.parse(fs.readFileSync(promptsPath, 'utf8'));
  }
} catch (e) {
  // Best-effort by design: log and continue with the empty object.
  console.error("Error loading prompts:", e);
}
|
| 17 |
|
| 18 |
// --- CLIENT INITIALIZATION ---
|
| 19 |
+
// 1. AWS Bedrock (Claude)
|
| 20 |
// 1. AWS Bedrock (Claude)
// Fix: only pass an explicit `credentials` object when BOTH env vars are set.
// The original always passed { accessKeyId, secretAccessKey } even when the
// env vars were undefined, which disables the SDK's default credential
// provider chain (shared profiles, IAM roles, SSO) and fails at request time.
const bedrockClient = new BedrockRuntimeClient({
  region: process.env.AWS_REGION || "us-east-1",
  // http2Handler: undefined forces the HTTP/1.1 handler path.
  // NOTE(review): presumably a workaround for an HTTP/2 issue — confirm before removing.
  requestHandler: new NodeHttpHandler({ http2Handler: undefined }),
  ...(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY
    ? {
        credentials: {
          accessKeyId: process.env.AWS_ACCESS_KEY_ID,
          secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
        },
      }
    : {}),
});
|
| 28 |
|
| 29 |
+
// 2. Azure OpenAI (GPT-5 Mini)
|
| 30 |
const azureOpenAI = new OpenAI({
|
| 31 |
apiKey: process.env.AZURE_OPENAI_API_KEY,
|
| 32 |
baseURL: `${process.env.AZURE_OPENAI_ENDPOINT}/openai/deployments/${process.env.AZURE_DEPLOYMENT_NAME}`,
|
|
|
|
| 34 |
defaultHeaders: { "api-key": process.env.AZURE_OPENAI_API_KEY }
|
| 35 |
});
|
| 36 |
|
| 37 |
+
// --- HELPER: History Transformer ---
|
| 38 |
+
// Converts your App's { role: 'user', parts: [{text: ''}] } format
|
| 39 |
+
// to formats acceptable by Azure/Bedrock
|
| 40 |
+
/**
 * Converts the app's chat format { role: 'user'|'model', parts: [{ text }] }
 * into the OpenAI/Azure message shape { role, content }.
 * The 'model' role maps to 'assistant'; everything else maps to 'user'.
 */
function toAzureHistory(history) {
  return history.map(({ role, parts }) => ({
    role: role === 'model' ? 'assistant' : 'user',
    content: parts[0].text,
  }));
}
|
| 46 |
|
| 47 |
+
/**
 * Converts the app's chat format { role: 'user'|'model', parts: [{ text }] }
 * into the Bedrock Converse message shape { role, content: [{ text }] }.
 * The 'model' role maps to 'assistant'; everything else maps to 'user'.
 */
function toBedrockHistory(history) {
  const converted = [];
  for (const turn of history) {
    const role = turn.role === 'model' ? 'assistant' : 'user';
    converted.push({ role, content: [{ text: turn.parts[0].text }] });
  }
  return converted;
}
|
| 53 |
+
|
| 54 |
export const AIEngine = {
  /**
   * 1. PROJECT MANAGER (Reasoning & Delegation)
   * Uses AWS Bedrock -> Claude Sonnet with adaptive extended thinking.
   * @param {Array<{role: string, parts: [{text: string}]}>} history - prior turns in app format
   * @param {string} input - latest user message
   * @returns {Promise<{text: (string|undefined), usage: {totalTokenCount: number}}>}
   * @throws rethrows any Bedrock error after logging
   */
  callPM: async (history, input) => {
    // Prepare History
    const chatHistory = toBedrockHistory(history);
    const sysPrompt = prompts.pm_system_prompt || "You are a Senior Project Manager.";

    const command = new ConverseCommand({
      // Fix: the account-specific inference-profile ARN is now overridable via
      // env so deployments in other accounts/regions don't need a code change.
      modelId: process.env.BEDROCK_PM_MODEL_ID ||
        "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6",
      system: [{ text: sysPrompt }],
      messages: [...chatHistory, { role: "user", content: [{ text: input }] }],
      inferenceConfig: { maxTokens: 4000, temperature: 1 },
      additionalModelRequestFields: {
        thinking: { type: "adaptive" },
        output_config: { effort: "high" }
      }
    });

    try {
      const response = await bedrockClient.send(command);
      // The assistant message may contain non-text (thinking) blocks;
      // pick the first block that carries text.
      const text = response.output.message.content.find(b => b.text)?.text;

      // Extract usage (tolerate a missing usage payload)
      const inputTokens = response.usage?.inputTokens || 0;
      const outputTokens = response.usage?.outputTokens || 0;

      console.log(`[AI-PM] Claude Usage: ${inputTokens + outputTokens}`);

      return {
        text,
        usage: { totalTokenCount: inputTokens + outputTokens }
      };
    } catch (error) {
      console.error("PM AI Error (Bedrock):", error);
      throw error;
    }
  },

  /**
   * 2. WORKER (Coding & Execution)
   * Uses Azure OpenAI -> GPT-5 Mini
   * @param {Array<{role: string, parts: [{text: string}]}>} history
   * @param {string} input
   * @param {Array} images - accepted for interface stability but NOT forwarded
   *   yet (text-parity with the original script).
   * @returns {Promise<{text: string, usage: {totalTokenCount: number}}>}
   * @throws rethrows any Azure error after logging
   */
  callWorker: async (history, input, images = []) => {
    const chatHistory = toAzureHistory(history);
    const sysPrompt = prompts.worker_system_prompt || "You are a specialized Worker.";

    // NOTE(review): `images` is currently ignored; multimodal support would
    // require appending image parts to the user message — confirm before relying on it.
    try {
      const response = await azureOpenAI.chat.completions.create({
        model: process.env.AZURE_DEPLOYMENT_NAME, // "gpt-5-mini"
        messages: [
          { role: "system", content: sysPrompt },
          ...chatHistory,
          { role: "user", content: input }
        ],
        reasoning_effort: "high"
      });

      const text = response.choices[0].message.content;
      const usage = response.usage?.total_tokens || 0;

      console.log(`[AI-WORKER] GPT-5 Usage: ${usage}`);

      return {
        text,
        usage: { totalTokenCount: usage }
      };
    } catch (error) {
      console.error("Worker AI Error (Azure):", error);
      throw error;
    }
  },

  /**
   * 3. ONBOARDING ANALYST (Question Generation)
   * Returns STRICT JSON (json_object response format). On any failure a
   * safe fallback question set is returned instead of throwing.
   * @param {string} description - the user's game idea
   * @returns {Promise<Object>} parsed analyst JSON plus a `usage` field
   */
  generateEntryQuestions: async (description) => {
    const sysPrompt = prompts.analyst_system_prompt || "Return JSON only.";
    const input = `[MODE 1: QUESTIONS]\nAnalyze this game idea: "${description}". Check for TOS violations or nonsense. If good, ask 3 questions. Output ONLY raw JSON.`;

    try {
      const response = await azureOpenAI.chat.completions.create({
        model: process.env.AZURE_DEPLOYMENT_NAME,
        messages: [
          { role: "system", content: sysPrompt },
          { role: "user", content: input }
        ],
        response_format: { type: "json_object" }
      });

      const parsed = JSON.parse(response.choices[0].message.content);
      return {
        ...parsed,
        // Fix: optional-chain usage so a missing usage payload cannot throw
        // and discard an already-valid parse (matches callWorker's handling).
        usage: { totalTokenCount: response.usage?.total_tokens || 0 }
      };
    } catch (e) {
      console.error("Analyst Error:", e);
      // Fallback structure
      return {
        status: "ACCEPTED",
        questions: [{ id: "fallback", label: "Elaborate?", type: "textarea" }],
        usage: { totalTokenCount: 0 }
      };
    }
  },

  /**
   * 4. PROJECT GRADER (Feasibility Check)
   * Returns STRICT JSON. On any failure a neutral fallback grade is
   * returned instead of throwing.
   * @param {string} description - the user's game idea
   * @param {Object} answers - the user's answers to the entry questions
   * @returns {Promise<Object>} parsed grading JSON plus a `usage` field
   */
  gradeProject: async (description, answers) => {
    const sysPrompt = prompts.analyst_system_prompt || "Return JSON only.";
    const input = `[MODE 2: GRADING]\nIdea: "${description}"\nUser Answers: ${JSON.stringify(answers)}\n\nAssess feasibility. Output JSON with title and rating.`;

    try {
      const response = await azureOpenAI.chat.completions.create({
        model: process.env.AZURE_DEPLOYMENT_NAME,
        messages: [
          { role: "system", content: sysPrompt },
          { role: "user", content: input }
        ],
        response_format: { type: "json_object" }
      });

      const parsed = JSON.parse(response.choices[0].message.content);
      return {
        ...parsed,
        // Fix: optional-chain usage so a missing usage payload cannot throw
        // and discard an already-valid parse (matches callWorker's handling).
        usage: { totalTokenCount: response.usage?.total_tokens || 0 }
      };
    } catch (e) {
      console.error("Grading Error:", e);
      return {
        feasibility: 50,
        rating: 'C',
        title: "Untitled",
        usage: { totalTokenCount: 0 }
      };
    }
  },

  /**
   * 5. IMAGE GENERATOR
   * Kept as placeholder since image generation is not wired up in the
   * Azure/Bedrock migration; prevents crashes if App.js calls it.
   * @param {string} prompt - ignored for now
   * @returns {Promise<null>}
   */
  generateImage: async (prompt) => {
    console.log("Image generation currently skipped in Azure/Bedrock migration.");
    return null;
  }
};
|