Update aiEngine.js

aiEngine.js · +141 −1 · CHANGED
@@ -20,6 +20,145 @@ const flattenHistory = (history, currentInput, systemPrompt) => {
   return `System: ${systemPrompt}\n\n${context}\nUser: ${currentInput}\nAssistant:`;
 };
 
+// --- ROBUST STREAM HANDLER ---
+const handleStreamResponse = async (response, onThink, onOutput) => {
+  if (!response.ok) throw new Error(`Stream Error: ${response.statusText}`);
+
+  const reader = response.body.getReader();
+  const decoder = new TextDecoder("utf-8");
+
+  // Buffer the entire stream data to prevent split-parsing issues
+  let fullStreamData = "";
+
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) break;
+
+    const chunk = decoder.decode(value, { stream: true });
+    fullStreamData += chunk;
+
+    // Still send live feedback to frontend, but we will parse from the buffer later
+    if (chunk.startsWith("__THINK__")) {
+      if (onThink) onThink(chunk.replace("__THINK__", ""));
+    } else if (chunk.includes("__THINK__")) {
+      const parts = chunk.split("__THINK__");
+      if (parts[0]) onOutput?.(parts[0]);
+      if (parts[1]) onThink?.(parts[1]);
+    } else {
+      if (chunk) onOutput?.(chunk);
+    }
+  }
+
+  let usage = { totalTokenCount: 0 };
+  let fullText = fullStreamData;
+
+  // --- PARSE FROM THE COMPLETE BUFFER ---
+  // This is safer because the tag and JSON are guaranteed to be complete.
+  if (fullStreamData.includes("__USAGE__")) {
+    const parts = fullStreamData.split("__USAGE__");
+    fullText = parts[0]; // The actual text response
+    try {
+      if (parts[1]) {
+        usage = JSON.parse(parts[1]);
+      }
+    } catch (e) {
+      console.warn("Failed to parse final usage footer from stream buffer.", e);
+    }
+  }
+
+  // Now, we must strip any thought blocks from the final text to be saved
+  fullText = fullText.split("__THINK__")[0];
+
+  return { text: fullText.trim(), usage };
+};
+
+export const AIEngine = {
+  callPMStream: async (history, input, onThink, onOutput) => {
+    const response = await fetch(`${REMOTE_SERVER_URL}/api/stream`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ model: "claude", prompt: flattenHistory(history, input, ""), system_prompt: prompts.pm_system_prompt })
+    });
+    return await handleStreamResponse(response, onThink, onOutput);
+  },
+
+  callWorkerStream: async (history, input, onThink, onOutput, images = []) => {
+    const response = await fetch(`${REMOTE_SERVER_URL}/api/stream`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ model: "gpt", prompt: flattenHistory(history, input, ""), system_prompt: prompts.worker_system_prompt, images })
+    });
+    return await handleStreamResponse(response, onThink, onOutput);
+  },
+
+  callPM: async (history, input) => {
+    const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ model: "claude", prompt: flattenHistory(history, input, ""), system_prompt: prompts.pm_system_prompt })
+    });
+    const result = await response.json();
+    return { text: result.data, usage: result.usage || { totalTokenCount: 0 } };
+  },
+
+  callWorker: async (history, input) => {
+    const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ model: "gpt", prompt: flattenHistory(history, input, ""), system_prompt: prompts.worker_system_prompt })
+    });
+    const result = await response.json();
+    return { text: result.data, usage: result.usage || { totalTokenCount: 0 } };
+  },
+
+  generateEntryQuestions: async (desc) => {
+    const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ model: "gpt", prompt: `Analyze: ${desc}`, system_prompt: prompts.analyst_system_prompt })
+    });
+    const result = await response.json();
+    const parsed = JSON.parse(result.data);
+    parsed.usage = result.usage;
+    return parsed;
+  },
+
+  gradeProject: async (desc, ans) => {
+    const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({ model: "gpt", prompt: `Grade: ${desc} ${JSON.stringify(ans)}`, system_prompt: prompts.analyst_system_prompt })
+    });
+    const result = await response.json();
+    const parsed = JSON.parse(result.data);
+    parsed.usage = result.usage;
+    return parsed;
+  }
+};
+
+
+/*import dotenv from 'dotenv';
+import fs from 'fs';
+import path from 'path';
+
+dotenv.config();
+
+const REMOTE_SERVER_URL = process.env.REMOTE_AI_URL || "http://localhost:7860";
+
+let prompts = {};
+try {
+  const promptsPath = path.resolve('./prompts.json');
+  if (fs.existsSync(promptsPath)) prompts = JSON.parse(fs.readFileSync(promptsPath, 'utf8'));
+} catch (e) { console.error("Prompt Load Error:", e); }
+
+const flattenHistory = (history, currentInput, systemPrompt) => {
+  const context = history.map(m => {
+    const roleName = m.role === 'model' ? 'Assistant' : 'User';
+    return `${roleName}: ${m.parts[0].text}`;
+  }).join('\n');
+  return `System: ${systemPrompt}\n\n${context}\nUser: ${currentInput}\nAssistant:`;
+};
+
 /*
 // HELPER: STREAM SPLITTER & USAGE PARSER
 const handleStreamResponse = async (response, onThink, onOutput) => {
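For context, the new handler assumes the backend frames its stream with two literal tags: __THINK__ ahead of thought text, and a single __USAGE__ footer carrying a JSON usage object at the very end of the stream. A minimal sketch of the buffer-then-parse step on a sample payload (the payload text, and any footer fields beyond totalTokenCount, are illustrative assumptions, not part of this commit):

// Sample of the assumed wire format, fully buffered as handleStreamResponse does.
const sample =
  "Here is the answer." +                  // visible output chunks
  "__THINK__considering edge cases..." +   // thought block, relayed to onThink
  '__USAGE__{"totalTokenCount": 342}';     // JSON usage footer at end of stream

// Buffer-then-parse, mirroring the logic after the read loop above.
const [beforeUsage, usageJson] = sample.split("__USAGE__");
const text = beforeUsage.split("__THINK__")[0].trim();
const usage = usageJson ? JSON.parse(usageJson) : { totalTokenCount: 0 };
console.log(text, usage.totalTokenCount); // Here is the answer. 342

Buffering first matters because a network read can split __USAGE__ or its JSON across chunks; per-chunk parsing could then see only a fragment, which is exactly the failure the comments in the diff call out.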
The remaining two hunks fence the legacy implementation (old lines 80-280) inside a block comment, so the new handler and engine above replace it without deleting it:

@@ -77,6 +216,7 @@ const handleStreamResponse = async (response, onThink, onOutput) => {
 };
 };
 */
+/*
 const handleStreamResponse = async (response, onThink, onOutput) => {
   if (!response.ok) throw new Error(`Stream Error: ${response.statusText}`);
 
@@ -277,4 +417,4 @@ export const AIEngine = {
       console.log("GenerateQ error: ",error)
     }
   }
-};
+}; */
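For reference, a caller would consume the reworked streaming API roughly as below. This is a sketch under assumptions: the history contents, prompt, and logging are illustrative, and it presumes a runtime with fetch and top-level await (Node 18+ ESM or a modern browser module).

import { AIEngine } from './aiEngine.js';

// Illustrative history, in the shape flattenHistory expects.
const history = [{ role: 'user', parts: [{ text: 'Kickoff notes: build a CLI tool.' }] }];

// Stream a PM reply: thoughts and output render live via the callbacks,
// while the resolved value carries the cleaned final text plus parsed usage.
const { text, usage } = await AIEngine.callPMStream(
  history,
  'Summarize the open risks.',
  (thought) => console.log('[think]', thought),  // onThink
  (delta) => process.stdout.write(delta)         // onOutput
);
console.log('\nSaved:', text, '| tokens:', usage.totalTokenCount);

The non-streaming callPM/callWorker variants resolve to the same { text, usage } shape, taken from the /api/generate JSON body ({ data, usage }) rather than a tagged stream.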