Spaces:
Running
Running
Update app.js
Browse files
app.js
CHANGED
|
@@ -3,6 +3,8 @@ import cors from 'cors';
|
|
| 3 |
import fs from 'fs';
|
| 4 |
import path from 'path';
|
| 5 |
import { createClient } from '@supabase/supabase-js';
|
|
|
|
|
|
|
| 6 |
|
| 7 |
const PORT = 7860;
|
| 8 |
const SUPABASE_URL = process.env.SUPABASE_URL;
|
|
@@ -66,6 +68,45 @@ const StateManager = {
|
|
| 66 |
}
|
| 67 |
};
|
| 68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
const callAI = async (history, input, contextData, images, systemPrompt, projectContext, modelId) => {
|
| 70 |
let contextStr = "";
|
| 71 |
try { contextStr = JSON.stringify(contextData, null, 2); } catch {}
|
|
@@ -108,7 +149,8 @@ function extractCommands(text) {
|
|
| 108 |
|
| 109 |
// NEW: Extract AI queries sent to local MCPs
|
| 110 |
parse(/<mcp_query>([\s\S]*?)<\/mcp_query>/gi, 'mcp_query');
|
| 111 |
-
|
|
|
|
| 112 |
return commands;
|
| 113 |
}
|
| 114 |
|
|
@@ -279,7 +321,9 @@ app.post('/init-project', async (req, res) => {
|
|
| 279 |
setImmediate(async () => {
|
| 280 |
try {
|
| 281 |
const initInput = `PROJECT: ${name}\nDESC: ${description}\nUSER TIMEZONE OFFSET: ${timezoneOffset}\nTask: Init PRD, First Thrust, Schedule Morning Briefing.`;
|
| 282 |
-
|
|
|
|
|
|
|
| 283 |
aiResult.text += `\n<notification>Project '${name}' initialized successfully!</notification>`;
|
| 284 |
await StateManager.addHistory(lead.id, 'user', initInput);
|
| 285 |
await StateManager.addHistory(lead.id, 'model', aiResult.text);
|
|
@@ -304,7 +348,9 @@ app.post('/process', async (req, res) => {
|
|
| 304 |
const projectContext = `[PRD]: ${lead?.requirements_doc?.substring(0, 3000)}...\n[CURRENT THRUST]: ${activeThrust ? JSON.stringify(activeThrust) : "None"}\n[RECENT TIMELINE]: ${JSON.stringify(timeline ||[])}`;
|
| 305 |
const history = await StateManager.getHistory(projectId);
|
| 306 |
|
| 307 |
-
let aiResult = await callAI(history, prompt, context, images, sysPrompt, projectContext, selectedModel);
|
|
|
|
|
|
|
| 308 |
let cmds = extractCommands(aiResult.text);
|
| 309 |
let flags = await executeCommands(userId, projectId, cmds);
|
| 310 |
|
|
@@ -349,8 +395,9 @@ app.post('/automated-briefing', async (req, res) => {
|
|
| 349 |
const projectContext = `[PRD]: ${lead.requirements_doc}\n[RECENT TIMELINE]: ${JSON.stringify(timeline)}`;
|
| 350 |
const history = await StateManager.getHistory(projectId);
|
| 351 |
|
| 352 |
-
|
| 353 |
-
|
|
|
|
| 354 |
await StateManager.addHistory(projectId, 'model', aiResult.text);
|
| 355 |
const cmds = extractCommands(aiResult.text);
|
| 356 |
const flags = await executeCommands(lead.user_id, projectId, cmds);
|
|
|
|
| 3 |
import fs from 'fs';
|
| 4 |
import path from 'path';
|
| 5 |
import { createClient } from '@supabase/supabase-js';
|
| 6 |
+
import { search } from "./research.js";
|
| 7 |
+
|
| 8 |
|
| 9 |
const PORT = 7860;
|
| 10 |
const SUPABASE_URL = process.env.SUPABASE_URL;
|
|
|
|
| 68 |
}
|
| 69 |
};
|
| 70 |
|
| 71 |
+
|
| 72 |
+
/**
 * Wraps callAI with a one-shot research loop.
 *
 * First pass: run the model. If its output contains any
 * <deep_research>{json}</deep_research> requests, resolve them all in
 * parallel via search(), then run a second pass with the research results
 * appended to the original input. If no research is requested (or every
 * request fails), the first-pass result is returned unchanged.
 *
 * @param {Array}  history        - Chat history, forwarded to callAI as-is.
 * @param {string} input          - User/task input for the model.
 * @param {Object} context        - Context data, forwarded to callAI.
 * @param {Array}  images         - Image attachments, forwarded to callAI.
 * @param {string} systemPrompt   - System prompt, forwarded to callAI.
 * @param {string} projectContext - Project context string, forwarded to callAI.
 * @param {string} modelId        - Model identifier, forwarded to callAI.
 * @returns {Promise<Object>} The callAI result object (first or second pass).
 */
async function callAIWithResearch(history, input, context, images, systemPrompt, projectContext, modelId) {
  const firstPass = await callAI(history, input, context, images, systemPrompt, projectContext, modelId);

  // Check if the AI requested any research.
  const researchMatches = [...firstPass.text.matchAll(/<deep_research>([\s\S]*?)<\/deep_research>/gi)];
  if (researchMatches.length === 0) return firstPass;

  // Resolve all research requests in parallel; each failure degrades to null
  // instead of rejecting the whole batch.
  const resolved = await Promise.all(
    researchMatches.map(async (match) => {
      try {
        const payload = JSON.parse(match[1].trim());
        // FIX: valid JSON may still lack a usable query (e.g. {} or
        // {"query": ""}); previously this reached search() with
        // query === undefined. Fail it into the catch below instead.
        if (typeof payload.query !== 'string' || payload.query.trim() === '') {
          throw new Error('deep_research payload is missing a non-empty "query" string');
        }
        const { result } = await search({
          query: payload.query,
          urgent: payload.urgent ?? false,
          deep: payload.deep ?? false,
          supabase, // module-level Supabase client — assumed in scope; TODO confirm
        });
        return `[RESEARCH: "${payload.query}"]\n${result}`;
      } catch (e) {
        console.error("[Research] Failed to resolve query:", e.message);
        return null;
      }
    })
  );

  const researchBlock = resolved.filter(Boolean).join("\n\n────\n\n");
  // If every request failed, keep the first-pass answer rather than re-running.
  if (!researchBlock) return firstPass;

  // Second pass: inject research results and let the AI continue.
  // NOTE(review): firstPass.text is discarded on this path, so any commands
  // the model emitted alongside the research request are lost — confirm this
  // is intended (the second pass is expected to re-emit them).
  const augmentedInput =
    `${input}\n\n` +
    `[RESEARCH RESULTS — use these to complete your response accurately]:\n${researchBlock}`;

  console.log(`[Research] Re-running AI with ${researchMatches.length} research result(s) injected.`);
  return callAI(history, augmentedInput, context, images, systemPrompt, projectContext, modelId);
}
|
| 109 |
+
|
| 110 |
const callAI = async (history, input, contextData, images, systemPrompt, projectContext, modelId) => {
|
| 111 |
let contextStr = "";
|
| 112 |
try { contextStr = JSON.stringify(contextData, null, 2); } catch {}
|
|
|
|
| 149 |
|
| 150 |
// NEW: Extract AI queries sent to local MCPs
|
| 151 |
parse(/<mcp_query>([\s\S]*?)<\/mcp_query>/gi, 'mcp_query');
|
| 152 |
+
parse(/<deep_research>([\s\S]*?)<\/deep_research>/gi, 'deep_research');
|
| 153 |
+
|
| 154 |
return commands;
|
| 155 |
}
|
| 156 |
|
|
|
|
| 321 |
setImmediate(async () => {
|
| 322 |
try {
|
| 323 |
const initInput = `PROJECT: ${name}\nDESC: ${description}\nUSER TIMEZONE OFFSET: ${timezoneOffset}\nTask: Init PRD, First Thrust, Schedule Morning Briefing.`;
|
| 324 |
+
// const aiResult = await callAI([], initInput, {},[], prompts.init_system_prompt, "", SMART_MODEL_ID);
|
| 325 |
+
const aiResult = await callAIWithResearch([], initInput, {},[], prompts.init_system_prompt, "", SMART_MODEL_ID);
|
| 326 |
+
|
| 327 |
aiResult.text += `\n<notification>Project '${name}' initialized successfully!</notification>`;
|
| 328 |
await StateManager.addHistory(lead.id, 'user', initInput);
|
| 329 |
await StateManager.addHistory(lead.id, 'model', aiResult.text);
|
|
|
|
| 348 |
const projectContext = `[PRD]: ${lead?.requirements_doc?.substring(0, 3000)}...\n[CURRENT THRUST]: ${activeThrust ? JSON.stringify(activeThrust) : "None"}\n[RECENT TIMELINE]: ${JSON.stringify(timeline ||[])}`;
|
| 349 |
const history = await StateManager.getHistory(projectId);
|
| 350 |
|
| 351 |
+
// let aiResult = await callAI(history, prompt, context, images, sysPrompt, projectContext, selectedModel);
|
| 352 |
+
let aiResult = await callAIWithResearch(history, prompt, context, images, sysPrompt, projectContext, selectedModel);
|
| 353 |
+
|
| 354 |
let cmds = extractCommands(aiResult.text);
|
| 355 |
let flags = await executeCommands(userId, projectId, cmds);
|
| 356 |
|
|
|
|
| 395 |
const projectContext = `[PRD]: ${lead.requirements_doc}\n[RECENT TIMELINE]: ${JSON.stringify(timeline)}`;
|
| 396 |
const history = await StateManager.getHistory(projectId);
|
| 397 |
|
| 398 |
+
// const aiResult = await callAI(history, prompt, {}, [], prompts.director_system_prompt, projectContext, SMART_MODEL_ID);
|
| 399 |
+
const aiResult = await callAIWithResearch(history, prompt, {}, [], prompts.director_system_prompt, projectContext, SMART_MODEL_ID);
|
| 400 |
+
|
| 401 |
await StateManager.addHistory(projectId, 'model', aiResult.text);
|
| 402 |
const cmds = extractCommands(aiResult.text);
|
| 403 |
const flags = await executeCommands(lead.user_id, projectId, cmds);
|