everydaytok committed on
Commit
04392bb
·
verified ·
1 Parent(s): 2e4ad05

Update app.js

Browse files
Files changed (1) hide show
  1. app.js +143 -114
app.js CHANGED
@@ -1,137 +1,166 @@
1
  import express from 'express';
 
2
  import cors from 'cors';
 
 
 
3
  import dotenv from 'dotenv';
4
- import OpenAI from "openai";
5
- import { BedrockRuntimeClient, ConverseCommand, ConverseStreamCommand } from "@aws-sdk/client-bedrock-runtime";
6
- import { NodeHttpHandler } from "@smithy/node-http-handler";
7
 
8
  dotenv.config();
 
 
9
  const app = express();
10
  const PORT = process.env.PORT || 7860;
11
-
12
  app.use(cors());
13
- app.use(express.json({ limit: '50mb' }));
14
 
15
- // --- SYSTEM PROMPT DEFINITIONS ---
16
- const CLAUDE_SYSTEM_PROMPT = "You are a pro. Provide elite, high-level technical responses.";
17
- const GPT_SYSTEM_PROMPT = "You are a worker. Be concise, efficient, and get the job done.";
18
 
19
- const bedrockClient = new BedrockRuntimeClient({
20
- region: "us-east-1" ,
21
- requestHandler: new NodeHttpHandler({
22
- http2Handler: undefined,
23
- })
24
- });
 
 
 
 
 
25
 
26
- const azureOpenAI = new OpenAI({
27
- apiKey: "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P",
28
- baseURL: `https://hollowpad-resource.cognitiveservices.azure.com/openai/deployments/gpt-5-mini`,
29
- defaultQuery: { "api-version": "2024-05-01-preview" },
30
- defaultHeaders: { "api-key": "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P" }
31
- });
32
 
33
- // Standard Generation
34
- app.post('/api/generate', async (req, res) => {
35
- const { model, prompt, system_prompt} = req.body;
36
  try {
37
- if (model === "claude") {
38
- const command = new ConverseCommand({
39
- modelId: "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6",
40
- system: [{ text: system_prompt || CLAUDE_SYSTEM_PROMPT }],
41
- messages: [{ role: "user", content: [{ text: prompt }] }],
42
- inferenceConfig: { maxTokens: 48000, temperature: 1 },
43
- additionalModelRequestFields: {
44
- thinking: { type: "adaptive" },
45
- output_config: { effort: "high" }
46
- }
47
- });
48
- const response = await bedrockClient.send(command);
49
- const text = response.output.message.content.find(b => b.text)?.text;
50
- res.json({ success: true, data: text });
51
- } else {
52
- const response = await azureOpenAI.chat.completions.create({
53
- model: "gpt-5-mini",
54
- messages: [
55
- { role: "system", content: system_prompt || GPT_SYSTEM_PROMPT },
56
- { role: "user", content: prompt }
57
- ],
58
- reasoning_effort: "high"
59
- });
60
- res.json({ success: true, data: response.choices[0].message.content });
61
- }
62
- } catch (err) {
63
- console.error(`❌ [${model.toUpperCase()} ERROR]:`, err.message);
64
- res.status(500).json({ success: false, error: err.message });
65
- }
66
- });
67
 
68
- // Streaming Generation
69
- app.post('/api/stream', async (req, res) => {
70
- const { model, prompt, system_prompt, images } = req.body;
71
- console.log(`[STREAM] Request for ${model}`);
 
 
 
72
 
73
- res.setHeader('Content-Type', 'text/plain; charset=utf-8');
74
- res.setHeader('Transfer-Encoding', 'chunked');
75
- res.setHeader('X-Accel-Buffering', 'no');
76
- res.flushHeaders();
 
77
 
 
78
  try {
79
- if (model === "claude") {
80
- let contentBlock = [{ text: prompt }];
81
- if (images && images.length > 0) {
82
- const imageBlocks = images.map(imgStr => {
83
- const base64Data = imgStr.replace(/^data:image\/\w+;base64,/, "");
84
- return { image: { format: 'png', source: { bytes: Buffer.from(base64Data, 'base64') } } };
85
- });
86
- contentBlock = [...imageBlocks, ...contentBlock];
87
- }
88
-
89
- const command = new ConverseStreamCommand({
90
- modelId: "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6",
91
- system: [{ text: system_prompt || CLAUDE_SYSTEM_PROMPT }],
92
- messages: [{ role: "user", content: contentBlock }],
93
- inferenceConfig: { maxTokens: 48000, temperature: 1 },
94
- additionalModelRequestFields: { thinking: { type: "adaptive" }, output_config: { effort: "high" } }
95
- });
96
-
97
- const response = await bedrockClient.send(command);
98
- for await (const chunk of response.stream) {
99
- if (chunk.contentBlockDelta) {
100
- const delta = chunk.contentBlockDelta.delta;
101
- if (delta.reasoningContent?.text) res.write(`__THINK__${delta.reasoningContent.text}`);
102
- else if (delta.text) res.write(delta.text);
103
- }
104
- }
105
- res.end();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  } else {
107
- let messagesPayload = [{ role: "system", content: system_prompt || GPT_SYSTEM_PROMPT }];
108
- if (images && images.length > 0) {
109
- let userContent = [{ type: "text", text: prompt }];
110
- images.forEach(img => userContent.push({ type: "image_url", image_url: { url: img } }));
111
- messagesPayload.push({ role: "user", content: userContent });
112
- } else {
113
- messagesPayload.push({ role: "user", content: prompt });
114
- }
115
-
116
- const stream = await azureOpenAI.chat.completions.create({
117
- model: "gpt-5-mini",
118
- messages: messagesPayload,
119
- reasoning_effort: "high",
120
- stream: true,
121
- });
122
-
123
- for await (const chunk of stream) {
124
- const delta = chunk.choices[0]?.delta;
125
- if (delta?.reasoning_content) res.write(`__THINK__${delta.reasoning_content}`);
126
- else if (delta?.content) res.write(delta.content);
127
- }
128
- res.end();
129
  }
 
 
 
130
  } catch (err) {
131
- console.error(`❌ [STREAM ERROR]:`, err.message);
132
- res.write(`ERROR: ${err.message}`);
133
- res.end();
134
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  });
136
 
137
- app.listen(PORT, '0.0.0.0', () => console.log(`Main AI Agents live on port ${PORT}`));
 
1
  import express from 'express';
2
+ import bodyParser from 'body-parser';
3
  import cors from 'cors';
4
+ import { StateManager, initDB } from './stateManager.js';
5
+ import { AIEngine } from './aiEngine.js';
6
+ import crypto from "crypto";
7
  import dotenv from 'dotenv';
 
 
 
8
 
9
  dotenv.config();
10
+ initDB();
11
+ const supabase = StateManager.getSupabaseClient();
12
  const app = express();
13
  const PORT = process.env.PORT || 7860;
 
14
  app.use(cors());
15
+ app.use(bodyParser.json({ limit: '50mb' }));
16
 
17
+ const WORKER_PHASES = ["Worker: Analyzing Request...", "Worker: Reading Context...", "Worker: Checking Hierarchy...", "Worker: Thinking..."];
18
+ const PM_PHASES = ["Manager: Reviewing...", "Manager: Formulating Strategy...", "Manager: Delegating...", "Manager: Thinking..."];
 
19
 
20
/**
 * Publishes a timed sequence of progress messages for a project while an
 * agent call is in flight. Advances one phase every 1.5s until the list is
 * exhausted.
 * @param {string} projectId - Project whose status is updated.
 * @param {string} [type='worker'] - 'pm' selects PM_PHASES, anything else WORKER_PHASES.
 * @returns {Function} canceller — call it to stop the rotation early.
 */
function startStatusLoop(projectId, type = 'worker') {
  const phaseList = type === 'pm' ? PM_PHASES : WORKER_PHASES;
  let cursor = 0;
  StateManager.setStatus(projectId, phaseList[0]);
  const timer = setInterval(() => {
    cursor += 1;
    if (cursor >= phaseList.length) {
      clearInterval(timer);
      return;
    }
    StateManager.setStatus(projectId, phaseList[cursor]);
  }, 1500);
  return () => clearInterval(timer);
}
31
 
32
/**
 * Verifies that a user holds at least 50 credits of the given type.
 * Resolves silently when the balance is sufficient; throws otherwise.
 * A missing row or missing credit bucket counts as a zero balance.
 * @param {string} userId - Supabase `users.id`.
 * @param {string} [type='basic'] - Credit bucket inside the `credits` column.
 * @throws {Error} when the balance is below 50.
 */
async function checkMinimumCredits(userId, type = 'basic') {
  const { data } = await supabase.from('users').select('credits').eq('id', userId).single();
  const balance = data?.credits?.[type] || 0;
  if (balance < 50) {
    throw new Error(`Insufficient ${type} credits.`);
  }
}
 
37
 
38
/**
 * Deducts `amount` credits of the given type from a user's balance,
 * clamping at zero. Best-effort by design: failures are logged, never
 * thrown, so a billing hiccup cannot abort the AI pipeline.
 * @param {string} userId - Supabase `users.id`.
 * @param {number} amount - Credits to subtract.
 * @param {string} [type='basic'] - Credit bucket inside the `credits` JSON column.
 */
async function deductUserCredits(userId, amount, type = 'basic') {
  try {
    const { data } = await supabase.from('users').select('credits').eq('id', userId).single();
    const newVal = Math.max(0, (data?.credits?.[type] || 0) - amount);
    // FIX: guard the spread too — `data` is null when the user row is
    // missing, and the former `...data.credits` threw a TypeError inside
    // this try, silently skipping the deduction.
    await supabase.from('users').update({ credits: { ...(data?.credits ?? {}), [type]: newVal } }).eq('id', userId);
  } catch (e) { console.error("Credit Error:", e.message); }
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
/**
 * Serializes the optional context payloads (script, logs, hierarchy) into
 * the tagged plain-text format the AI agents consume.
 * @param {{hierarchyContext?: string, scriptContext?: {targetName: string, scriptSource: string}, logContext?: {logs: string}}} ctx
 * @returns {string} concatenated tagged sections, or "" when nothing is set.
 */
const formatContext = ({ hierarchyContext, scriptContext, logContext }) => {
  const sections = [];
  if (scriptContext) {
    sections.push(`\n[SCRIPT]: ${scriptContext.targetName}\n[SOURCE]: ${scriptContext.scriptSource}`);
  }
  if (logContext) {
    sections.push(`\n[LOGS]: ${logContext.logs}`);
  }
  if (hierarchyContext) {
    sections.push(`\n[HIERARCHY]: ${hierarchyContext}`);
  }
  return sections.join("");
};
53
 
54
/**
 * Pulls the agent-routing control tags out of a model response.
 * Each field is the first capture group of its tag, or undefined when
 * the tag is absent. `/s` lets the tags span newlines.
 * @param {string} text - Raw model output.
 * @returns {{pmQuestion?: string, pmRoute?: string, workerPrompt?: string}}
 */
const extractTags = (text) => {
  const firstGroup = (re) => text.match(re)?.[1];
  return {
    pmQuestion: firstGroup(/\[ASK_PM:\s*(.*?)\]/s),
    pmRoute: firstGroup(/\[ROUTE_TO_PM:\s*(.*?)\]/s),
    workerPrompt: firstGroup(/WORKER_PROMPT:\s*(.*)/s)
  };
};
59
 
60
/**
 * Background agent loop for one feedback round. Runs AFTER the HTTP
 * response has been sent (fire-and-forget from /project/feedback); all
 * progress is surfaced through StateManager (status, snapshot, stream,
 * command queue) and polled via /project/ping.
 *
 * Flow: Worker pass 1 → if the worker emits an [ASK_PM:]/[ROUTE_TO_PM:]
 * tag, a PM pass runs and a final Worker pass executes the PM's
 * WORKER_PROMPT; otherwise pass 1 is committed directly. Always ends by
 * marking the project idle and deducting 10 credits, or marking it
 * "error" on failure. Never throws.
 *
 * @param {string} projectId - Project being worked on.
 * @param {string} userId - Owner, charged at the end.
 * @param {string} fullInput - Prompt plus serialized context (formatContext).
 * @param {Array} [images] - Optional image payloads forwarded to the worker.
 */
async function runAsyncFeedback(projectId, userId, fullInput, images = []) {
  try {
    const project = await StateManager.getProject(projectId);
    StateManager.clearSnapshot(projectId);

    // --- STEP 1: WORKER ---
    // Status loop runs until the first token arrives; each callback kills
    // it so real progress replaces the canned phases.
    let stopStatus = startStatusLoop(projectId, 'worker');
    let workerPass1 = "";
    // NOTE(review): result1 is never read — the text is accumulated via the
    // chunk callback instead. Presumably kept for debugging; confirm.
    const result1 = await AIEngine.callWorkerStream(
      project.workerHistory, fullInput,
      // Thoughts go to the snapshot only (not the persistent stream).
      (thought) => { stopStatus(); StateManager.setStatus(projectId, "Worker: Thinking..."); StateManager.appendSnapshotOnly(projectId, thought); },
      // Answer chunks are accumulated locally AND streamed to the client.
      (chunk) => { stopStatus(); StateManager.setStatus(projectId, "Worker: Coding..."); workerPass1 += chunk; StateManager.appendStream(projectId, chunk); },
      images
    );
    stopStatus();

    const tags = extractTags(workerPass1);
    if (tags.pmQuestion || tags.pmRoute) {
      // Commit Pass 1
      await StateManager.addHistory(projectId, 'worker', 'user', fullInput);
      await StateManager.addHistory(projectId, 'worker', 'model', workerPass1);
      await StateManager.queueCommand(projectId, workerPass1);

      // --- STEP 2: PROJECT MANAGER ---
      StateManager.clearSnapshot(projectId);
      stopStatus = startStatusLoop(projectId, 'pm');
      // A routed task takes precedence over a question when both tags exist.
      const pmInput = tags.pmRoute ? `[ROUTED TASK]: ${tags.pmRoute}` : `[QUESTION]: ${tags.pmQuestion}`;
      let pmText = "";
      await AIEngine.callPMStream(
        project.pmHistory, pmInput,
        (thought) => { stopStatus(); StateManager.setStatus(projectId, "Manager: Thinking..."); StateManager.appendSnapshotOnly(projectId, thought); },
        // PM output goes to the snapshot only — the client stream is
        // reserved for worker output.
        (chunk) => { stopStatus(); StateManager.setStatus(projectId, "Manager: Architecting..."); pmText += chunk; StateManager.appendSnapshotOnly(projectId, chunk); }
      );
      stopStatus();

      await StateManager.addHistory(projectId, 'pm', 'user', pmInput);
      await StateManager.addHistory(projectId, 'pm', 'model', pmText);
      await StateManager.queueCommand(projectId, pmText);

      // Notify Worker of PM Answer
      const pmAnswerVisible = `[PM RESPONSE]:\n${pmText}`;
      await StateManager.addHistory(projectId, 'worker', 'model', pmAnswerVisible);

      // --- STEP 3: WORKER FINAL PASS ---
      // Fall back to a generic instruction if the PM omitted WORKER_PROMPT.
      const nextTask = extractTags(pmText).workerPrompt || "Proceed with implementation.";
      StateManager.clearSnapshot(projectId);
      stopStatus = startStatusLoop(projectId, 'worker');
      let workerPass2 = "";
      const currentHistory = await StateManager.getProject(projectId); // Refresh history
      await AIEngine.callWorkerStream(
        currentHistory.workerHistory, nextTask,
        (thought) => { stopStatus(); StateManager.appendSnapshotOnly(projectId, thought); },
        (chunk) => { stopStatus(); workerPass2 += chunk; StateManager.appendStream(projectId, chunk); }
      );
      stopStatus();
      // The synthetic user turn (nextTask) is intentionally not persisted
      // here — only the model output is. TODO confirm this asymmetry.
      await StateManager.addHistory(projectId, 'worker', 'model', workerPass2);
      await StateManager.queueCommand(projectId, workerPass2);
    } else {
      // No PM escalation: commit pass 1 as the final result.
      await StateManager.addHistory(projectId, 'worker', 'user', fullInput);
      await StateManager.addHistory(projectId, 'worker', 'model', workerPass1);
      await StateManager.queueCommand(projectId, workerPass1);
    }

    await StateManager.updateProject(projectId, { status: "idle" });
    await deductUserCredits(userId, 10); // Standard cost
  } catch (err) {
    // Nobody awaits this function, so errors are absorbed here and
    // surfaced to the user via the project status instead.
    console.error("Feedback Loop Failed:", err.message);
    StateManager.setStatus(projectId, "Error: " + err.message);
    await StateManager.updateProject(projectId, { status: "error" });
  }
}
131
+
132
/**
 * Kicks off one feedback round: authorizes the caller, checks credits,
 * marks the project busy, acknowledges immediately, then runs the agent
 * loop in the background (progress is polled via /project/ping).
 */
app.post('/project/feedback', async (req, res) => {
  const { userId, projectId, prompt, hierarchyContext, scriptContext, logContext, images } = req.body;
  try {
    const project = await StateManager.getProject(projectId);
    const owned = project && project.userId === userId;
    if (!owned) return res.status(403).json({ error: "Auth Error" });

    await checkMinimumCredits(userId, 'basic');
    await StateManager.updateProject(projectId, { status: "working" });

    // Respond before the (slow) agent loop starts.
    res.json({ success: true });

    const context = formatContext({ hierarchyContext, scriptContext, logContext });
    const fullInput = `USER: ${prompt || "Auto Feedback"}${context}`;
    // Deliberately not awaited — runAsyncFeedback handles its own errors.
    runAsyncFeedback(projectId, userId, fullInput, images || []);
  } catch (err) { res.status(500).json({ error: err.message }); }
});
144
+
145
/**
 * Polling endpoint shared by two clients: the frontend (wants status +
 * snapshot only) and the engine-side client (drains queued commands and
 * streamed text). Unauthorized pings get a harmless IDLE action.
 */
app.post('/project/ping', async (req, res) => {
  const { projectId, userId, isFrontend } = req.body;

  const project = await StateManager.getProject(projectId);
  const authorized = project && project.userId === userId;
  if (!authorized) return res.json({ action: "IDLE" });

  // Frontend pings only read display state, never consume commands.
  if (isFrontend) {
    return res.json({
      status: StateManager.getStatus(projectId),
      snapshot: StateManager.getSnapshot(projectId)
    });
  }

  const command = await StateManager.popCommand(projectId);
  const stream = StateManager.popStream(projectId);
  const resData = { action: stream ? "STREAM_APPEND" : "IDLE", stream };
  // A pending command overrides the stream action.
  if (command) {
    resData.action = command.type;
    resData.target = command.payload;
    resData.code = command.payload;
  }
  res.json(resData);
});
156
+
157
// Standard Handlers
/**
 * Creates a new project: generates an ID, asks the AI to grade/title the
 * description, and persists the row.
 * FIX: the handler previously had no try/catch — Express 4 does not catch
 * rejections from async handlers, so a failing gradeProject/insert left
 * the request hanging with an unhandled rejection.
 */
app.post('/onboarding/create', async (req, res) => {
  const { userId, description, answers } = req.body;
  try {
    // Timestamp + 2 random bytes keeps IDs short; collisions are only
    // possible within the same millisecond.
    const projectId = `proj_${Date.now()}_${crypto.randomBytes(2).toString("hex")}`;
    const grade = await AIEngine.gradeProject(description, answers);
    await supabase.from('projects').insert({ id: projectId, user_id: userId, info: { title: grade.title, description, status: "idle" } });
    res.json({ success: true, projectId, grade });
  } catch (err) {
    console.error("Onboarding Error:", err.message);
    res.status(500).json({ error: err.message });
  }
});
165
 
166
+ app.listen(PORT, () => console.log(`AI Backend live on ${PORT}`));