everydaytok committed on
Commit
48725d9
·
verified ·
1 Parent(s): ac15076

Update aiEngine.js

Browse files
Files changed (1) hide show
  1. aiEngine.js +153 -41
aiEngine.js CHANGED
@@ -3,6 +3,7 @@ import fs from 'fs';
3
  import path from 'path';
4
 
5
  dotenv.config();
 
6
  const REMOTE_SERVER_URL = process.env.REMOTE_AI_URL || "http://localhost:7860";
7
 
8
  let prompts = {};
@@ -19,73 +20,184 @@ const flattenHistory = (history, currentInput, systemPrompt) => {
19
  return `System: ${systemPrompt}\n\n${context}\nUser: ${currentInput}\nAssistant:`;
20
  };
21
 
 
22
  const handleStreamResponse = async (response, onThink, onOutput) => {
23
  if (!response.ok) throw new Error(`Stream Error: ${response.statusText}`);
 
24
  const reader = response.body.getReader();
25
  const decoder = new TextDecoder("utf-8");
26
  let fullText = "";
27
 
28
- try {
29
- while (true) {
30
- const { done, value } = await reader.read();
31
- if (done) break;
32
- const chunk = decoder.decode(value, { stream: true });
33
- if (chunk.startsWith("__THINK__")) {
34
- if (onThink) onThink(chunk.replace("__THINK__", ""));
35
- } else if (chunk.includes("__THINK__")) {
36
- const parts = chunk.split("__THINK__");
37
- if (parts[0] && onOutput) { onOutput(parts[0]); fullText += parts[0]; }
38
- if (parts[1] && onThink) onThink(parts[1]);
39
- } else {
40
- if (onOutput) onOutput(chunk);
41
- fullText += chunk;
 
 
 
42
  }
 
 
 
43
  }
44
- } catch (e) { console.error("Stream reader error:", e.message); }
45
  return { text: fullText, usage: { totalTokenCount: 0 } };
46
  };
47
 
48
  export const AIEngine = {
 
49
  callPMStream: async (history, input, onThink, onOutput) => {
50
- const systemPrompt = prompts.pm_system_prompt || "You are a lead lead architect.";
51
- const response = await fetch(`${REMOTE_SERVER_URL}/api/stream`, {
52
- method: 'POST',
53
- headers: { 'Content-Type': 'application/json' },
54
- body: JSON.stringify({ model: "claude", prompt: flattenHistory(history, input, ""), system_prompt: systemPrompt })
55
- });
56
- return await handleStreamResponse(response, onThink, onOutput);
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  },
 
 
58
  callWorkerStream: async (history, input, onThink, onOutput, images = []) => {
59
  const systemPrompt = prompts.worker_system_prompt || "You are a worker.";
60
- const response = await fetch(`${REMOTE_SERVER_URL}/api/stream`, {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  method: 'POST',
62
  headers: { 'Content-Type': 'application/json' },
63
- body: JSON.stringify({ model: "gpt", prompt: flattenHistory(history, input, ""), system_prompt: systemPrompt, images })
 
 
 
 
64
  });
65
- return await handleStreamResponse(response, onThink, onOutput);
 
 
 
 
 
 
 
 
 
 
 
66
  },
67
- callPM: async (history, input) => {
68
- const res = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
69
- method: 'POST', headers: { 'Content-Type': 'application/json' },
70
- body: JSON.stringify({ model: "claude", prompt: flattenHistory(history, input, ""), system_prompt: prompts.pm_system_prompt })
 
 
 
 
 
 
 
 
 
 
71
  });
72
- const result = await res.json();
73
- return { text: result.data, usage: { totalTokenCount: 0 } };
 
 
 
 
 
 
 
 
 
 
74
  },
 
75
  generateEntryQuestions: async (desc) => {
76
- const res = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
77
- method: 'POST', headers: { 'Content-Type': 'application/json' },
78
- body: JSON.stringify({ model: "gpt", prompt: `[OUTPUT ONLY JSON]\n Questions: ${desc}`, system_prompt: prompts.analyst_system_prompt })
 
 
 
 
 
 
79
  });
80
- const result = await res.json();
81
- return JSON.parse(result.data);
 
 
 
 
 
82
  },
 
83
  gradeProject: async (desc, ans) => {
84
- const res = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
85
- method: 'POST', headers: { 'Content-Type': 'application/json' },
86
- body: JSON.stringify({ model: "gpt", prompt: `[OUTPUT ONLY JSON]\n Grade: ${desc}\nAns: ${JSON.stringify(ans)}`, system_prompt: prompts.analyst_system_prompt })
 
 
 
 
 
 
87
  });
88
- const result = await res.json();
89
- return JSON.parse(result.data);
 
 
 
 
 
90
  }
91
  };
 
3
  import path from 'path';
4
 
5
  dotenv.config();
6
+
7
  const REMOTE_SERVER_URL = process.env.REMOTE_AI_URL || "http://localhost:7860";
8
 
9
  let prompts = {};
 
20
  return `System: ${systemPrompt}\n\n${context}\nUser: ${currentInput}\nAssistant:`;
21
  };
22
 
23
+ // HELPER: STREAM SPLITTER
24
  const handleStreamResponse = async (response, onThink, onOutput) => {
25
  if (!response.ok) throw new Error(`Stream Error: ${response.statusText}`);
26
+
27
  const reader = response.body.getReader();
28
  const decoder = new TextDecoder("utf-8");
29
  let fullText = "";
30
 
31
+ while (true) {
32
+ const { done, value } = await reader.read();
33
+ if (done) break;
34
+
35
+ const chunk = decoder.decode(value, { stream: true });
36
+
37
+ if (chunk.startsWith("__THINK__")) {
38
+ const thoughtContent = chunk.replace("__THINK__", "");
39
+ if (onThink) onThink(thoughtContent);
40
+ } else if (chunk.includes("__THINK__")) {
41
+ const parts = chunk.split("__THINK__");
42
+ if (parts[0] && onOutput) {
43
+ onOutput(parts[0]);
44
+ fullText += parts[0];
45
+ }
46
+ if (parts[1] && onThink) {
47
+ onThink(parts[1]);
48
  }
49
+ } else {
50
+ if (onOutput) onOutput(chunk);
51
+ fullText += chunk;
52
  }
53
+ }
54
  return { text: fullText, usage: { totalTokenCount: 0 } };
55
  };
56
 
57
  export const AIEngine = {
58
+ // --- PM STREAMING ---
59
  callPMStream: async (history, input, onThink, onOutput) => {
60
+ const systemPrompt = prompts.pm_system_prompt || "You are a pro manager.";
61
+ const fullPrompt = flattenHistory(history, input, "");
62
+
63
+ try {
64
+ const response = await fetch(`${REMOTE_SERVER_URL}/api/stream`, {
65
+ method: 'POST',
66
+ headers: { 'Content-Type': 'application/json' },
67
+ body: JSON.stringify({
68
+ model: "claude",
69
+ prompt: fullPrompt,
70
+ system_prompt: systemPrompt
71
+ })
72
+ });
73
+
74
+ return await handleStreamResponse(response, onThink, onOutput);
75
+
76
+ } catch (error) {
77
+ console.log("PM Stream error: ", error);
78
+ throw error;
79
+ }
80
  },
81
+
82
+ // --- WORKER STREAMING (WITH IMAGES) ---
83
  callWorkerStream: async (history, input, onThink, onOutput, images = []) => {
84
  const systemPrompt = prompts.worker_system_prompt || "You are a worker.";
85
+ const fullPrompt = flattenHistory(history, input, "");
86
+
87
+ try {
88
+ const response = await fetch(`${REMOTE_SERVER_URL}/api/stream`, {
89
+ method: 'POST',
90
+ headers: { 'Content-Type': 'application/json' },
91
+ body: JSON.stringify({
92
+ model: "gpt",
93
+ prompt: fullPrompt,
94
+ system_prompt: systemPrompt,
95
+ images: images // Pass images to Main Agent
96
+ })
97
+ });
98
+
99
+ return await handleStreamResponse(response, onThink, onOutput);
100
+
101
+ } catch (error) {
102
+ console.log("Worker Stream error: ", error);
103
+ throw error;
104
+ }
105
+ },
106
+
107
+ // --- LEGACY BLOCKING CALLS ---
108
+ callPM: async (history, input) => {
109
+ const systemPrompt = prompts.pm_system_prompt || "You are a pro manager.";
110
+ const fullPrompt = flattenHistory(history, input, "" );
111
+
112
+ try {
113
+ const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
114
  method: 'POST',
115
  headers: { 'Content-Type': 'application/json' },
116
+ body: JSON.stringify({
117
+ model: "claude",
118
+ prompt: fullPrompt,
119
+ system_prompt: systemPrompt
120
+ })
121
  });
122
+
123
+ const result = await response.json();
124
+ if (!result.success) throw new Error(result.error);
125
+
126
+ return {
127
+ text: result.data,
128
+ usage: { totalTokenCount: 0 }
129
+ };
130
+ } catch (error) {
131
+ console.log("PM error: ",error);
132
+ return { text: "", error };
133
+ }
134
  },
135
+
136
+ callWorker: async (history, input, images = []) => {
137
+ const systemPrompt = prompts.worker_system_prompt || "You are a worker.";
138
+ const fullPrompt = flattenHistory(history, input, "" );
139
+
140
+ try {
141
+ const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
142
+ method: 'POST',
143
+ headers: { 'Content-Type': 'application/json' },
144
+ body: JSON.stringify({
145
+ model: "gpt",
146
+ prompt: fullPrompt,
147
+ system_prompt: systemPrompt
148
+ })
149
  });
150
+
151
+ const result = await response.json();
152
+ if (!result.success) throw new Error(result.error);
153
+
154
+ return {
155
+ text: result.data,
156
+ usage: { totalTokenCount: 0 }
157
+ };
158
+ } catch (error) {
159
+ console.log("Worker error: ",error);
160
+ return { text: "", error };
161
+ }
162
  },
163
+
164
  generateEntryQuestions: async (desc) => {
165
+ try {
166
+ const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
167
+ method: 'POST',
168
+ headers: { 'Content-Type': 'application/json' },
169
+ body: JSON.stringify({
170
+ model: "gpt",
171
+ prompt: `[OUTPUT ONLY JSON]\n Generate entry questions for this idea: ${desc}`,
172
+ system_prompt: `Goal: ${prompts.analyst_system_prompt}`
173
+ })
174
  });
175
+
176
+ const result = await response.json();
177
+ return JSON.parse(result.data);
178
+
179
+ } catch (error) {
180
+ console.log("GenerateQ error: ",error)
181
+ }
182
  },
183
+
184
  gradeProject: async (desc, ans) => {
185
+ try {
186
+ const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
187
+ method: 'POST',
188
+ headers: { 'Content-Type': 'application/json' },
189
+ body: JSON.stringify({
190
+ model: "gpt",
191
+ prompt: `[OUTPUT ONLY JSON]\n. Grade this project. Desc: ${desc}\nAnswers: ${JSON.stringify(ans)}`,
192
+ system_prompt: prompts.analyst_system_prompt
193
+ })
194
  });
195
+
196
+ const result = await response.json();
197
+ return JSON.parse(result.data);
198
+
199
+ } catch (error) {
200
+ console.log("GenerateQ error: ",error)
201
+ }
202
  }
203
  };