File size: 8,209 Bytes
23a8239
7df4949
7afc828
23a8239
6ae9370
48725d9
23a8239
 
01a73c9
7afc828
 
 
01a73c9
 
 
 
 
 
7df4949
01a73c9
97401bc
01a73c9
f911ea7
01a73c9
f911ea7
23a8239
 
 
01a73c9
 
 
 
f911ea7
23a8239
7afc828
01a73c9
3ad9602
 
01a73c9
3ad9602
 
01a73c9
3ad9602
 
 
 
 
01a73c9
3ad9602
 
 
01a73c9
 
 
 
 
 
 
 
 
3ad9602
 
 
01a73c9
 
 
3ad9602
 
 
01a73c9
 
 
97401bc
01a73c9
 
 
 
 
 
 
3ad9602
 
01a73c9
 
 
 
3ad9602
 
 
01a73c9
 
f911ea7
01a73c9
 
8d5582b
 
 
 
01a73c9
3ad9602
 
 
01a73c9
 
 
 
 
3ad9602
 
 
 
 
01a73c9
 
 
 
3ad9602
 
 
01a73c9
 
 
 
 
 
3ad9602
 
 
 
01a73c9
 
f911ea7
01a73c9
8d5582b
 
 
 
01a73c9
3ad9602
 
 
01a73c9
 
 
 
 
3ad9602
 
 
 
 
 
01a73c9
 
 
3ad9602
 
 
01a73c9
 
 
 
 
3ad9602
 
 
 
 
01a73c9
 
3ad9602
 
 
 
01a73c9
 
 
 
 
3ad9602
 
01a73c9
f911ea7
3ad9602
 
 
 
 
 
01a73c9
 
 
 
 
3ad9602
 
 
01a73c9
3ad9602
859196a
01a73c9
 
 
 
48725d9
 
01a73c9
48725d9
01a73c9
 
 
 
 
 
23a8239
01a73c9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
import dotenv from 'dotenv';
import fs from 'fs';
import path from 'path';

// Load variables from a local .env file into process.env (e.g. REMOTE_AI_URL below).
dotenv.config();

// Base URL of the remote inference server; override via REMOTE_AI_URL.
const REMOTE_SERVER_URL = process.env.REMOTE_AI_URL || "http://localhost:7860";

// --- PROMPT LOADING ---
// System prompts live in ./prompts.json; fall back to an empty map when the
// file is absent or unreadable so the engine can still run on its defaults.
let prompts = {};
const promptsFile = path.resolve('./prompts.json');
try {
    if (fs.existsSync(promptsFile)) {
        const raw = fs.readFileSync(promptsFile, 'utf8');
        prompts = JSON.parse(raw);
    }
} catch (err) {
    console.error("Prompt Load Error:", err);
}

// --- HISTORY FLATTENER (WITH ECONOMIC LIMITS) ---
/**
 * Flattens a structured chat history into a single text prompt.
 *
 * @param {Array<{role: string, parts: Array<{text: string}>}>} history - chat turns ('model' maps to "Assistant", anything else to "User").
 * @param {string} currentInput   - the new user message appended at the end.
 * @param {string} systemPrompt   - prefixed as the "System:" line.
 * @param {number} [limit=10]     - ECONOMIC CAP: only the last `limit` messages are kept, to save tokens.
 * @param {string|null} [gdd]     - optional project GDD, injected as an anchor block (usually for PM calls).
 * @returns {string} prompt ending in "Assistant:" so the model continues from there.
 */
const flattenHistory = (history, currentInput, systemPrompt, limit = 10, gdd = null) => {
    // ECONOMIC CAP: slice history to the last 'limit' messages to save tokens.
    // Guard against a null/undefined history so callers can't crash us.
    const recentHistory = (history ?? []).slice(-limit);

    const context = recentHistory.map((m) => {
        const roleName = m.role === 'model' ? 'Assistant' : 'User';
        // Malformed turns (missing parts/text) degrade to an empty line
        // instead of throwing mid-conversation.
        const text = m.parts?.[0]?.text ?? '';
        return `${roleName}: ${text}`;
    }).join('\n');

    // Inject GDD only if provided (usually for PM)
    const projectAnchor = gdd ? `[PROJECT GDD REFERENCE]:\n${gdd}\n\n` : "";

    return `System: ${systemPrompt}\n\n${projectAnchor}${context}\nUser: ${currentInput}\nAssistant:`;
};

// --- STREAM HANDLER & USAGE PARSER ---
/**
 * Consumes a streaming HTTP response, forwarding visible text to `onOutput`
 * and "thought" segments (text after a __THINK__ marker) to `onThink`.
 * A trailing `__USAGE__{json}` marker carries token accounting for billing
 * and is never forwarded to the callbacks.
 *
 * @param {Response} response - fetch() response with a readable body stream.
 * @param {(t: string) => void} [onThink]  - receives thought text.
 * @param {(t: string) => void} [onOutput] - receives user-visible text.
 * @returns {Promise<{text: string, usage: {totalTokenCount: number, inputTokens: number, outputTokens: number}}>}
 * @throws {Error} when the response status is not ok.
 */
const handleStreamResponse = async (response, onThink, onOutput) => {
    if (!response.ok) throw new Error(`Stream Error: ${response.statusText}`);

    const reader = response.body.getReader();
    const decoder = new TextDecoder("utf-8");

    let fullStreamData = "";
    // Once __USAGE__ appears, everything after it is billing JSON, not text.
    let usageSeen = false;

    // Route a text fragment to the callbacks, separating thoughts from output.
    const emit = (text) => {
        if (text.includes("__THINK__")) {
            const parts = text.split("__THINK__");
            if (parts[0] && onOutput) onOutput(parts[0]);
            if (parts[1] && onThink) onThink(parts[1]);
        } else if (text) {
            if (onOutput) onOutput(text);
        }
    };

    while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        const chunk = decoder.decode(value, { stream: true });
        fullStreamData += chunk;

        if (usageSeen) continue; // remainder of the usage JSON payload
        if (chunk.includes("__USAGE__")) {
            usageSeen = true;
            // BUGFIX: stream the text preceding the marker instead of
            // dropping the whole chunk (the old code suppressed it entirely,
            // and also leaked usage-JSON continuation chunks to onOutput).
            emit(chunk.split("__USAGE__")[0]);
        } else {
            emit(chunk);
        }
    }

    // --- USAGE EXTRACTION FOR BILLING ---
    let usage = { totalTokenCount: 0, inputTokens: 0, outputTokens: 0 };
    let finalCleanText = fullStreamData;

    if (fullStreamData.includes("__USAGE__")) {
        const parts = fullStreamData.split("__USAGE__");
        finalCleanText = parts[0]; // The actual text content

        try {
            const parsedUsage = JSON.parse(parts[1]);
            usage.totalTokenCount = parsedUsage.totalTokenCount || 0;
            usage.inputTokens = parsedUsage.inputTokens || 0;
            usage.outputTokens = parsedUsage.outputTokens || 0;
        } catch (e) {
            console.warn("Usage Parse Failed in Engine:", e);
        }
    }

    // Clean any remaining tags
    finalCleanText = finalCleanText.split("__THINK__")[0].trim();

    return { text: finalCleanText, usage };
};

// Shared request helpers — every method previously repeated this boilerplate.

// POST a JSON payload to /api/generate and return the parsed JSON body.
const postGenerate = async (payload) => {
    const response = await fetch(`${REMOTE_SERVER_URL}/api/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload)
    });
    return await response.json();
};

// POST a JSON payload to /api/stream and return the raw streaming Response.
const postStream = (payload) => fetch(`${REMOTE_SERVER_URL}/api/stream`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload)
});

export const AIEngine = {
    // --- STREAMING METHODS (Main Loop) ---

    /**
     * Streaming PM call (model: "claude"). Thoughts/output are forwarded
     * through the callbacks; resolves with final text + usage for billing.
     */
    callPMStream: async (history, input, onThink, onOutput, gdd = null) => {
        const systemPrompt = prompts.pm_system_prompt || "You are a Project Manager.";
        // ECONOMIC CAP: 10 messages max for PM to maintain context but control costs
        const prompt = flattenHistory(history, input, systemPrompt, 10, gdd);
        const response = await postStream({ model: "claude", prompt, system_prompt: systemPrompt });
        return await handleStreamResponse(response, onThink, onOutput);
    },

    /**
     * Streaming Worker call (model: "gpt"); optionally attaches images.
     */
    callWorkerStream: async (history, input, onThink, onOutput, images = []) => {
        const systemPrompt = prompts.worker_system_prompt || "You are a Senior Engineer.";
        // ECONOMIC CAP: 8 messages max for Worker (they only need recent context)
        const prompt = flattenHistory(history, input, systemPrompt, 8, null);
        const response = await postStream({ model: "gpt", prompt, system_prompt: systemPrompt, images });
        return await handleStreamResponse(response, onThink, onOutput);
    },

    // --- BLOCKING CALLS (Background Initialization) ---

    /** Blocking PM call; same prompt construction as callPMStream. */
    callPM: async (history, input, gdd = null) => {
        const systemPrompt = prompts.pm_system_prompt || "You are a Project Manager.";
        // ECONOMIC CAP: 10 messages, matching callPMStream
        const prompt = flattenHistory(history, input, systemPrompt, 10, gdd);
        const result = await postGenerate({ model: "claude", prompt, system_prompt: systemPrompt });
        return { text: result.data, usage: result.usage || { totalTokenCount: 0 } };
    },

    /** Blocking Worker call; same prompt construction as callWorkerStream. */
    callWorker: async (history, input) => {
        const systemPrompt = prompts.worker_system_prompt || "You are a Senior Engineer.";
        // ECONOMIC CAP: 8 messages, matching callWorkerStream
        const prompt = flattenHistory(history, input, systemPrompt, 8, null);
        const result = await postGenerate({ model: "gpt", prompt, system_prompt: systemPrompt });
        return { text: result.data, usage: result.usage || { totalTokenCount: 0 } };
    },

    // --- UTILITIES (One-off calls) ---

    /** Asks the analyst model to turn a project idea into entry questions. */
    generateEntryQuestions: async (desc) => {
        const result = await postGenerate({
            model: "gpt",
            prompt: `Analyze this project idea: ${desc}`,
            system_prompt: prompts.analyst_system_prompt || "Output JSON only."
        });
        // Return parsed data AND usage for billing
        return { ...JSON.parse(result.data), usage: result.usage };
    },

    /** Grades a project description + answers; returns parsed JSON verdict. */
    gradeProject: async (desc, ans) => {
        const result = await postGenerate({
            model: "gpt",
            prompt: `Grade this project. Description: ${desc} Answers: ${JSON.stringify(ans)}`,
            system_prompt: prompts.analyst_system_prompt || "Output JSON only."
        });
        const parsed = JSON.parse(result.data);
        parsed.usage = result.usage; // Attach usage for billing
        return parsed;
    },

    /**
     * Requests an image from /api/image. Best-effort: returns null on any
     * failure instead of throwing (callers treat images as optional).
     */
    generateImage: async (prompt) => {
        try {
            const response = await fetch(`${REMOTE_SERVER_URL}/api/image`, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify({ prompt })
            });
            return await response.json(); // Expected { image: "base64..." }
        } catch (e) {
            console.error("Image Gen Error:", e);
            return null;
        }
    }
};