everydaycats committed on
Commit
69ede17
·
verified ·
1 Parent(s): 50ca5e6

Create aiEngine.js

Browse files
Files changed (1) hide show
  1. aiEngine.js +75 -0
aiEngine.js ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // aiEngine.js
2
+ import { GoogleGenAI } from '@google/genai';
3
+ import dotenv from 'dotenv';
4
+ dotenv.config();
5
+
6
+ const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
7
+
8
+ // Prompt Loader
9
+ import fs from 'fs';
10
+ const prompts = JSON.parse(fs.readFileSync('./prompts.json', 'utf8'));
11
+
12
/**
 * AIEngine — thin wrappers around the Gemini API for the two roles used by
 * this app: a "PM" planner model and a "Worker" executor model.
 *
 * Both methods take a `history` array of Gemini Content objects
 * ({ role, parts: [...] }) and a string `input`, prepend the matching system
 * prompt from prompts.json as a leading user turn, and resolve with the
 * model's text reply.
 */
export const AIEngine = {
  /**
   * PM MODEL (Gemini 3.0 Pro Preview - High Thinking)
   *
   * @param {Array<object>} history - Prior conversation turns in Gemini
   *   Content format ({ role, parts }).
   * @param {string} input - The new user message.
   * @returns {Promise<string|undefined>} The model's text response.
   * @throws Re-throws any API error after logging it.
   */
  callPM: async (history, input) => {
    const modelId = 'gemini-3-pro-preview'; // Per prompt requirements
    const config = {
      thinkingConfig: { thinkingLevel: 'HIGH' },
      tools: [{ googleSearch: {} }],
    };

    // System prompt injected as a leading user turn.
    // NOTE(review): the SDK also accepts config.systemInstruction, which
    // keeps the system prompt out of the turn history — consider migrating.
    const contents = [
      { role: 'user', parts: [{ text: prompts.pm_system_prompt }] },
      ...history,
      { role: 'user', parts: [{ text: input }] },
    ];

    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config,
        contents,
      });
      // BUG FIX: in @google/genai, `text` is a getter property, not a
      // method — `response.text()` throws "response.text is not a function".
      return response.text; // Simple text return for non-stream internal logic
    } catch (error) {
      console.error("PM AI Error:", error);
      throw error;
    }
  },

  /**
   * WORKER MODEL (Gemini 2.5 Flash - Fast execution)
   *
   * @param {Array<object>} history - Prior conversation turns in Gemini
   *   Content format ({ role, parts }).
   * @param {string} input - The new user message.
   * @param {object|null} [imagePart=null] - Optional inline-data image part
   *   appended to the final user turn.
   * @returns {Promise<string|undefined>} The model's text response.
   * @throws Re-throws any API error after logging it.
   */
  callWorker: async (history, input, imagePart = null) => {
    const modelId = 'gemini-flash-latest'; // Per prompt requirements
    const config = {
      // -1 requests the SDK's dynamic thinking budget — confirm against
      // current @google/genai docs.
      thinkingConfig: { thinkingBudget: -1 },
      tools: [{ googleSearch: {} }],
    };

    const currentParts = [{ text: input }];
    if (imagePart) {
      currentParts.push(imagePart);
    }

    const contents = [
      { role: 'user', parts: [{ text: prompts.worker_system_prompt }] },
      ...history,
      { role: 'user', parts: currentParts },
    ];

    try {
      const response = await genAI.models.generateContent({
        model: modelId,
        config,
        contents,
      });
      // BUG FIX: `text` is a getter, not a method (same defect as callPM).
      return response.text;
    } catch (error) {
      console.error("Worker AI Error:", error);
      throw error;
    }
  },
};