lenzcom committed on
Commit
5d53edb
·
verified ·
1 Parent(s): 5639fcf

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. server.js +63 -58
server.js CHANGED
@@ -1,115 +1,120 @@
 
1
  import express from 'express';
2
- import { SystemMessage, HumanMessage, AIMessage, LlamaCppLLM } from './src/index.js';
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import bodyParser from 'body-parser';
4
  import fs from 'fs';
5
  import path from 'path';
6
 
7
  // --- CONFIGURATION ---
8
  const PORT = 7860;
9
- const LOG_FILE = 'activity.log';
10
  const MODEL_PATH = './models/Qwen3-1.7B-Q8_0.gguf';
11
 
12
- // --- LOGGING UTILS ---
13
- function log(type, content) {
14
- const timestamp = new Date().toISOString();
15
- const entry = `[${timestamp}] [${type}] ${JSON.stringify(content)}
16
- `;
17
- console.log(entry.trim());
18
- fs.appendFileSync(LOG_FILE, entry);
19
- }
 
 
 
 
 
 
20
 
21
  // --- AI ENGINE ---
22
  let llm = null;
23
- let defaultSystemPrompt = "You are a helpful and intelligent AI assistant. You answer concisely and accurately.";
24
 
25
  async function initModel() {
 
 
 
 
26
  try {
27
- log('SYSTEM', 'Loading model...');
28
  if (!fs.existsSync(MODEL_PATH)) {
29
- log('ERROR', `Model not found at ${MODEL_PATH}`);
 
 
 
 
 
 
30
  return;
31
  }
32
 
33
  llm = new LlamaCppLLM({
34
  modelPath: MODEL_PATH,
35
- temperature: 0.7, // Higher for creativity
36
- maxTokens: 1024 // Longer responses
37
  });
38
 
39
- // Warmup
40
  await llm.invoke("Hello");
41
- log('SYSTEM', 'Model loaded successfully. Ready to serve.');
42
  } catch (err) {
43
- log('FATAL', err.message);
44
  }
45
  }
46
 
47
  // --- SERVER SETUP ---
48
  const app = express();
49
  app.use(bodyParser.json());
 
50
 
51
- // 1. CHAT ENDPOINT (Main Interaction)
52
  app.post('/chat', async (req, res) => {
53
- if (!llm) return res.status(503).json({ error: "System initializing..." });
54
 
55
  try {
56
- const { message, history = [], system_instruction } = req.body;
 
57
 
58
- // Construct Message Chain
59
  const messages = [];
60
-
61
- // 1. System Prompt (Training/Instruction)
62
- messages.push(new SystemMessage(system_instruction || defaultSystemPrompt));
63
 
64
- // 2. Chat History (Short-term memory)
65
  history.forEach(msg => {
66
  if (msg.role === 'user') messages.push(new HumanMessage(msg.content));
67
  if (msg.role === 'ai') messages.push(new AIMessage(msg.content));
68
  });
69
 
70
- // 3. Current Message
71
  messages.push(new HumanMessage(message));
 
72
 
73
- log('REQUEST', { message, instruction: system_instruction || "default" });
74
-
75
- // Invoke AI
76
  const response = await llm.invoke(messages);
77
-
78
- log('RESPONSE', response.content);
79
  res.json({ reply: response.content });
80
 
81
  } catch (error) {
82
- log('ERROR', error.message);
83
  res.status(500).json({ error: error.message });
84
  }
85
  });
86
 
87
- // 2. CONFIG ENDPOINT (Set default behavior)
88
- app.post('/config', (req, res) => {
89
- const { system_prompt, temperature } = req.body;
90
- if (system_prompt) {
91
- defaultSystemPrompt = system_prompt;
92
- log('CONFIG', `Updated default system prompt to: ${system_prompt.substring(0, 50)}...`);
93
- }
94
- // Note: Temperature change requires LLM reload in current simple wrapper, skipping for now
95
- res.json({ success: true, current_system_prompt: defaultSystemPrompt });
96
- });
97
-
98
- // 3. LOGS ENDPOINT (Realtime Monitoring)
99
- app.get('/logs', (req, res) => {
100
- if (fs.existsSync(LOG_FILE)) {
101
- const logs = fs.readFileSync(LOG_FILE, 'utf-8');
102
- // Return last 100 lines
103
- const lines = logs.trim().split('\n').slice(-100).join('\n');
104
- res.send(`<pre>${lines}</pre>`);
105
- } else {
106
- res.send('No logs yet.');
107
- }
108
  });
109
 
110
- app.get('/', (req, res) => res.send('AI Agent v2 (Chat Mode) Running.'));
111
-
112
  app.listen(PORT, '0.0.0.0', () => {
113
- console.log(`Server listening on ${PORT}`);
114
  initModel();
115
- });
 
1
// Module bootstrap: announce startup, pull in static deps, and load the AI
// classes through a guarded dynamic import so an import failure is logged
// instead of killing the process before any output is visible.
console.log("--> SERVER STARTING...");
import express from 'express';
// NOTE(review): the static `import { ... } from './src/index.js'` was suspected
// of crashing on load, so the classes are resolved dynamically below.
let SystemMessage, HumanMessage, AIMessage, LlamaCppLLM;

try {
  const mod = await import('./src/index.js');
  ({ SystemMessage, HumanMessage, AIMessage, LlamaCppLLM } = mod);
  console.log("--> Modules loaded successfully");
} catch (e) {
  console.error("--> FATAL IMPORT ERROR:", e);
  // Keep the server running so the failure shows up in the logs.
}

import bodyParser from 'body-parser';
import fs from 'fs';
import path from 'path';
23
 
24
// --- CONFIGURATION ---
const PORT = 7860;
const MODEL_PATH = './models/Qwen3-1.7B-Q8_0.gguf';

// --- LOAD PROMPTS ---
// NOTE(review): 'MssterPrompt' looks like a typo for 'MasterPrompt', but the
// name must match the file actually shipped with the app — confirm before renaming.
const TALK2PEOPLE_PROMPT_FILE = 'MssterPrompt_Talk2People.txt';

/**
 * Best-effort read of a prompt file.
 * Avoids the exists/read race of the previous existsSync + readFileSync pair:
 * any read failure (missing file, permissions) yields the fallback text.
 * @param {string} file - path of the prompt file
 * @param {string} fallback - text returned when the file cannot be read
 * @returns {string}
 */
function loadPrompt(file, fallback) {
  try {
    return fs.readFileSync(file, 'utf-8');
  } catch {
    return fallback;
  }
}

// Named system prompts, selectable per request via the `role` body field.
const PROMPTS = {
  "default": "You are a helpful AI assistant. Answer concisely.",
  "talk2people": loadPrompt(TALK2PEOPLE_PROMPT_FILE, "Default Talk2People prompt (File missing)"),
  "coder": "You are an expert programmer."
};

console.log("--> Prompts loaded. Talk2People length:", PROMPTS['talk2people'].length);

// User-facing greeting (Vietnamese): roughly "Hello, this is a private AI bot
// system trained by Da Hanh Studio, not dependent on Google".
const WELCOME_MESSAGE = "Xin chào đây là hệ thống bot AI hoạt động riêng tư được đào tạo bởi Dạ Hành Studio , Ko phụ thuộc vào Google";
42
 
43
// --- AI ENGINE ---
// Shared handle to the loaded model; null until initModel succeeds, which the
// /chat endpoint uses to return 503 while the system is still starting.
let llm = null;

/**
 * Load and warm up the local GGUF model.
 * Never throws: all failures are logged and leave `llm` as null so the server
 * keeps serving (the /chat handler degrades to a 503).
 * @returns {Promise<void>}
 */
async function initModel() {
  // The class may be missing when the dynamic import of './src/index.js' failed.
  if (!LlamaCppLLM) {
    console.error("--> Cannot init model because LlamaCppLLM class is missing.");
    return;
  }
  try {
    console.log('--> Loading model...');
    if (!fs.existsSync(MODEL_PATH)) {
      console.error(`--> Model not found at ${MODEL_PATH}`);
      // Diagnostic aid: show whether the models directory exists and what it holds.
      if (fs.existsSync('./models')) {
        console.log("--> Models dir content:", fs.readdirSync('./models'));
      } else {
        console.log("--> Models dir MISSING!");
      }
      return;
    }

    llm = new LlamaCppLLM({
      modelPath: MODEL_PATH,
      temperature: 0.7,
      maxTokens: 1024
    });

    // Warmup request so the first real user request isn't slow.
    await llm.invoke("Hello");
    console.log('--> Model loaded successfully.');
  } catch (err) {
    // Log the full error (not just err.message) so the stack trace is kept,
    // consistent with the import-failure handler above.
    console.error('--> FATAL MODEL ERROR:', err);
  }
}
76
 
77
// --- SERVER SETUP ---
const app = express();
app.use(bodyParser.json());
app.use(express.static('public')); // serves the bundled web UI, if any

// Main chat endpoint.
// Body: { message: string, history?: [{role: 'user'|'ai', content}], role?: string }.
// Responds { reply } on success; { error } with 400/500, or { reply } with 503
// while the model is still loading.
app.post('/chat', async (req, res) => {
  // Model not ready (still loading, or load failed) — ask the client to wait.
  if (!llm) return res.status(503).json({ reply: "Hệ thống đang khởi động hoặc lỗi model. Vui lòng chờ..." });

  try {
    const { message, history = [], role = 'default' } = req.body;

    // Reject missing/blank messages up front instead of passing `undefined`
    // into the model pipeline.
    if (typeof message !== 'string' || message.trim() === '') {
      return res.status(400).json({ error: 'message must be a non-empty string' });
    }

    // Object.hasOwn guards against prototype keys (e.g. role="constructor")
    // resolving to a non-prompt value from untrusted input.
    const systemInstruction = Object.hasOwn(PROMPTS, role) ? PROMPTS[role] : PROMPTS['default'];

    // Build the message chain: system prompt, prior turns, current user turn.
    const messages = [new SystemMessage(systemInstruction)];
    const turns = Array.isArray(history) ? history : []; // tolerate malformed history
    for (const msg of turns) {
      if (msg.role === 'user') messages.push(new HumanMessage(msg.content));
      if (msg.role === 'ai') messages.push(new AIMessage(msg.content));
    }
    messages.push(new HumanMessage(message));

    console.log(`[${role}] User: ${message}`);
    const response = await llm.invoke(messages);
    console.log(`[${role}] AI: ${response.content}`);
    res.json({ reply: response.content });
  } catch (error) {
    console.error(error);
    res.status(500).json({ error: error.message });
  }
});
109
 
110
// Discovery endpoint: returns the welcome greeting and the list of selectable
// prompt roles so clients can populate their UI.
app.get('/info', (_req, res) => {
  const roles = Object.keys(PROMPTS);
  res.json({ welcome: WELCOME_MESSAGE, roles });
});
116
 
 
 
117
// Bind on all interfaces (container-friendly), then start model loading in the
// background; requests arriving before the model is ready get a 503 from /chat.
app.listen(PORT, '0.0.0.0', () => {
  console.log(`--> Server listening on ${PORT}`);
  // initModel catches its own errors, but attach a .catch so an unexpected
  // rejection is never left as a floating promise.
  initModel().catch((err) => console.error('--> initModel failed:', err));
});