lenzcom committed on
Commit
d541171
·
verified ·
1 Parent(s): 5d53edb

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. server.js +37 -37
server.js CHANGED
@@ -1,10 +1,7 @@
1
  console.log("--> SERVER STARTING...");
2
  import express from 'express';
3
- // import { SystemMessage, HumanMessage, AIMessage, LlamaCppLLM } from './src/index.js'; // CÓ THỂ LỖI Ở ĐÂY
4
- // Để chắc chắn, tôi sẽ import trực tiếp từ file thực tế nếu cần,
5
- // nhưng trước hết hãy thử wrap import để bắt lỗi
6
  let SystemMessage, HumanMessage, AIMessage, LlamaCppLLM;
7
-
8
  try {
9
  const module = await import('./src/index.js');
10
  SystemMessage = module.SystemMessage;
@@ -14,7 +11,6 @@ try {
14
  console.log("--> Modules loaded successfully");
15
  } catch (e) {
16
  console.error("--> FATAL IMPORT ERROR:", e);
17
- // Vẫn giữ server chạy để xem log
18
  }
19
 
20
  import bodyParser from 'body-parser';
@@ -25,48 +21,33 @@ import path from 'path';
25
  const PORT = 7860;
26
  const MODEL_PATH = './models/Qwen3-1.7B-Q8_0.gguf';
27
 
28
- // ... (Các phần code khác giữ nguyên nhưng thêm check null) ...
29
-
30
  // --- LOAD PROMPTS ---
31
  const PROMPTS = {
32
  "default": "You are a helpful AI assistant. Answer concisely.",
33
  "talk2people": fs.existsSync('MssterPrompt_Talk2People.txt')
34
  ? fs.readFileSync('MssterPrompt_Talk2People.txt', 'utf-8')
35
- : "Default Talk2People prompt (File missing)",
36
  "coder": "You are an expert programmer."
37
  };
38
 
39
- console.log("--> Prompts loaded. Talk2People length:", PROMPTS['talk2people'].length);
40
-
41
  const WELCOME_MESSAGE = "Xin chào đây là hệ thống bot AI hoạt động riêng tư được đào tạo bởi Dạ Hành Studio , Ko phụ thuộc vào Google";
42
 
43
  // --- AI ENGINE ---
44
  let llm = null;
45
 
46
  async function initModel() {
47
- if (!LlamaCppLLM) {
48
- console.error("--> Cannot init model because LlamaCppLLM class is missing.");
49
- return;
50
- }
51
  try {
52
  console.log('--> Loading model...');
53
  if (!fs.existsSync(MODEL_PATH)) {
54
  console.error(`--> Model not found at ${MODEL_PATH}`);
55
- // Check if models folder exists
56
- if (fs.existsSync('./models')) {
57
- console.log("--> Models dir content:", fs.readdirSync('./models'));
58
- } else {
59
- console.log("--> Models dir MISSING!");
60
- }
61
  return;
62
  }
63
-
64
  llm = new LlamaCppLLM({
65
  modelPath: MODEL_PATH,
66
  temperature: 0.7,
67
  maxTokens: 1024
68
  });
69
-
70
  await llm.invoke("Hello");
71
  console.log('--> Model loaded successfully.');
72
  } catch (err) {
@@ -77,44 +58,63 @@ async function initModel() {
77
  // --- SERVER SETUP ---
78
  const app = express();
79
  app.use(bodyParser.json());
 
 
80
  app.use(express.static('public'));
81
 
82
- app.post('/chat', async (req, res) => {
83
- if (!llm) return res.status(503).json({ reply: "Hệ thống đang khởi động hoặc lỗi model. Vui lòng chờ..." });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
 
 
85
  try {
86
  const { message, history = [], role = 'default' } = req.body;
87
  const systemInstruction = PROMPTS[role] || PROMPTS['default'];
88
 
89
- const messages = [];
90
- messages.push(new SystemMessage(systemInstruction));
91
-
92
  history.forEach(msg => {
93
  if (msg.role === 'user') messages.push(new HumanMessage(msg.content));
94
  if (msg.role === 'ai') messages.push(new AIMessage(msg.content));
95
  });
96
-
97
  messages.push(new HumanMessage(message));
98
- console.log(`[${role}] User: ${message}`);
99
 
100
  const response = await llm.invoke(messages);
101
- console.log(`[${role}] AI: ${response.content}`);
102
  res.json({ reply: response.content });
103
-
104
  } catch (error) {
105
- console.error(error);
106
  res.status(500).json({ error: error.message });
107
  }
108
  });
109
 
110
  app.get('/info', (req, res) => {
111
- res.json({
112
- welcome: WELCOME_MESSAGE,
113
- roles: Object.keys(PROMPTS)
114
- });
115
  });
116
 
117
  app.listen(PORT, '0.0.0.0', () => {
118
  console.log(`--> Server listening on ${PORT}`);
119
  initModel();
120
- });
 
1
  console.log("--> SERVER STARTING...");
2
  import express from 'express';
3
+ // --- DYNAMIC IMPORT SECTION ---
 
 
4
  let SystemMessage, HumanMessage, AIMessage, LlamaCppLLM;
 
5
  try {
6
  const module = await import('./src/index.js');
7
  SystemMessage = module.SystemMessage;
 
11
  console.log("--> Modules loaded successfully");
12
  } catch (e) {
13
  console.error("--> FATAL IMPORT ERROR:", e);
 
14
  }
15
 
16
  import bodyParser from 'body-parser';
 
21
  const PORT = 7860;
22
  const MODEL_PATH = './models/Qwen3-1.7B-Q8_0.gguf';
23
 
 
 
24
// --- LOAD PROMPTS ---
// The Talk2People persona prefers an on-disk prompt file; when that file is
// absent we fall back to a built-in director persona so the role still works.
const TALK2PEOPLE_PROMPT_FILE = 'MssterPrompt_Talk2People.txt';
const talk2peoplePrompt = fs.existsSync(TALK2PEOPLE_PROMPT_FILE)
  ? fs.readFileSync(TALK2PEOPLE_PROMPT_FILE, 'utf-8')
  : "You are a creative Video Director specializing in realistic Vietnamese scenes.";

// Role name -> system instruction, looked up by the /chat endpoint.
const PROMPTS = {
  "default": "You are a helpful AI assistant. Answer concisely.",
  "talk2people": talk2peoplePrompt,
  "coder": "You are an expert programmer."
};
32
 
 
 
33
  const WELCOME_MESSAGE = "Xin chào đây là hệ thống bot AI hoạt động riêng tư được đào tạo bởi Dạ Hành Studio , Ko phụ thuộc vào Google";
34
 
35
  // --- AI ENGINE ---
36
  let llm = null;
37
 
38
  async function initModel() {
39
+ if (!LlamaCppLLM) return;
 
 
 
40
  try {
41
  console.log('--> Loading model...');
42
  if (!fs.existsSync(MODEL_PATH)) {
43
  console.error(`--> Model not found at ${MODEL_PATH}`);
 
 
 
 
 
 
44
  return;
45
  }
 
46
  llm = new LlamaCppLLM({
47
  modelPath: MODEL_PATH,
48
  temperature: 0.7,
49
  maxTokens: 1024
50
  });
 
51
  await llm.invoke("Hello");
52
  console.log('--> Model loaded successfully.');
53
  } catch (err) {
 
58
  // --- SERVER SETUP ---
59
  const app = express();
60
  app.use(bodyParser.json());
61
+
62
+ // Serve static files from 'public' folder
63
  app.use(express.static('public'));
64
 
65
// FALLBACK ROUTE: If index.html is missing or static fail, serve HTML directly
app.get('/', (req, res) => {
  // Prefer the real UI bundle when it exists on disk.
  const indexPath = path.join(process.cwd(), 'public', 'index.html');
  if (fs.existsSync(indexPath)) {
    res.sendFile(indexPath);
    return;
  }

  // Minimal inline status page shown when the UI is absent.
  res.send(`
<!DOCTYPE html>
<html lang="vi">
<head>
<meta charset="UTF-8">
<title>Dạ Hành AI Studio (Fallback)</title>
<style>body{background:#121212;color:white;font-family:sans-serif;display:flex;align-items:center;justify-content:center;height:100vh}a{color:#00ff88}</style>
</head>
<body>
<div style="text-align:center">
<h1>DẠ HÀNH STUDIO</h1>
<p>Hệ thống đang khởi động hoặc không tìm thấy giao diện.</p>
<p>Hãy thử truy cập <a href="/info">/info</a> để kiểm tra API.</p>
</div>
</body>
</html>
`);
});
92
 
93
// POST /chat — run one chat turn through the local model.
// Body: { message: string, history?: [{role:'user'|'ai', content:string}], role?: string }
// Responds 503 while the model is still loading, 400 on a missing/empty
// message, 500 (with the error message) when inference fails.
app.post('/chat', async (req, res) => {
  if (!llm) return res.status(503).json({ reply: "Hệ thống đang khởi động..." });
  try {
    const { message, history = [], role = 'default' } = req.body;

    // Reject missing/blank input early instead of sending it to the model.
    if (typeof message !== 'string' || message.trim() === '') {
      return res.status(400).json({ error: 'Missing "message" in request body.' });
    }

    // Unknown roles silently fall back to the default persona.
    const systemInstruction = PROMPTS[role] || PROMPTS['default'];

    const messages = [new SystemMessage(systemInstruction)];
    history.forEach((msg) => {
      if (msg.role === 'user') messages.push(new HumanMessage(msg.content));
      if (msg.role === 'ai') messages.push(new AIMessage(msg.content));
    });
    messages.push(new HumanMessage(message));

    const response = await llm.invoke(messages);
    res.json({ reply: response.content });
  } catch (error) {
    // Log server-side so failures are visible in container logs (this commit
    // had dropped the log, swallowing errors); expose only the message.
    console.error('--> /chat error:', error);
    res.status(500).json({ error: error.message });
  }
});
112
 
113
  app.get('/info', (req, res) => {
114
+ res.json({ welcome: WELCOME_MESSAGE, roles: Object.keys(PROMPTS) });
 
 
 
115
  });
116
 
117
// Bind on all interfaces (required inside the hosting container), then kick
// off model loading in the background once the HTTP server is accepting.
const onListening = () => {
  console.log(`--> Server listening on ${PORT}`);
  initModel();
};
app.listen(PORT, '0.0.0.0', onListening);