lenzcom committed on
Commit
e6a509e
·
verified ·
1 Parent(s): 15585fd

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. server.js +40 -32
server.js CHANGED
@@ -1,40 +1,35 @@
1
  import express from 'express';
2
- import { SystemMessage, HumanMessage, Runnable, LlamaCppLLM } from './src/index.js'; // Sửa lại đường dẫn import cho đúng cấu trúc flat nếu cần, nhưng ở đây src vẫn nằm trong root
3
  import bodyParser from 'body-parser';
4
  import path from 'path';
5
  import fs from 'fs';
6
 
7
// (Class logic kept unchanged) — Runnable that classifies a single email via the wrapped LLM.
class EmailClassifierRunnable extends Runnable {
  constructor(llm) {
    super();
    this.llm = llm;
  }

  /** Runnable entry point: build the prompt, call the LLM, parse its reply. */
  async _call(input, config) {
    const prompt = this._buildPrompt(input);
    const reply = await this.llm.invoke(prompt, config);
    return this._parseClassification(reply.content);
  }

  /** Assemble the chat messages for one email ({subject, body}). */
  _buildPrompt(email) {
    const system = new SystemMessage(`You are an email classification assistant. Classify into: Spam, Invoice, Meeting Request, Urgent, Personal, Other.
Respond in strict JSON format: {"category": "Name", "confidence": 0.95, "reason": "Explanation"}`);
    const human = new HumanMessage(`Classify this email:
Subject: ${email.subject}
Body: ${email.body}`);
    return [system, human];
  }

  /** Pull the first {...} span out of the reply and JSON-parse it. */
  _parseClassification(response) {
    try {
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (!jsonMatch) throw new Error('No JSON found');
      return JSON.parse(jsonMatch[0]);
    } catch (error) {
      // Fall back to a safe default and keep the raw output for debugging.
      return { category: 'Other', confidence: 0.0, reason: 'Failed to parse', raw: response };
    }
  }
}
40
 
@@ -43,9 +38,17 @@ const PORT = 7860;
43
 
44
app.use(bodyParser.json());

// Log directory content for debugging.
// BUGFIX: the readdir must be guarded — an unguarded fs.readdirSync('./models')
// throws ENOENT and crashes the whole process at startup when the models
// directory has not been downloaded yet.
console.log("Current directory:", process.cwd());
console.log("Files in models:",
  fs.existsSync('./models') ? fs.readdirSync('./models') : '(no ./models directory)');

let classifier = null;

// Load the GGUF model and publish the classifier. Exits the process on
// failure so the container supervisor can restart it (or surface the error).
async function initModel() {
  try {
    console.log("Loading model...");
    const modelPath = path.resolve('./models/Qwen3-1.7B-Q8_0.gguf');
    console.log("Model absolute path:", modelPath);

    const llm = new LlamaCppLLM({
      modelPath: modelPath,
      // NOTE(review): one option line is elided in the diff view here — confirm against the repo
      maxTokens: 200
    });

    // Test invoke to make sure it works
    await llm.invoke("Hello");

    classifier = new EmailClassifierRunnable(llm);
    console.log("Model loaded and ready!");
  } catch (err) {
    console.error("FATAL ERROR loading model:", err);
    process.exit(1); // Exit to let Docker restart (or show error)
  }
}

// Classify one email. Returns 503 until the background model load finishes.
app.post('/classify', async (req, res) => {
  if (!classifier) return res.status(503).json({ error: "Model is loading..." });
  try {
    const { subject, body, from } = req.body;
    const result = await classifier.invoke({ subject, body, from: from || 'unknown' });
    res.json(result);
  } catch (error) {
    console.error(error);
    res.status(500).json({ error: error.message });
  }
});

// Simple liveness page.
app.get('/', (req, res) => {
  res.send('AI Agent Email Classifier is Running. POST to /classify to use.');
});

// Start server immediately, load model in background
app.listen(PORT, '0.0.0.0', () => {
  console.log(`Server running on port ${PORT}`);
  initModel();
});
 
1
  import express from 'express';
2
+ import { SystemMessage, HumanMessage, Runnable, LlamaCppLLM } from './src/index.js';
3
  import bodyParser from 'body-parser';
4
  import path from 'path';
5
  import fs from 'fs';
6
 
7
// (EmailClassifierRunnable logic kept unchanged) — classifies one email through the wrapped LLM.
class EmailClassifierRunnable extends Runnable {
  constructor(llm) {
    super();
    this.llm = llm;
  }

  /** Runnable entry point: prompt → LLM → parsed classification object. */
  async _call(input, config) {
    // Mock implementation if LLM fails
    if (!this.llm) return { category: "Error", confidence: 0, reason: "LLM not initialized" };

    const prompt = this._buildPrompt(input);
    const reply = await this.llm.invoke(prompt, config);
    return this._parseClassification(reply.content);
  }

  /** Assemble the system + human messages for one email ({subject, body}). */
  _buildPrompt(email) {
    const system = new SystemMessage(`You are an email classification assistant. Classify into: Spam, Invoice, Meeting Request, Urgent, Personal, Other. Respond in JSON.`);
    const human = new HumanMessage(`Classify:\nSubject: ${email.subject}\nBody: ${email.body}`);
    return [system, human];
  }

  /** Extract the first {...} span from the reply and JSON-parse it. */
  _parseClassification(response) {
    try {
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (!jsonMatch) throw new Error('No JSON');
      return JSON.parse(jsonMatch[0]);
    } catch (e) {
      // Safe default when the model reply is not parseable JSON.
      return { category: 'Other', confidence: 0, reason: 'Parse fail' };
    }
  }
}
35
 
 
38
 
39
app.use(bodyParser.json());

// Global error log — appended to by initModel, surfaced via /debug and /classify.
const errorLog = [];

// Debug endpoint: reports the working dir, file listings and collected errors.
// SECURITY NOTE(review): this discloses filesystem layout and internal error
// text to any unauthenticated caller — keep only while debugging the Space,
// or gate/remove it before production use.
app.get('/debug', (req, res) => {
  try {
    res.json({
      cwd: process.cwd(),
      files: fs.readdirSync('.'),
      models: fs.existsSync('./models') ? fs.readdirSync('./models') : 'No models dir',
      errors: errorLog
    });
  } catch (err) {
    // A sync fs failure previously fell through to Express's default HTML 500;
    // answer in JSON like every other route instead.
    res.status(500).json({ error: err.message });
  }
});
52
 
53
// Shared classifier instance; stays null until initModel() succeeds.
let classifier = null;

// Background model bootstrap: resolve the GGUF path, verify it exists,
// smoke-test the LLM, then publish the classifier. On failure the error is
// recorded in errorLog and the server keeps running so /debug can report it.
async function initModel() {
  try {
    console.log("Loading model...");
    const ggufPath = path.resolve('./models/Qwen3-1.7B-Q8_0.gguf');

    // Fail fast with a clear message when the model file is absent.
    if (!fs.existsSync(ggufPath)) {
      throw new Error(`Model file not found at ${ggufPath}`);
    }

    const model = new LlamaCppLLM({
      modelPath: ggufPath,
      // NOTE(review): one option line is not visible in the diff view here — confirm against the repo
      maxTokens: 200
    });

    // Smoke test: make sure a round-trip through the model works.
    await model.invoke("Hi");

    classifier = new EmailClassifierRunnable(model);
    console.log("Model loaded!");
  } catch (loadErr) {
    // Deliberately no process.exit(): keep the server alive for debugging.
    console.error("Model Load Error:", loadErr);
    errorLog.push(loadErr.toString());
  }
}
81
 
82
// Classify one email. Body: { subject, body }. Replies with the parsed
// classification, or 503 (with collected load logs) while the model loads.
app.post('/classify', async (req, res) => {
  if (!classifier) {
    return res.status(503).json({
      error: "Model not ready",
      logs: errorLog
    });
  }
  try {
    // req.body is undefined when the request carried no JSON payload; fall
    // back to an empty object instead of throwing on the destructuring.
    const { subject, body } = req.body ?? {};
    const result = await classifier.invoke({ subject, body, from: 'api' });
    res.json(result);
  } catch (error) {
    // Record the failure server-side too — previously it was only echoed to
    // the client, leaving nothing in the logs or /debug output.
    console.error("Classification error:", error);
    errorLog.push(error.toString());
    res.status(500).json({ error: error.message });
  }
});

// Simple liveness page.
app.get('/', (req, res) => res.send('Server Running (Check /debug for status)'));

// Start listening immediately; the model loads in the background.
app.listen(PORT, '0.0.0.0', () => {
  console.log(`Listening on ${PORT}`);
  initModel();
});