lenzcom committed on
Commit
8a5b096
·
verified ·
1 Parent(s): e6a509e

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. server.js +2 -98
server.js CHANGED
@@ -1,103 +1,7 @@
1
  import express from 'express';
2
- import { SystemMessage, HumanMessage, Runnable, LlamaCppLLM } from './src/index.js';
3
- import bodyParser from 'body-parser';
4
- import path from 'path';
5
- import fs from 'fs';
6
-
7
- // ... (Giữ nguyên logic EmailClassifierRunnable) ...
8
- class EmailClassifierRunnable extends Runnable {
9
- constructor(llm) {
10
- super();
11
- this.llm = llm;
12
- }
13
- async _call(input, config) {
14
- // Mock implementation if LLM fails
15
- if (!this.llm) return { category: "Error", confidence: 0, reason: "LLM not initialized" };
16
-
17
- const messages = this._buildPrompt(input);
18
- const response = await this.llm.invoke(messages, config);
19
- return this._parseClassification(response.content);
20
- }
21
- _buildPrompt(email) {
22
- return [
23
- new SystemMessage(`You are an email classification assistant. Classify into: Spam, Invoice, Meeting Request, Urgent, Personal, Other. Respond in JSON.`),
24
- new HumanMessage(`Classify:\nSubject: ${email.subject}\nBody: ${email.body}`)
25
- ];
26
- }
27
- _parseClassification(response) {
28
- try {
29
- const jsonMatch = response.match(/\{[\s\S]*\}/);
30
- if (!jsonMatch) throw new Error('No JSON');
31
- return JSON.parse(jsonMatch[0]);
32
- } catch (e) { return { category: 'Other', confidence: 0, reason: 'Parse fail' }; }
33
- }
34
- }
35
-
36
  const app = express();
37
  const PORT = 7860;
38
 
39
- app.use(bodyParser.json());
40
-
41
- // Global error log
42
- const errorLog = [];
43
-
44
- app.get('/debug', (req, res) => {
45
- res.json({
46
- cwd: process.cwd(),
47
- files: fs.readdirSync('.'),
48
- models: fs.existsSync('./models') ? fs.readdirSync('./models') : 'No models dir',
49
- errors: errorLog
50
- });
51
- });
52
-
53
- let classifier = null;
54
-
55
- async function initModel() {
56
- try {
57
- console.log("Loading model...");
58
- const modelPath = path.resolve('./models/Qwen3-1.7B-Q8_0.gguf');
59
-
60
- if (!fs.existsSync(modelPath)) {
61
- throw new Error(`Model file not found at ${modelPath}`);
62
- }
63
-
64
- const llm = new LlamaCppLLM({
65
- modelPath: modelPath,
66
- temperature: 0.1,
67
- maxTokens: 200
68
- });
69
-
70
- // Test run
71
- await llm.invoke("Hi");
72
-
73
- classifier = new EmailClassifierRunnable(llm);
74
- console.log("Model loaded!");
75
- } catch (err) {
76
- console.error("Model Load Error:", err);
77
- errorLog.push(err.toString());
78
- // DO NOT EXIT, let the server run to debug
79
- }
80
- }
81
-
82
- app.post('/classify', async (req, res) => {
83
- if (!classifier) {
84
- return res.status(503).json({
85
- error: "Model not ready",
86
- logs: errorLog
87
- });
88
- }
89
- try {
90
- const { subject, body } = req.body;
91
- const result = await classifier.invoke({ subject, body, from: 'api' });
92
- res.json(result);
93
- } catch (error) {
94
- res.status(500).json({ error: error.message });
95
- }
96
- });
97
-
98
- app.get('/', (req, res) => res.send('Server Running (Check /debug for status)'));
99
 
100
- app.listen(PORT, '0.0.0.0', () => {
101
- console.log(`Listening on ${PORT}`);
102
- initModel();
103
- });
 
1
  import express from 'express';
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  const app = express();
3
  const PORT = 7860;
4
 
5
+ app.get('/', (req, res) => res.send('Hello World - Node is working'));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
+ app.listen(PORT, '0.0.0.0', () => console.log(`Listening on ${PORT}`));