lenzcom committed on
Commit
133468f
·
verified ·
1 Parent(s): b455e44

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. server.js +79 -2
server.js CHANGED
@@ -1,7 +1,84 @@
1
  import express from 'express';
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  const app = express();
3
  const PORT = 7860;
4
 
5
- app.get('/', (req, res) => res.send('Hello World - Node is working'));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- app.listen(PORT, '0.0.0.0', () => console.log(`Listening on ${PORT}`));
 
 
 
 
1
  import express from 'express';
2
+ import { SystemMessage, HumanMessage, Runnable, LlamaCppLLM } from './src/index.js';
3
+ import bodyParser from 'body-parser';
4
+ import path from 'path';
5
+ import fs from 'fs';
6
+
7
+ // Classify Logic
8
+ class EmailClassifierRunnable extends Runnable {
9
+ constructor(llm) {
10
+ super();
11
+ this.llm = llm;
12
+ }
13
+ async _call(input, config) {
14
+ // Mock fallback if model fails
15
+ if (!this.llm) return { category: "Error", confidence: 0, reason: "Model not initialized" };
16
+
17
+ const messages = this._buildPrompt(input);
18
+ const response = await this.llm.invoke(messages, config);
19
+ return this._parseClassification(response.content);
20
+ }
21
+ _buildPrompt(email) {
22
+ return [
23
+ new SystemMessage(`You are an email classification assistant. Classify into: Spam, Invoice, Meeting Request, Urgent, Personal, Other. Respond in JSON like {"category": "X", "confidence": 0.9, "reason": "Y"}.`),
24
+ new HumanMessage(`Classify:\nSubject: ${email.subject}\nBody: ${email.body}`)
25
+ ];
26
+ }
27
+ _parseClassification(response) {
28
+ try {
29
+ const jsonMatch = response.match(/\{[\s\S]*\}/);
30
+ if (!jsonMatch) throw new Error('No JSON found');
31
+ return JSON.parse(jsonMatch[0]);
32
+ } catch (e) { return { category: 'Other', confidence: 0, reason: 'Failed to parse JSON', raw: response }; }
33
+ }
34
+ }
35
+
36
// Express application and fixed listen port.
const app = express();
const PORT = 7860;

// Parse JSON request bodies (needed by the /classify endpoint).
app.use(bodyParser.json());

// Installed by initModel() once the model has loaded; null until then,
// which makes /classify answer 503 instead of crashing.
let classifier = null;
42
+
43
/**
 * Load the local GGUF model, warm it up, and install the global classifier.
 * Best-effort: logs and returns on any failure, leaving `classifier` null
 * so /classify responds 503 rather than crashing the server.
 * @returns {Promise<void>}
 */
async function initModel() {
  // FIX: single source of truth for the model location — the literal was
  // duplicated between the existence check and the constructor.
  const modelPath = './models/Qwen3-1.7B-Q8_0.gguf';
  try {
    console.log("Loading model...");
    // Ensure model exists before attempting to load it.
    if (!fs.existsSync(modelPath)) {
      console.error("Model file missing!");
      return;
    }

    const llm = new LlamaCppLLM({
      modelPath,
      temperature: 0.1,
      maxTokens: 200
    });

    // Warmup call so the first real /classify request isn't slow.
    await llm.invoke("Hi");

    classifier = new EmailClassifierRunnable(llm);
    console.log("Model loaded successfully!");
  } catch (err) {
    console.error("Failed to load model:", err);
  }
}
67
+
68
/**
 * POST /classify — classify an email given a JSON body { subject, body }.
 * 503 while the model is still loading (or failed to load),
 * 400 on malformed input, 500 on unexpected classification errors.
 */
app.post('/classify', async (req, res) => {
  if (!classifier) return res.status(503).json({ error: "Model loading or failed" });
  // FIX: validate input up front — a missing or non-JSON body previously
  // threw while destructuring inside the try and surfaced as a 500
  // (server error) instead of a 400 (client error).
  const { subject, body } = req.body ?? {};
  if (typeof subject !== 'string' || typeof body !== 'string') {
    return res.status(400).json({ error: "Expected JSON body with string 'subject' and 'body'" });
  }
  try {
    const result = await classifier.invoke({ subject, body, from: 'api' });
    res.json(result);
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});
78
+
79
// Landing/health endpoint with a short usage hint.
app.get('/', (_req, res) => {
  res.send('AI Email Classifier Running. POST /classify to use.');
});
80
 
81
// Bind on all interfaces, then kick off model loading once the socket is up.
app.listen(PORT, '0.0.0.0', () => {
  console.log(`Server listening on ${PORT}`);
  // Fire-and-forget: initModel handles all of its own errors internally.
  void initModel();
});