lenzcom committed on
Commit
32cf8db
·
verified ·
1 Parent(s): 133468f

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. activity.log +0 -0
  2. backup/server_email_v1.js +84 -0
  3. server.js +84 -53
activity.log ADDED
File without changes
backup/server_email_v1.js ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import express from 'express';
2
+ import { SystemMessage, HumanMessage, Runnable, LlamaCppLLM } from './src/index.js';
3
+ import bodyParser from 'body-parser';
4
+ import path from 'path';
5
+ import fs from 'fs';
6
+
7
// Classify Logic
// Wraps an LLM behind the Runnable interface to classify a single email
// into one of: Spam, Invoice, Meeting Request, Urgent, Personal, Other.
class EmailClassifierRunnable extends Runnable {
  /**
   * @param {LlamaCppLLM|null} llm - initialized chat model; null is tolerated
   *   and reported as an error result by _call.
   */
  constructor(llm) {
    super();
    this.llm = llm;
  }

  /**
   * Classify one email.
   * @param {{subject: string, body: string}} input
   * @param {object} [config] - forwarded unchanged to llm.invoke
   * @returns {Promise<{category: string, confidence: number, reason: string, raw?: string}>}
   */
  async _call(input, config) {
    // Mock fallback if model fails
    if (!this.llm) return { category: "Error", confidence: 0, reason: "Model not initialized" };

    const messages = this._buildPrompt(input);
    const response = await this.llm.invoke(messages, config);
    return this._parseClassification(response.content);
  }

  // Build the system + user message pair asking the model for a JSON verdict.
  _buildPrompt(email) {
    return [
      new SystemMessage(`You are an email classification assistant. Classify into: Spam, Invoice, Meeting Request, Urgent, Personal, Other. Respond in JSON like {"category": "X", "confidence": 0.9, "reason": "Y"}.`),
      new HumanMessage(`Classify:\nSubject: ${email.subject}\nBody: ${email.body}`)
    ];
  }

  // Extract and validate the JSON object from the model's free-form reply.
  // Previously the parsed object was returned untrusted: a missing category,
  // or a string/out-of-range confidence, leaked straight to API clients.
  // Now the contract is enforced and malformed replies fall back to the
  // safe default (category 'Other', confidence 0).
  _parseClassification(response) {
    try {
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (!jsonMatch) throw new Error('No JSON found');
      const parsed = JSON.parse(jsonMatch[0]);
      if (typeof parsed.category !== 'string' || parsed.category.length === 0) {
        throw new Error('Missing category');
      }
      // Coerce confidence to a finite number clamped to [0, 1]; default 0.
      const confidence = Number(parsed.confidence);
      parsed.confidence = Number.isFinite(confidence) ? Math.min(Math.max(confidence, 0), 1) : 0;
      if (typeof parsed.reason !== 'string') parsed.reason = '';
      return parsed;
    } catch (e) {
      return { category: 'Other', confidence: 0, reason: 'Failed to parse JSON', raw: response };
    }
  }
}
35
+
36
// --- HTTP server setup ---
const app = express();
// 7860 is the conventional exposed port for Hugging Face Spaces.
const PORT = 7860;

// Parse JSON request bodies on all routes.
app.use(bodyParser.json());

// Shared classifier instance; stays null until initModel() finishes,
// which lets /classify report 503 while the model is still loading.
let classifier = null;
42
+
43
/**
 * Load the GGUF model from disk, warm it up, and publish the classifier.
 * On any failure the error is logged and `classifier` stays null, which
 * the /classify route surfaces to clients as a 503.
 */
async function initModel() {
  try {
    console.log("Loading model...");

    // Ensure model exists before attempting to load it.
    const modelPath = './models/Qwen3-1.7B-Q8_0.gguf';
    if (!fs.existsSync(modelPath)) {
      console.error("Model file missing!");
      return;
    }

    const llm = new LlamaCppLLM({
      modelPath,
      temperature: 0.1,
      maxTokens: 200
    });

    // Warmup call so the first real request is not slow.
    await llm.invoke("Hi");

    classifier = new EmailClassifierRunnable(llm);
    console.log("Model loaded successfully!");
  } catch (err) {
    console.error("Failed to load model:", err);
  }
}
67
+
68
// Classification endpoint: accepts { subject, body } and returns the model's
// JSON verdict. Responds 503 until initModel() has published the classifier.
app.post('/classify', async (req, res) => {
  if (!classifier) return res.status(503).json({ error: "Model loading or failed" });
  try {
    const { subject, body } = req.body ?? {};
    // Reject malformed requests up front instead of sending the literal
    // string "undefined" to the model inside the prompt.
    if (typeof subject !== 'string' || typeof body !== 'string') {
      return res.status(400).json({ error: "Both 'subject' and 'body' must be strings" });
    }
    const result = await classifier.invoke({ subject, body, from: 'api' });
    res.json(result);
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});

// Landing/liveness route.
app.get('/', (req, res) => res.send('AI Email Classifier Running. POST /classify to use.'));

// Bind on all interfaces (container-friendly) and kick off model loading
// only after the server starts accepting connections.
app.listen(PORT, '0.0.0.0', () => {
  console.log(`Server listening on ${PORT}`);
  initModel();
});
server.js CHANGED
@@ -1,82 +1,113 @@
1
  import express from 'express';
2
- import { SystemMessage, HumanMessage, Runnable, LlamaCppLLM } from './src/index.js';
3
  import bodyParser from 'body-parser';
4
- import path from 'path';
5
  import fs from 'fs';
 
6
 
7
- // Classify Logic
8
- class EmailClassifierRunnable extends Runnable {
9
- constructor(llm) {
10
- super();
11
- this.llm = llm;
12
- }
13
- async _call(input, config) {
14
- // Mock fallback if model fails
15
- if (!this.llm) return { category: "Error", confidence: 0, reason: "Model not initialized" };
16
-
17
- const messages = this._buildPrompt(input);
18
- const response = await this.llm.invoke(messages, config);
19
- return this._parseClassification(response.content);
20
- }
21
- _buildPrompt(email) {
22
- return [
23
- new SystemMessage(`You are an email classification assistant. Classify into: Spam, Invoice, Meeting Request, Urgent, Personal, Other. Respond in JSON like {"category": "X", "confidence": 0.9, "reason": "Y"}.`),
24
- new HumanMessage(`Classify:\nSubject: ${email.subject}\nBody: ${email.body}`)
25
- ];
26
- }
27
- _parseClassification(response) {
28
- try {
29
- const jsonMatch = response.match(/\{[\s\S]*\}/);
30
- if (!jsonMatch) throw new Error('No JSON found');
31
- return JSON.parse(jsonMatch[0]);
32
- } catch (e) { return { category: 'Other', confidence: 0, reason: 'Failed to parse JSON', raw: response }; }
33
- }
34
- }
35
-
36
- const app = express();
37
  const PORT = 7860;
 
 
38
 
39
- app.use(bodyParser.json());
 
 
 
 
 
 
 
40
 
41
- let classifier = null;
 
 
42
 
43
  async function initModel() {
44
  try {
45
- console.log("Loading model...");
46
- // Ensure model exists
47
- if (!fs.existsSync('./models/Qwen3-1.7B-Q8_0.gguf')) {
48
- console.error("Model file missing!");
49
  return;
50
  }
51
 
52
- const llm = new LlamaCppLLM({
53
- modelPath: './models/Qwen3-1.7B-Q8_0.gguf',
54
- temperature: 0.1,
55
- maxTokens: 200
56
  });
57
 
58
  // Warmup
59
- await llm.invoke("Hi");
60
-
61
- classifier = new EmailClassifierRunnable(llm);
62
- console.log("Model loaded successfully!");
63
  } catch (err) {
64
- console.error("Failed to load model:", err);
65
  }
66
  }
67
 
68
- app.post('/classify', async (req, res) => {
69
- if (!classifier) return res.status(503).json({ error: "Model loading or failed" });
 
 
 
 
 
 
70
  try {
71
- const { subject, body } = req.body;
72
- const result = await classifier.invoke({ subject, body, from: 'api' });
73
- res.json(result);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  } catch (error) {
 
75
  res.status(500).json({ error: error.message });
76
  }
77
  });
78
 
79
- app.get('/', (req, res) => res.send('AI Email Classifier Running. POST /classify to use.'));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
  app.listen(PORT, '0.0.0.0', () => {
82
  console.log(`Server listening on ${PORT}`);
 
1
  import express from 'express';
2
+ import { SystemMessage, HumanMessage, AIMessage, LlamaCppLLM } from './src/index.js';
3
  import bodyParser from 'body-parser';
 
4
  import fs from 'fs';
5
+ import path from 'path';
6
 
7
+ // --- CONFIGURATION ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
// 7860 is the conventional exposed port for Hugging Face Spaces.
const PORT = 7860;
// Append-only activity log consumed by the /logs endpoint.
const LOG_FILE = 'activity.log';
// Local GGUF model file loaded by initModel().
const MODEL_PATH = './models/Qwen3-1.7B-Q8_0.gguf';
11
 
12
// --- LOGGING UTILS ---
/**
 * Append a timestamped entry to the activity log and echo it to stdout.
 * @param {string} type - entry tag, e.g. 'SYSTEM', 'REQUEST', 'ERROR', 'FATAL'
 * @param {*} content - any JSON-serializable payload
 */
function log(type, content) {
  const timestamp = new Date().toISOString();
  // Explicit '\n' instead of a raw newline embedded in the template literal.
  const entry = `[${timestamp}] [${type}] ${JSON.stringify(content)}\n`;
  console.log(entry.trim());
  try {
    // Synchronous append keeps entries ordered; acceptable at this volume.
    fs.appendFileSync(LOG_FILE, entry);
  } catch (err) {
    // A broken/unwritable log file must not crash the request handler that
    // triggered the log call — report to stderr and carry on.
    console.error('Failed to write activity log:', err.message);
  }
}
20
 
21
// --- AI ENGINE ---
// Module-level model handle; stays null until initModel() succeeds.
let llm = null;
// Fallback system prompt; replaceable at runtime via POST /config.
let defaultSystemPrompt = "You are a helpful and intelligent AI assistant. You answer concisely and accurately.";

/**
 * Load the chat model from MODEL_PATH, run a warmup call, and publish it
 * through the module-level `llm` handle. All outcomes go through log().
 */
async function initModel() {
  try {
    log('SYSTEM', 'Loading model...');

    // Bail out early when the model file has not been provisioned.
    const modelAvailable = fs.existsSync(MODEL_PATH);
    if (!modelAvailable) {
      log('ERROR', `Model not found at ${MODEL_PATH}`);
      return;
    }

    const modelConfig = {
      modelPath: MODEL_PATH,
      temperature: 0.7, // Higher for creativity
      maxTokens: 1024 // Longer responses
    };
    llm = new LlamaCppLLM(modelConfig);

    // Warmup so the first user request is not slow.
    await llm.invoke("Hello");
    log('SYSTEM', 'Model loaded successfully. Ready to serve.');
  } catch (err) {
    log('FATAL', err.message);
  }
}
46
 
47
// --- SERVER SETUP ---
const app = express();
// Parse JSON request bodies on every route.
app.use(bodyParser.json());
50
+
51
// 1. CHAT ENDPOINT (Main Interaction)
// Body: { message: string, history?: [{role: 'user'|'ai', content}], system_instruction?: string }
app.post('/chat', async (req, res) => {
  if (!llm) return res.status(503).json({ error: "System initializing..." });

  try {
    const { message, history = [], system_instruction } = req.body ?? {};

    // Reject malformed requests up front: previously an undefined `message`
    // flowed into HumanMessage, and a non-array `history` threw on .forEach
    // and surfaced as an opaque 500.
    if (typeof message !== 'string' || message.length === 0) {
      return res.status(400).json({ error: "'message' must be a non-empty string" });
    }
    if (!Array.isArray(history)) {
      return res.status(400).json({ error: "'history' must be an array" });
    }

    // Construct Message Chain
    const messages = [];

    // 1. System Prompt (Training/Instruction)
    messages.push(new SystemMessage(system_instruction || defaultSystemPrompt));

    // 2. Chat History (Short-term memory); entries with unknown roles are skipped.
    history.forEach(msg => {
      if (msg.role === 'user') messages.push(new HumanMessage(msg.content));
      if (msg.role === 'ai') messages.push(new AIMessage(msg.content));
    });

    // 3. Current Message
    messages.push(new HumanMessage(message));

    // NOTE(review): this records the full user message in activity.log —
    // confirm that is acceptable for privacy before production use.
    log('REQUEST', { message, instruction: system_instruction || "default" });

    // Invoke AI
    const response = await llm.invoke(messages);

    log('RESPONSE', response.content);
    res.json({ reply: response.content });

  } catch (error) {
    log('ERROR', error.message);
    res.status(500).json({ error: error.message });
  }
});
86
 
87
// 2. CONFIG ENDPOINT (Set default behavior)
// Body: { system_prompt?: string, temperature?: number }
app.post('/config', (req, res) => {
  const { system_prompt, temperature } = req.body ?? {};
  // Guard against truthy non-string values: .substring() below would throw
  // and surface as an opaque 500 otherwise.
  if (system_prompt !== undefined && typeof system_prompt !== 'string') {
    return res.status(400).json({ error: "'system_prompt' must be a string" });
  }
  if (system_prompt) {
    defaultSystemPrompt = system_prompt;
    log('CONFIG', `Updated default system prompt to: ${system_prompt.substring(0, 50)}...`);
  }
  // Note: Temperature change requires LLM reload in current simple wrapper, skipping for now
  res.json({ success: true, current_system_prompt: defaultSystemPrompt });
});
97
+
98
// 3. LOGS ENDPOINT (Realtime Monitoring)
// Serves the last 100 log lines as preformatted HTML.
app.get('/logs', (req, res) => {
  // Security fix: the log contains raw user input (/chat messages are logged
  // verbatim), so interpolating it into HTML unescaped was a stored-XSS hole.
  // Entity-encode the HTML metacharacters before embedding in <pre>.
  const escapeHtml = (s) => s
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;');

  if (fs.existsSync(LOG_FILE)) {
    const logs = fs.readFileSync(LOG_FILE, 'utf-8');
    // Return last 100 lines
    const lines = logs.trim().split('\n').slice(-100).join('\n');
    res.send(`<pre>${escapeHtml(lines)}</pre>`);
  } else {
    res.send('No logs yet.');
  }
});
109
+
110
+ app.get('/', (req, res) => res.send('AI Agent v2 (Chat Mode) Running.'));
111
 
112
  app.listen(PORT, '0.0.0.0', () => {
113
  console.log(`Server listening on ${PORT}`);