Nand0ZZ commited on
Commit
b11482b
Β·
1 Parent(s): 216db05
Files changed (5) hide show
  1. .env +1 -0
  2. Dockerfile +57 -0
  3. package-lock.json +0 -0
  4. package.json +28 -0
  5. room.js +736 -0
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ # SECURITY: this commit exposed a live Groq API key. The key must be rotated
+ # immediately; never commit credentials — inject them at runtime instead.
+ GROQ_API_KEY=REPLACE_WITH_ROTATED_KEY
Dockerfile ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use official Node.js image
FROM node:18-bullseye

# -----------------------------
# Install system dependencies (OCR, image and PDF tooling used by
# tesseract.js/sharp/pdf-parse at runtime)
# -----------------------------
RUN apt-get update && apt-get install -y \
    tesseract-ocr \
    libtesseract-dev \
    libleptonica-dev \
    libvips \
    poppler-utils \
    ghostscript \
    build-essential \
    python3 \
    && rm -rf /var/lib/apt/lists/*

# -----------------------------
# Set working directory
# -----------------------------
WORKDIR /app

# -----------------------------
# Copy package files first so the dependency layer stays cached
# -----------------------------
COPY package*.json ./

# -----------------------------
# Install Node dependencies reproducibly from package-lock.json.
# FIX: `npm ci --omit=dev` replaces the deprecated, non-reproducible
# `npm install --production`.
# -----------------------------
RUN npm ci --omit=dev

# -----------------------------
# Copy app source
# WARNING: this also copies the committed .env (which held an API key) into
# the image. Add a .dockerignore excluding .env and supply secrets at runtime.
# -----------------------------
COPY . .

# -----------------------------
# Create uploads directory
# -----------------------------
RUN mkdir -p uploads

# -----------------------------
# Hugging Face requires port 7860
# -----------------------------
EXPOSE 7860

# -----------------------------
# Environment variables
# -----------------------------
ENV NODE_ENV=production
ENV PORT=7860

# -----------------------------
# Start the server (package.json "main"/"start" both point at server.js)
# -----------------------------
CMD ["node", "server.js"]
package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
package.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "wmad",
3
+ "version": "1.0.0",
4
+ "description": "AI-powered medical consultation with OCR support",
5
+ "main": "server.js",
6
+ "scripts": {
7
+ "start": "node server.js",
8
+ "dev": "nodemon server.js"
9
+ },
10
+ "dependencies": {
11
+ "@langchain/core": "^0.1.52",
12
+ "@langchain/groq": "^0.0.14",
13
+ "cors": "^2.8.5",
14
+ "dotenv": "^16.4.5",
15
+ "express": "^4.18.2",
16
+ "ioredis": "^5.8.2",
17
+ "langchain": "^0.1.30",
18
+ "mammoth": "^1.6.0",
19
+ "multer": "^1.4.5-lts.1",
20
+ "pdf-parse": "^1.1.1",
21
+ "sharp": "^0.33.2",
22
+ "socket.io": "^4.6.1",
23
+ "tesseract.js": "^5.0.4"
24
+ },
25
+ "devDependencies": {
26
+ "nodemon": "^3.0.3"
27
+ }
28
+ }
room.js ADDED
@@ -0,0 +1,736 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
const express = require("express");
const http = require("http");
const path = require("path");
const multer = require("multer");
const fs = require("fs").promises;
const { Server } = require("socket.io");
const { ChatGroq } = require("@langchain/groq");
const { HumanMessage, SystemMessage } = require("@langchain/core/messages");
const mammoth = require("mammoth");
const pdf = require("pdf-parse");
const Tesseract = require("tesseract.js");
const sharp = require("sharp");
const cors = require("cors");

const app = express();
const server = http.createServer(app);
// NOTE(review): CORS is wide open (origin "*") and the socket buffer cap is
// 100 MB — tighten both before exposing this beyond a demo.
const io = new Server(server, {
  cors: { origin: "*" },
  maxHttpBufferSize: 1e8
});

app.use(cors());
app.use(express.json());
app.use(express.static(path.resolve("./public")));
app.use("/uploads", express.static(path.join(__dirname, "uploads")));

// Configure file upload: disk storage under ./uploads with timestamped names.
const storage = multer.diskStorage({
  destination: async (req, file, cb) => {
    const uploadDir = path.join(__dirname, "uploads");
    await fs.mkdir(uploadDir, { recursive: true });
    cb(null, uploadDir);
  },
  filename: (req, file, cb) => {
    // FIX: originalname is attacker-controlled. Strip directory components and
    // unusual characters so a crafted name (e.g. "../../x") cannot escape the
    // uploads directory or break the generated /uploads/<name> URL.
    const safeName = path.basename(file.originalname).replace(/[^\w.\-]/g, "_");
    cb(null, `${Date.now()}-${safeName}`);
  }
});

const upload = multer({
  storage,
  limits: { fileSize: 50 * 1024 * 1024 } // 50 MB per file
});

// Initialize Groq LLM.
// NOTE(review): nothing in this file loads the committed .env (dotenv is a
// declared dependency but is never required), so GROQ_API_KEY must come from
// the real process environment — confirm the deployment sets it.
const llm = new ChatGroq({
  model: "llama-3.3-70b-versatile",
  temperature: 0.7,
  maxTokens: 2000,
  maxRetries: 2,
  apiKey: process.env.GROQ_API_KEY
});

// In-memory state: roomId -> room record, socket.id -> user record.
// Never reassigned, so declared const; contents are mutated in place.
const rooms = {};
const users = {};

// 🚨 FEATURE 4: Emergency keywords — cheap pre-filter run before the LLM triage call.
const EMERGENCY_KEYWORDS = [
  'chest pain', 'heart attack', 'can\'t breathe', 'breathless', 'severe bleeding',
  'unconscious', 'stroke', 'paralysis', 'severe headache', 'suicide',
  'overdose', 'seizure', 'choking', 'anaphylaxis', 'severe pain'
];
65
+
66
// 🎯 FEATURE 1: Dynamic Dual-Persona AI Safety Engine
// Two system prompts, selected per message by getAIResponse() based on the
// sender's role. The patient persona deliberately withholds clinical detail.
const PATIENT_AI_PROMPT = `You are an AI Medical Assistant helping a PATIENT. Your role:

**SAFETY-FIRST APPROACH**
1. **Empathetic Support**: Be warm, reassuring, and supportive
2. **Simple Language**: Avoid medical jargon, explain in simple terms
3. **Symptom Clarification**: Ask ONE focused question at a time
4. **No Premature Conclusions**: Never diagnose or interpret lab results
5. **Safety Boundaries**: If critical values detected, advise immediate medical attention
6. **Respond Only When**:
- Patient asks direct questions
- Patient is alone and needs guidance
- Someone mentions @ai

**RISK CONTROL**: Never share detailed medical analysis. Acknowledge uploads and reassure.`;

// The doctor persona gets full analytical detail and clinical terminology.
const DOCTOR_AI_PROMPT = `You are an AI Medical Assistant helping a DOCTOR. Your role:

**CLINICAL-GRADE ANALYSIS**
1. **Detailed Insights**: Provide comprehensive medical analysis
2. **Critical Findings**: Highlight abnormal values, red flags with clinical context
3. **Medical Terminology**: Use appropriate professional language
4. **Evidence-Based**: Reference standard clinical thresholds
5. **Explainable AI**: Always explain WHY a finding is significant
6. **Respond Only When**:
- Doctor asks about files/reports
- Doctor mentions @ai
- Doctor needs clinical summary

**TRANSPARENCY**: Provide clear reasoning for all flagged findings with confidence levels.`;
96
+
97
// 🔬 FEATURE 3: Explainable AI Layer
/**
 * Analyze one medical report with the LLM, requesting a structured
 * "explainable" response (findings + reasoning + confidence).
 *
 * @param {string} content - Extracted report text; only the first 3000 chars are sent.
 * @param {string} fileName - Original upload name, embedded in the prompt.
 * @param {Array<{date: string, keyFindings: string}>} [previousReports] - Optional
 *   history (shape produced by extractTemporalData) injected as temporal context.
 * @returns {Promise<string>} Model analysis text, or a fallback string on error.
 */
async function analyzeFileWithXAI(content, fileName, previousReports = []) {
  const analysisPrompt = `Analyze this medical report with EXPLAINABLE AI principles:

File: ${fileName}
Content: ${content.substring(0, 3000)}

${previousReports.length > 0 ? `
**TEMPORAL CONTEXT** (Previous Reports):
${previousReports.map((r, i) => `Report ${i+1} (${r.date}): ${r.keyFindings}`).join('\n')}
` : ''}

Provide analysis in this EXACT format:

**CLINICAL SUMMARY**
• Main diagnosis/finding (1 line)

**CRITICAL FINDINGS**
• [Value/Finding]: [Normal Range] → [Current Value] → [Deviation %]
Reason: [Clinical explanation]
Confidence: [High/Medium/Low]

**TEMPORAL TRENDS** (if previous data available)
• [Parameter]: [Previous → Current] → [Trend Analysis]

**IMMEDIATE CONCERNS**
• [Priority level]: [Specific concern]

**RECOMMENDATIONS**
• [Actionable next steps]

Be concise, clinical, and ALWAYS explain the "why" behind findings.`;

  try {
    const analysis = await llm.invoke([
      new SystemMessage("You are a clinical AI analyzer specializing in explainable medical insights."),
      new HumanMessage(analysisPrompt)
    ]);
    return analysis.content;
  } catch (error) {
    // LLM failures are swallowed deliberately: callers fall back to this string
    // rather than aborting the upload flow.
    console.error("XAI Analysis error:", error);
    return "Unable to analyze with full explainability.";
  }
}
141
+
142
// 🕐 FEATURE 2: Temporal Health Intelligence
/**
 * Summarize a room's uploaded files for temporal comparison.
 * Returns [] unless the room holds at least two files (one prior report plus
 * the one just added), matching the original behavior.
 *
 * @param {{files?: Array<object>}|null|undefined} room - Room record; may be absent.
 * @returns {Array<{name: string, date: string, keyFindings: string, content: string}>}
 */
function extractTemporalData(room) {
  // FIX: guard against a null/undefined room or a non-array files field
  // (previously threw a TypeError and aborted the upload path).
  if (!room || !Array.isArray(room.files) || room.files.length < 2) return [];

  return room.files.map(f => ({
    name: f.name,
    date: f.uploadedAt,
    keyFindings: f.analysis ? f.analysis.substring(0, 200) : "No analysis",
    // FIX: tolerate a missing/non-string content field instead of crashing.
    content: typeof f.content === "string" ? f.content.substring(0, 500) : ""
  }));
}
153
+
154
/**
 * Analyze the newest report in the context of the room's earlier uploads.
 * Falls back to single-report XAI analysis when there is no history, or when
 * the temporal LLM call fails.
 *
 * @param {string} currentContent - Extracted text of the newly uploaded report.
 * @param {string} fileName - Name of the new upload.
 * @param {object} room - Room record holding previously uploaded files.
 * @returns {Promise<string>} Analysis text.
 */
async function performTemporalAnalysis(currentContent, fileName, room) {
  const previousReports = extractTemporalData(room);

  // No usable history: do a plain single-report analysis instead.
  if (previousReports.length === 0) {
    return await analyzeFileWithXAI(currentContent, fileName, []);
  }

  const temporalPrompt = `Perform TEMPORAL HEALTH INTELLIGENCE analysis:

**CURRENT REPORT**: ${fileName}
${currentContent.substring(0, 2000)}

**HISTORICAL DATA**:
${previousReports.map((r, i) => `
Report ${i+1} - ${new Date(r.date).toLocaleDateString()}:
${r.keyFindings}
`).join('\n')}

Analyze:
1. **Longitudinal Trends**: Compare current vs historical values
2. **Progression/Deterioration**: Identify gradual changes over time
3. **Early Warning Signs**: Flag subtle patterns that indicate future risk
4. **Clinical Significance**: Is this progression normal or concerning?

Format as structured clinical analysis with temporal context.`;

  try {
    const analysis = await llm.invoke([
      new SystemMessage("You are a temporal medical intelligence analyzer specializing in longitudinal health trends."),
      new HumanMessage(temporalPrompt)
    ]);
    return analysis.content;
  } catch (error) {
    // Degrade gracefully to a single-report analysis rather than failing the upload.
    console.error("Temporal analysis error:", error);
    return await analyzeFileWithXAI(currentContent, fileName, previousReports);
  }
}
191
+
192
// 🚨 FEATURE 4: Emergency Detection and Escalation
/**
 * Two-stage emergency triage: a cheap keyword pre-filter, then an LLM
 * classification of severity. Messages without any emergency keyword never
 * reach the LLM.
 *
 * @param {string} message - The chat message to triage.
 * @param {string} userRole - Sender role; currently unused but kept for
 *   interface compatibility with callers.
 * @returns {Promise<{isEmergency: boolean, level?: string, reasoning?: string, urgentAdvice?: string}>}
 */
async function detectEmergency(message, userRole) {
  const messageLower = message.toLowerCase();

  // Stage 1: keyword pre-filter (substring match, case-insensitive).
  const hasEmergencyKeyword = EMERGENCY_KEYWORDS.some(keyword =>
    messageLower.includes(keyword)
  );

  if (!hasEmergencyKeyword) return { isEmergency: false };

  // Stage 2: LLM-based severity classification.
  const emergencyPrompt = `Analyze this message for medical emergency indicators:

Message: "${message}"

Classify emergency level:
- CRITICAL: Immediate life threat (chest pain, can't breathe, severe bleeding, stroke symptoms)
- HIGH: Urgent medical attention needed within hours
- MODERATE: Medical evaluation needed soon
- LOW: Non-emergency concern

Respond ONLY with JSON:
{
"level": "CRITICAL|HIGH|MODERATE|LOW",
"reasoning": "brief explanation",
"urgentAdvice": "immediate action to take"
}`;

  try {
    const response = await llm.invoke([
      new SystemMessage("You are an emergency medical triage AI. Respond ONLY with valid JSON."),
      new HumanMessage(emergencyPrompt)
    ]);

    // The model may wrap its JSON in a fenced code block; strip fences first.
    const result = JSON.parse(response.content.replace(/```json|```/g, '').trim());

    return {
      isEmergency: result.level === "CRITICAL" || result.level === "HIGH",
      level: result.level,
      reasoning: result.reasoning,
      urgentAdvice: result.urgentAdvice
    };
  } catch (error) {
    console.error("Emergency detection error:", error);
    // Keyword hit but LLM triage failed: err on the side of escalation.
    // FIX: the fallback previously omitted urgentAdvice, so the patient alert
    // rendered the literal text "undefined".
    return {
      isEmergency: hasEmergencyKeyword,
      level: "HIGH",
      reasoning: "Keyword detected",
      urgentAdvice: "Seek immediate medical attention or call emergency services."
    };
  }
}
240
+
241
// 📋 FEATURE 5: Doctor Co-Pilot Documentation
/**
 * Build a SOAP-note style summary of a consultation from the room's chat
 * history and per-file analyses.
 *
 * @param {string} roomId - Key into the module-level `rooms` map.
 * @returns {Promise<string|null>} Documentation text, or null when the room is
 *   unknown or the LLM call fails.
 */
async function generateClinicalDocumentation(roomId) {
  const room = rooms[roomId];
  if (!room) return null;

  // Only human turns feed the note; private AI messages are excluded by role.
  const conversationHistory = room.messages
    .filter(m => m.role === 'Patient' || m.role === 'Doctor')
    .map(m => `${m.role}: ${m.content}`)
    .join('\n');

  const filesSummary = room.files
    .map(f => `- ${f.name}: ${f.analysis || 'No analysis'}`)
    .join('\n');

  const docPrompt = `Generate structured clinical documentation from this consultation:

**CONVERSATION**:
${conversationHistory}

**UPLOADED FILES**:
${filesSummary}

Generate SOAP NOTE format:

**SUBJECTIVE**
- Chief Complaint: [main issue]
- History of Present Illness: [brief narrative]
- Review of Systems: [relevant findings]

**OBJECTIVE**
- Vital signs/Reports: [from uploaded files]
- Physical findings: [mentioned in chat]

**ASSESSMENT**
- Primary diagnosis: [clinical impression]
- Differential diagnoses: [alternatives]

**PLAN**
- Investigations: [tests ordered]
- Treatment: [medications/interventions]
- Follow-up: [next steps]

Keep concise and clinically accurate.`;

  try {
    const documentation = await llm.invoke([
      new SystemMessage("You are a medical documentation AI specializing in SOAP notes and clinical summaries."),
      new HumanMessage(docPrompt)
    ]);
    return documentation.content;
  } catch (error) {
    // Callers treat null as "no documentation available".
    console.error("Documentation generation error:", error);
    return null;
  }
}
296
+
297
// Helper: OCR for images
/**
 * Run OCR over an uploaded image. The image is first pre-processed with sharp
 * (greyscale + normalize + sharpen) into a temporary "<path>_processed.jpg",
 * then fed to Tesseract; the temp file is removed afterwards (best effort).
 *
 * @param {string} imagePath - Path of the uploaded image on disk.
 * @returns {Promise<string>} Trimmed OCR text, or "" on any failure.
 */
async function extractTextFromImage(imagePath) {
  try {
    console.log("Starting OCR:", imagePath);
    const processedPath = imagePath + "_processed.jpg";
    await sharp(imagePath)
      .greyscale()
      .normalize()
      .sharpen()
      .toFile(processedPath);

    const { data: { text } } = await Tesseract.recognize(processedPath, 'eng');

    // Best-effort cleanup of the temp image; unlink failures are ignored on purpose.
    try { await fs.unlink(processedPath); } catch (e) {}

    console.log("OCR completed, text length:", text.length);
    return text.trim();
  } catch (error) {
    // Any failure (bad image, sharp error, Tesseract error) yields empty text,
    // which extractFileContent maps to "[Image - no text detected]".
    console.error("OCR Error:", error);
    return "";
  }
}
319
+
320
// Helper: Extract text from files
/**
 * Extract readable text from an uploaded file based on its MIME type.
 * Supports PDF (pdf-parse), Word documents (mammoth), plain text, and images
 * (OCR via extractTextFromImage).
 *
 * @param {string} filePath - Path of the file on disk.
 * @param {string} [mimeType] - MIME type reported by multer; may be missing.
 * @returns {Promise<string>} Extracted text, or a bracketed status string
 *   ("[Unsupported format]", "[Extraction failed]", "[Image - no text detected]").
 */
async function extractFileContent(filePath, mimeType) {
  // FIX: a missing/non-string mimeType used to throw inside the try block and
  // be misreported as "[Extraction failed]"; treat it as unsupported instead.
  const type = typeof mimeType === "string" ? mimeType : "";
  try {
    console.log("Extracting:", filePath, type);

    if (type === "application/pdf") {
      const dataBuffer = await fs.readFile(filePath);
      const pdfData = await pdf(dataBuffer);
      return pdfData.text;
    } else if (type.includes("word") || type.includes("document")) {
      const result = await mammoth.extractRawText({ path: filePath });
      return result.value;
    } else if (type.includes("text")) {
      return await fs.readFile(filePath, "utf-8");
    } else if (type.includes("image")) {
      const ocrText = await extractTextFromImage(filePath);
      // Very short OCR output is treated as noise rather than real content.
      return ocrText.length > 10 ? ocrText : "[Image - no text detected]";
    }
    return "[Unsupported format]";
  } catch (error) {
    console.error("Extraction error:", error);
    return "[Extraction failed]";
  }
}
344
+
345
// Helper: AI Response with risk-aware disclosure control
/**
 * Build a role-aware prompt (persona + room context + recent messages) and
 * query the LLM. Doctors asking file questions get full analyses; patients
 * only ever see file names.
 *
 * @param {string} roomId - Key into the module-level `rooms` map.
 * @param {string} userMessage - The triggering chat message.
 * @param {string} userRole - "doctor" or "patient"; selects the persona prompt.
 * @param {boolean} [isFileQuery=false] - When true (doctor only), attach file analyses.
 * @param {{reasoning: string, level: string}|null} [emergencyContext=null] - Optional
 *   triage result from detectEmergency, surfaced in the context block.
 * @returns {Promise<string>} AI reply text, or an apology string on error.
 */
async function getAIResponse(roomId, userMessage, userRole, isFileQuery = false, emergencyContext = null) {
  const room = rooms[roomId];
  if (!room) return "Room not found";

  // FEATURE 1: Dynamic persona selection
  const systemPrompt = userRole === "doctor" ? DOCTOR_AI_PROMPT : PATIENT_AI_PROMPT;

  // Keep broadcast messages plus private messages addressed to this role.
  // NOTE(review): the third clause is redundant — `!m.forRole` in the first
  // clause already matches everything it could add.
  const roleMessages = room.messages.filter(m =>
    !m.forRole || m.forRole === userRole || (!m.forRole && m.role !== 'AI Assistant')
  );

  let context = `Room: ${roomId}
User Role: ${userRole}
Patient: ${room.patient || "Waiting"}
Doctor: ${room.doctor || "Not yet joined"}

${emergencyContext ? `🚨 EMERGENCY CONTEXT: ${emergencyContext.reasoning}\nLevel: ${emergencyContext.level}` : ''}

Recent messages (last 5):
${roleMessages.slice(-5).map(m => `${m.role}: ${m.content}`).join("\n")}`;

  // FEATURE 1: Risk-based information disclosure
  if (userRole === "doctor" && isFileQuery && room.files.length > 0) {
    context += `\n\n**CLINICAL FILES** (with XAI explanations):\n${room.files.map((f, i) =>
      `${i+1}. ${f.name}\n Analysis: ${f.analysis}\n Key content: ${f.content.substring(0, 400)}`
    ).join("\n\n")}`;
  } else if (userRole === "patient" && room.files.length > 0) {
    // Patients get minimal, safe information
    context += `\n\n**FILES UPLOADED**: ${room.files.map(f => f.name).join(', ')}
Note: Detailed medical analysis is being reviewed by your doctor.`;
  }

  const messages = [
    new SystemMessage(systemPrompt),
    new SystemMessage(context),
    new HumanMessage(`[${userRole}]: ${userMessage}`)
  ];

  try {
    const response = await llm.invoke(messages);
    return response.content;
  } catch (error) {
    console.error("AI Error:", error);
    return "I'm having trouble responding. Please try again.";
  }
}
392
+
393
// File upload endpoint with TEMPORAL ANALYSIS
// Pipeline: multer stores the file -> text extraction -> (optional) LLM
// analysis -> broadcast to the room -> delayed, role-private AI notifications.
// NOTE(review): this endpoint is unauthenticated — any client knowing a roomId
// can inject files. If roomId does not match a live room, the file stays on
// disk but is attached to nothing, and the handler still reports success.
app.post("/upload", upload.single("file"), async (req, res) => {
  try {
    const { roomId, uploadedBy, uploaderRole } = req.body;
    const file = req.file;

    if (!file || !roomId) {
      return res.status(400).json({ error: "File and roomId required" });
    }

    console.log("Upload:", file.originalname, "by", uploadedBy, "in", roomId);

    const content = await extractFileContent(file.path, file.mimetype);
    console.log("Content extracted, length:", content.length);

    // FEATURE 2 & 3: Temporal analysis with XAI — only when extraction yielded
    // real text (the "no text detected" marker from OCR is skipped).
    let analysis = "";
    if (content && content.length > 20 && !content.includes("no text detected")) {
      if (rooms[roomId]) {
        analysis = await performTemporalAnalysis(content, file.originalname, rooms[roomId]);
      } else {
        analysis = await analyzeFileWithXAI(content, file.originalname, []);
      }
    }

    // Room-level record; content is capped at 5000 chars to bound memory.
    const fileInfo = {
      name: file.originalname,
      path: file.path,
      url: `/uploads/${file.filename}`,
      type: file.mimetype,
      content: content.substring(0, 5000),
      analysis: analysis,
      uploadedAt: new Date().toISOString(),
      uploadedBy: uploadedBy || "Unknown"
    };

    if (rooms[roomId]) {
      rooms[roomId].files.push(fileInfo);

      // Broadcast file upload to everyone in room
      const fileMessage = {
        role: uploadedBy || "User",
        nickname: uploadedBy,
        content: `📎 Uploaded: ${file.originalname}`,
        timestamp: new Date().toISOString(),
        fileData: {
          name: file.originalname,
          url: fileInfo.url,
          type: file.mimetype,
          analysis: analysis
        },
        isFile: true
      };

      rooms[roomId].messages.push(fileMessage);
      io.to(roomId).emit("chat-message", fileMessage);

      // Emit file list update to all users in the room
      io.to(roomId).emit("files-updated", { files: rooms[roomId].files });

      // FEATURE 1: Role-specific AI responses (PRIVATE - not visible to other role).
      // NOTE(review): these setTimeout callbacks are fire-and-forget; if the
      // doctor disconnects within the delay, the lookup simply finds nobody.
      if (content && content.length > 20) {
        setTimeout(() => {
          const doctorSocketId = Object.keys(users).find(
            sid => users[sid].roomId === roomId && users[sid].role === "doctor"
          );

          if (doctorSocketId && rooms[roomId].doctor) {
            const doctorAiMessage = `🔬 **Clinical Analysis** (with XAI)\n\n${analysis}`;
            io.to(doctorSocketId).emit("ai-message", {
              message: doctorAiMessage,
              isPrivate: true,
              forRole: "doctor"
            });
          }
        }, 1000);
      }

      // Patients only get a reassurance acknowledgement, never the analysis.
      if (uploaderRole === "patient") {
        setTimeout(() => {
          const patientSocketId = Object.keys(users).find(
            sid => users[sid].nickname === uploadedBy && users[sid].roomId === roomId
          );

          if (patientSocketId) {
            const patientAiMessage = `✅ I've received "${file.originalname}". Your doctor will review it shortly.`;
            io.to(patientSocketId).emit("ai-message", {
              message: patientAiMessage,
              isPrivate: true,
              forRole: "patient"
            });
          }
        }, 500);
      }
    }

    res.json({ success: true, file: fileInfo });
  } catch (error) {
    // NOTE(review): on failure the stored file is not unlinked — orphaned
    // uploads accumulate in ./uploads.
    console.error("Upload error:", error);
    res.status(500).json({ error: "Upload failed: " + error.message });
  }
});
495
+
496
// FEATURE 5: Generate clinical documentation endpoint
// Validates the room, then returns the SOAP-note text produced by
// generateClinicalDocumentation (null when the LLM call fails).
app.post("/generate-documentation", async (req, res) => {
  try {
    const { roomId } = req.body;
    const knownRoom = Boolean(roomId) && Boolean(rooms[roomId]);
    if (!knownRoom) {
      return res.status(400).json({ error: "Invalid room ID" });
    }

    const documentation = await generateClinicalDocumentation(roomId);
    res.json({ success: true, documentation });
  } catch (err) {
    console.error("Documentation error:", err);
    res.status(500).json({ error: "Documentation generation failed" });
  }
});
511
+
512
// Socket.IO
// Per-connection wiring: room membership, chat relay, emergency escalation,
// on-demand documentation, typing indicator, and cleanup on disconnect.
io.on("connection", (socket) => {
  console.log("Connected:", socket.id);

  // Register the user, lazily create the room, claim the patient/doctor slot
  // (first come, first served per role), replay shared history, and send a
  // private role-specific greeting.
  socket.on("join-room", async ({ roomId, nickname, role }) => {
    socket.join(roomId);
    users[socket.id] = { nickname, role, roomId };

    if (!rooms[roomId]) {
      rooms[roomId] = {
        patient: null,
        doctor: null,
        messages: [],
        files: [],
        patientData: {},
        emergencyMode: false
      };
    }

    if (role === "patient" && !rooms[roomId].patient) {
      rooms[roomId].patient = nickname;
    } else if (role === "doctor" && !rooms[roomId].doctor) {
      rooms[roomId].doctor = nickname;
    }

    // Only broadcast (non-private) messages are replayed to a joiner.
    socket.emit("room-history", {
      messages: rooms[roomId].messages.filter(m => !m.forRole),
      files: rooms[roomId].files
    });

    io.to(roomId).emit("user-joined", {
      nickname,
      role,
      patient: rooms[roomId].patient,
      doctor: rooms[roomId].doctor
    });

    // Role-specific greeting (PRIVATE - only to this user)
    let greeting = "";
    if (role === "patient") {
      greeting = `Hello ${nickname}! 👋 I'm here to help guide you. What brings you in today?`;
    } else if (role === "doctor") {
      greeting = `Welcome Dr. ${nickname}! 👨‍⚕️ Clinical analysis tools ready. Use "Generate SOAP Note" for documentation.`;

      // FEATURE 5: Doctor briefing — delayed, private clinical summary when the
      // room already has history to summarize.
      if (rooms[roomId].messages.length > 0 || rooms[roomId].files.length > 0) {
        setTimeout(async () => {
          const briefing = await getAIResponse(
            roomId,
            "Provide a 3-point clinical summary: chief complaint, temporal trends from files, critical findings.",
            "doctor",
            true
          );

          socket.emit("ai-message", {
            message: `📋 **Clinical Briefing**:\n${briefing}`,
            isPrivate: true,
            forRole: "doctor"
          });
        }, 1000);
      }
    }

    if (greeting) {
      socket.emit("ai-message", {
        message: greeting,
        isPrivate: true,
        forRole: role
      });
    }
  });

  // Main chat path. Every message is triaged for emergencies first.
  // NOTE(review): detectEmergency awaits an LLM call per keyword-matching
  // message, adding latency to chat delivery — confirm this is acceptable.
  socket.on("chat-message", async ({ roomId, message }) => {
    const user = users[socket.id];
    if (!user || !rooms[roomId]) return;

    // Check if this is an @ai request
    const isAIRequest = message.toLowerCase().includes('@ai');

    // FEATURE 4: Emergency detection
    const emergencyCheck = await detectEmergency(message, user.role);

    // If NOT an @ai request, broadcast message to everyone
    if (!isAIRequest) {
      const chatMessage = {
        role: user.role === "patient" ? "Patient" : "Doctor",
        nickname: user.nickname,
        content: message,
        timestamp: new Date().toISOString(),
        isEmergency: emergencyCheck.isEmergency
      };

      rooms[roomId].messages.push(chatMessage);
      io.to(roomId).emit("chat-message", chatMessage);
    }

    // FEATURE 4: Emergency escalation — alert patient and doctor privately,
    // then skip all normal AI responses for this message.
    if (emergencyCheck.isEmergency) {
      rooms[roomId].emergencyMode = true;

      // Alert patient immediately
      if (user.role === "patient") {
        const urgentMessage = `🚨 **URGENT MEDICAL ATTENTION NEEDED**\n\n${emergencyCheck.urgentAdvice}\n\nCall emergency services (911) immediately if symptoms worsen.`;
        socket.emit("ai-message", {
          message: urgentMessage,
          isPrivate: true,
          forRole: "patient",
          isEmergency: true
        });
      }

      // Alert doctor
      const doctorSocketId = Object.keys(users).find(
        sid => users[sid].roomId === roomId && users[sid].role === "doctor"
      );

      if (doctorSocketId) {
        const doctorAlert = `🚨 **EMERGENCY ALERT**\n\nPatient: ${user.nickname}\nLevel: ${emergencyCheck.level}\nReason: ${emergencyCheck.reasoning}\n\nMessage: "${message}"\n\nImmediate evaluation required.`;
        io.to(doctorSocketId).emit("ai-message", {
          message: doctorAlert,
          isPrivate: true,
          forRole: "doctor",
          isEmergency: true
        });
      }

      return; // Don't process normal AI response in emergency
    }

    // Handle @ai requests - PRIVATE response only to requester
    if (isAIRequest) {
      const messageText = message.toLowerCase();
      const isFileQuery =
        messageText.includes("report") ||
        messageText.includes("file") ||
        messageText.includes("result") ||
        messageText.includes("test") ||
        messageText.includes("value") ||
        messageText.includes("finding") ||
        messageText.includes("trend");

      // Artificial delay so the reply feels conversational.
      setTimeout(async () => {
        const aiResponse = await getAIResponse(roomId, message, user.role, isFileQuery);

        // Send ONLY to the user who requested (not broadcast)
        socket.emit("ai-message", {
          message: aiResponse,
          isPrivate: true,
          forRole: user.role
        });
      }, 1500);
    } else {
      // Auto-respond logic for non-@ai messages: patients alone in the room
      // asking questions, or doctors asking about files.
      const messageText = message.toLowerCase();
      const isFileQuery =
        messageText.includes("report") ||
        messageText.includes("file") ||
        messageText.includes("result") ||
        messageText.includes("test") ||
        messageText.includes("value") ||
        messageText.includes("finding") ||
        messageText.includes("trend");

      const shouldAIRespond =
        (user.role === "patient" && !rooms[roomId].doctor && message.endsWith("?")) ||
        (user.role === "doctor" && isFileQuery);

      if (shouldAIRespond) {
        setTimeout(async () => {
          const aiResponse = await getAIResponse(roomId, message, user.role, isFileQuery);

          socket.emit("ai-message", {
            message: aiResponse,
            isPrivate: true,
            forRole: user.role
          });
        }, 1500);
      }
    }
  });

  // FEATURE 5: Generate documentation on request — doctor-only.
  socket.on("request-documentation", async ({ roomId }) => {
    const user = users[socket.id];
    if (!user || user.role !== "doctor") return;

    const documentation = await generateClinicalDocumentation(roomId);
    if (documentation) {
      socket.emit("documentation-generated", { documentation });
    }
  });

  // Relay typing indicator to everyone else in the room.
  socket.on("typing", ({ roomId }) => {
    const user = users[socket.id];
    if (user) {
      socket.to(roomId).emit("user-typing", { nickname: user.nickname });
    }
  });

  // Free the role slot and notify the room; the room record itself
  // (messages/files) is intentionally retained for rejoining users.
  socket.on("disconnect", () => {
    const user = users[socket.id];
    if (user) {
      const { roomId, nickname, role } = user;

      if (rooms[roomId]) {
        if (role === "patient") rooms[roomId].patient = null;
        if (role === "doctor") rooms[roomId].doctor = null;

        io.to(roomId).emit("user-left", {
          nickname,
          role,
          patient: rooms[roomId].patient,
          doctor: rooms[roomId].doctor
        });
      }

      delete users[socket.id];
    }
  });
});
732
+
733
// Hugging Face Spaces routes traffic to 7860 unless PORT overrides it.
const PORT = process.env.PORT || 7860;

// Bind on all interfaces so the containerized server is reachable.
server.listen(PORT, "0.0.0.0", () => {
  console.log(`🏥 Enhanced Medical Chat Server running on port ${PORT}`);
});