NitinBot001 committed on
Commit
8240c7e
·
verified ·
1 Parent(s): 9bf50bf

Update script.js

Browse files
Files changed (1) hide show
  1. script.js +654 -351
script.js CHANGED
@@ -1,352 +1,655 @@
1
- document.addEventListener('DOMContentLoaded', () => {
2
- // DOM Elements
3
- const chatBox = document.getElementById('chat-box');
4
- const textInput = document.getElementById('text-input');
5
- const sendBtn = document.getElementById('send-btn');
6
- const micBtn = document.getElementById('mic-btn');
7
- const loadingIndicator = document.getElementById('loading-indicator');
8
- const statusIndicator = document.getElementById('status-indicator');
9
-
10
- // Mode Buttons
11
- const modeTextBtn = document.getElementById('mode-text');
12
- const modeVoiceBtn = document.getElementById('mode-voice');
13
- const modeVideoBtn = document.getElementById('mode-video');
14
-
15
- // Voice Controls
16
- const voiceControls = document.getElementById('voice-controls');
17
- const continuousToggle = document.getElementById('continuous-toggle');
18
- const rateSlider = document.getElementById('rate');
19
- const pitchSlider = document.getElementById('pitch');
20
-
21
- // Video Elements
22
- const videoFeed = document.getElementById('video-feed');
23
- const canvas = document.getElementById('canvas');
24
- const imageModal = document.getElementById('image-capture-modal');
25
- const closeModalBtn = document.getElementById('close-modal-btn');
26
-
27
- // State Variables
28
- let sessionId = null; // Will be set before each new query
29
- let currentMode = 'text'; // 'text', 'voice', 'video'
30
- let isListening = false;
31
- let isContinuousMode = false;
32
- let videoStream = null;
33
-
34
- const API_BASE_URL = 'https://nitinbot001-medbot-backend.hf.space';
35
-
36
- // Speech Recognition (STT) Setup
37
- const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
38
- let recognition;
39
- if (SpeechRecognition) {
40
- recognition = new SpeechRecognition();
41
- recognition.continuous = false;
42
- recognition.interimResults = false;
43
- recognition.lang = 'en-US'; // You can change this
44
- } else {
45
- micBtn.disabled = true;
46
- addMessageToUI('error', 'Speech Recognition is not supported in this browser.');
47
- }
48
-
49
- // Speech Synthesis (TTS) Setup
50
- const synth = window.speechSynthesis;
51
-
52
- // --- INITIALIZATION ---
53
- function initializeApp() {
54
- loadHistory();
55
- setupEventListeners();
56
- statusIndicator.textContent = '● Ready';
57
- statusIndicator.style.color = '#cccccc';
58
- addMessageToUI('ai', 'Hello! I am your MediBot Assistant. How can I help you today?');
59
- }
60
-
61
- async function startNewSession() {
62
- statusIndicator.textContent = '● Connecting...';
63
- statusIndicator.style.color = 'orange';
64
- try {
65
- const response = await fetch(`${API_BASE_URL}/start_session`, { method: 'POST' });
66
- if (!response.ok) throw new Error('Failed to start session');
67
- const data = await response.json();
68
- sessionId = data.session_id; // Set the session ID for the current transaction
69
- statusIndicator.textContent = '● Connected';
70
- statusIndicator.style.color = '#76ff03';
71
- console.log('New transaction session started:', sessionId);
72
- return true; // Indicate success
73
- } catch (error) {
74
- console.error('Session start error:', error);
75
- sessionId = null; // Ensure session ID is null on failure
76
- statusIndicator.textContent = '● Connection Failed';
77
- statusIndicator.style.color = '#ff4d4d';
78
- addMessageToUI('error', `Could not connect to the server. ${error.message}`);
79
- return false; // Indicate failure
80
- }
81
- }
82
-
83
- // --- EVENT LISTENERS ---
84
- function setupEventListeners() {
85
- sendBtn.addEventListener('click', handleTextInput);
86
- textInput.addEventListener('keydown', (e) => {
87
- if (e.key === 'Enter') handleTextInput();
88
- });
89
-
90
- // Mode Switching
91
- modeTextBtn.addEventListener('click', () => switchMode('text'));
92
- modeVoiceBtn.addEventListener('click', () => switchMode('voice'));
93
- modeVideoBtn.addEventListener('click', () => switchMode('video'));
94
-
95
- // Voice Controls
96
- micBtn.addEventListener('click', toggleListening);
97
- continuousToggle.addEventListener('change', (e) => {
98
- isContinuousMode = e.target.checked;
99
- if (recognition) {
100
- recognition.continuous = isContinuousMode;
101
- }
102
- });
103
-
104
- // STT Events
105
- if (recognition) {
106
- recognition.onstart = () => {
107
- isListening = true;
108
- micBtn.classList.add('listening');
109
- micBtn.innerHTML = '<i class="fas fa-stop"></i>';
110
- };
111
- recognition.onend = () => {
112
- isListening = false;
113
- micBtn.classList.remove('listening');
114
- micBtn.innerHTML = '<i class="fas fa-microphone"></i>';
115
- if (isContinuousMode && currentMode !== 'text') {
116
- recognition.start(); // Keep listening in continuous mode
117
- }
118
- };
119
- recognition.onresult = (event) => {
120
- const transcript = event.results[event.results.length - 1][0].transcript.trim();
121
- textInput.value = transcript;
122
- processUserQuery(transcript);
123
- };
124
- recognition.onerror = (event) => {
125
- console.error('Speech recognition error:', event.error);
126
- addMessageToUI('error', `Speech recognition error: ${event.error}`);
127
- };
128
- }
129
-
130
- // Image Capture Modal
131
- closeModalBtn.addEventListener('click', () => {
132
- imageModal.classList.add('hidden');
133
- addMessageToUI('ai', 'Capturing image in 1 second...');
134
- setTimeout(captureAndSendImage, 1000);
135
- });
136
- }
137
-
138
- // --- CORE LOGIC ---
139
- function handleTextInput() {
140
- const query = textInput.value.trim();
141
- if (query) {
142
- processUserQuery(query);
143
- textInput.value = '';
144
- }
145
- }
146
-
147
- async function processUserQuery(query) {
148
- addMessageToUI('user', query);
149
- showLoading(true);
150
-
151
- // *** CHANGED LOGIC: Start a new session for every query ***
152
- const sessionStarted = await startNewSession();
153
- if (!sessionStarted) {
154
- showLoading(false);
155
- return; // Stop if session could not be created
156
- }
157
-
158
- try {
159
- const response = await fetch(`${API_BASE_URL}/process_query`, {
160
- method: 'POST',
161
- headers: { 'Content-Type': 'application/json' },
162
- body: JSON.stringify({ session_id: sessionId, query: query })
163
- });
164
-
165
- const data = await response.json();
166
-
167
- if (!response.ok) {
168
- throw new Error(data.error || 'API request failed');
169
- }
170
-
171
- if (data.status === 'image_required') {
172
- handleImageRequest(data.message);
173
- } else {
174
- const message = data.response?.response || data.data;
175
- handleApiResponse(message);
176
- }
177
-
178
- } catch (error) {
179
- console.error('Error processing query:', error);
180
- handleApiResponse(`Sorry, I encountered an error: ${error.message}`, true);
181
- } finally {
182
- showLoading(false);
183
- }
184
- }
185
-
186
- function handleApiResponse(message, isError = false) {
187
- const type = isError ? 'error' : 'ai';
188
- addMessageToUI(type, message);
189
- if (!isError && (currentMode === 'voice' || currentMode === 'video')) {
190
- speak(message);
191
- }
192
- }
193
-
194
- // --- UI & STATE MANAGEMENT ---
195
- function switchMode(newMode) {
196
- if (currentMode === newMode) return;
197
-
198
- if (currentMode === 'video') stopCamera();
199
- if (isListening && recognition) recognition.stop();
200
-
201
- currentMode = newMode;
202
-
203
- document.querySelectorAll('.mode-btn').forEach(btn => btn.classList.remove('active'));
204
- document.getElementById(`mode-${newMode}`).classList.add('active');
205
-
206
- if (newMode === 'text') {
207
- document.body.classList.remove('body-video-mode');
208
- voiceControls.classList.add('hidden');
209
- micBtn.classList.add('hidden');
210
- sendBtn.classList.remove('hidden');
211
- textInput.classList.remove('hidden');
212
- videoFeed.style.display = 'none';
213
- } else if (newMode === 'voice') {
214
- document.body.classList.remove('body-video-mode');
215
- voiceControls.classList.remove('hidden');
216
- micBtn.classList.remove('hidden');
217
- sendBtn.classList.add('hidden');
218
- textInput.classList.add('hidden');
219
- videoFeed.style.display = 'none';
220
- } else if (newMode === 'video') {
221
- document.body.classList.add('body-video-mode');
222
- voiceControls.classList.remove('hidden');
223
- micBtn.classList.remove('hidden');
224
- sendBtn.classList.add('hidden');
225
- textInput.classList.add('hidden');
226
- videoFeed.style.display = 'block';
227
- startCamera();
228
- }
229
- }
230
-
231
- function addMessageToUI(sender, text) {
232
- const messageDiv = document.createElement('div');
233
- messageDiv.classList.add('message', `${sender}-message`);
234
- messageDiv.textContent = text;
235
- chatBox.appendChild(messageDiv);
236
- chatBox.scrollTop = chatBox.scrollHeight;
237
- saveHistory();
238
- }
239
-
240
- function showLoading(show) {
241
- loadingIndicator.style.display = show ? 'flex' : 'none';
242
- }
243
-
244
- // --- VOICE & VIDEO ---
245
- function toggleListening() {
246
- if (!recognition) return;
247
- if (isListening) {
248
- recognition.stop();
249
- } else {
250
- recognition.start();
251
- }
252
- }
253
-
254
- function speak(text) {
255
- if (synth.speaking) {
256
- console.error('SpeechSynthesis is already speaking.');
257
- return;
258
- }
259
- if (text !== '') {
260
- const utterance = new SpeechSynthesisUtterance(text);
261
- const femaleVoice = synth.getVoices().find(voice => voice.name.includes('Female') || voice.gender === 'female');
262
- if(femaleVoice) utterance.voice = femaleVoice;
263
- utterance.pitch = pitchSlider.value;
264
- utterance.rate = rateSlider.value;
265
- synth.speak(utterance);
266
- }
267
- }
268
-
269
- async function startCamera() {
270
- try {
271
- videoStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
272
- videoFeed.srcObject = videoStream;
273
- } catch (err) {
274
- console.error("Error accessing camera: ", err);
275
- addMessageToUI('error', 'Could not access the camera. Please grant permission.');
276
- switchMode('voice');
277
- }
278
- }
279
-
280
- function stopCamera() {
281
- if (videoStream) {
282
- videoStream.getTracks().forEach(track => track.stop());
283
- videoFeed.srcObject = null;
284
- videoStream = null;
285
- }
286
- }
287
-
288
- function handleImageRequest(message) {
289
- addMessageToUI('ai', message);
290
- if (currentMode !== 'video') {
291
- addMessageToUI('ai', "Please switch to Video mode to provide an image.");
292
- } else {
293
- imageModal.classList.remove('hidden');
294
- }
295
- }
296
-
297
- async function captureAndSendImage() {
298
- if (!videoStream || !sessionId) {
299
- addMessageToUI('error', 'Cannot capture image. Video stream or session is not active.');
300
- return;
301
- };
302
-
303
- const videoTrack = videoStream.getVideoTracks()[0];
304
- const settings = videoTrack.getSettings();
305
- canvas.width = settings.width;
306
- canvas.height = settings.height;
307
-
308
- const context = canvas.getContext('2d');
309
- context.drawImage(videoFeed, 0, 0, canvas.width, canvas.height);
310
-
311
- canvas.toBlob(async (blob) => {
312
- const formData = new FormData();
313
- formData.append('session_id', sessionId);
314
- formData.append('photo', blob, 'capture.jpg');
315
-
316
- showLoading(true);
317
- try {
318
- const response = await fetch(`${API_BASE_URL}/process_with_image`, {
319
- method: 'POST',
320
- body: formData
321
- });
322
-
323
- const data = await response.json();
324
- if (!response.ok) throw new Error(data.message || 'Image processing failed');
325
-
326
- const message = data.response?.response || data.data;
327
- handleApiResponse(message);
328
-
329
- } catch (error) {
330
- console.error('Error sending image:', error);
331
- handleApiResponse(`Sorry, I couldn't process the image: ${error.message}`, true);
332
- } finally {
333
- showLoading(false);
334
- }
335
- }, 'image/jpeg');
336
- }
337
-
338
- // --- LOCAL STORAGE ---
339
- function saveHistory() {
340
- localStorage.setItem('medibotChatHistory', chatBox.innerHTML);
341
- }
342
- function loadHistory() {
343
- const history = localStorage.getItem('medibotChatHistory');
344
- if (history) {
345
- chatBox.innerHTML = history;
346
- chatBox.scrollTop = chatBox.scrollHeight;
347
- }
348
- }
349
-
350
- // --- START THE APP ---
351
- initializeApp();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
352
  });
 
1
+ document.addEventListener('DOMContentLoaded', () => {
2
+ // DOM Elements
3
+ const chatBox = document.getElementById('chat-box');
4
+ const textInput = document.getElementById('text-input');
5
+ const sendBtn = document.getElementById('send-btn');
6
+ const micBtn = document.getElementById('mic-btn');
7
+ const loadingIndicator = document.getElementById('loading-indicator');
8
+ const statusIndicator = document.getElementById('status-indicator');
9
+
10
+ // Mode Buttons
11
+ const modeTextBtn = document.getElementById('mode-text');
12
+ const modeVoiceBtn = document.getElementById('mode-voice');
13
+ const modeVideoBtn = document.getElementById('mode-video');
14
+
15
+ // Voice Controls
16
+ const voiceControls = document.getElementById('voice-controls');
17
+ const continuousToggle = document.getElementById('continuous-toggle');
18
+ const rateSlider = document.getElementById('rate');
19
+ const pitchSlider = document.getElementById('pitch');
20
+
21
+ // Video Elements
22
+ const videoFeed = document.getElementById('video-feed');
23
+ const canvas = document.getElementById('canvas');
24
+ const imageModal = document.getElementById('image-capture-modal');
25
+ const closeModalBtn = document.getElementById('close-modal-btn');
26
+
27
+ // Upload Elements (Optional - if you want file upload capability)
28
+ const fileUploadBtn = document.getElementById('file-upload-btn');
29
+ const fileInput = document.getElementById('file-input');
30
+
31
+ // State Variables
32
+ let sessionId = null;
33
+ let currentMode = 'text';
34
+ let isListening = false;
35
+ let isContinuousMode = false;
36
+ let videoStream = null;
37
+ let systemStatus = 'disconnected';
38
+
39
+ // Updated API Base URL for local development
40
+ const API_BASE_URL = 'https://nitinbot001-medbot-backend.hf.space';
41
+
42
+ // Speech Recognition (STT) Setup
43
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
44
+ let recognition;
45
+ if (SpeechRecognition) {
46
+ recognition = new SpeechRecognition();
47
+ recognition.continuous = false;
48
+ recognition.interimResults = false;
49
+ recognition.lang = 'en-US';
50
+ } else {
51
+ console.warn('Speech Recognition not supported');
52
+ if (micBtn) micBtn.disabled = true;
53
+ }
54
+
55
+ // Speech Synthesis (TTS) Setup
56
+ const synth = window.speechSynthesis;
57
+
58
+ // --- INITIALIZATION ---
59
+ function initializeApp() {
60
+ checkSystemHealth();
61
+ loadHistory();
62
+ setupEventListeners();
63
+ updateStatusIndicator('ready', 'Ready');
64
+ addMessageToUI('ai', 'Hello! I am your MediBot Assistant. I can help you with medical information, analyze images of medicines, and answer questions about diseases. How can I assist you today?');
65
+ }
66
+
67
+ async function checkSystemHealth() {
68
+ try {
69
+ const response = await fetch(`${API_BASE_URL}/health`);
70
+ const data = await response.json();
71
+
72
+ if (response.ok && data.status.includes('Running')) {
73
+ systemStatus = 'connected';
74
+ updateStatusIndicator('connected', 'System Ready');
75
+ console.log('System health check passed:', data);
76
+
77
+ // Display system info
78
+ if (data.disease_fact_sheets > 0 || data.medicine_knowledge_files > 0) {
79
+ const info = `System loaded with ${data.disease_fact_sheets} disease fact sheets and ${data.medicine_knowledge_files} medicine knowledge files.`;
80
+ addMessageToUI('system', info);
81
+ }
82
+ } else {
83
+ throw new Error('System health check failed');
84
+ }
85
+ } catch (error) {
86
+ systemStatus = 'disconnected';
87
+ updateStatusIndicator('error', 'System Offline');
88
+ console.error('System health check failed:', error);
89
+ addMessageToUI('error', 'Unable to connect to the medical system. Please ensure the backend server is running on localhost:5000.');
90
+ }
91
+ }
92
+
93
+ async function startNewSession() {
94
+ if (systemStatus !== 'connected') {
95
+ addMessageToUI('error', 'System is not connected. Please refresh the page and try again.');
96
+ return false;
97
+ }
98
+
99
+ updateStatusIndicator('connecting', 'Starting session...');
100
+ try {
101
+ const response = await fetch(`${API_BASE_URL}/start_session`, {
102
+ method: 'POST',
103
+ headers: {
104
+ 'Content-Type': 'application/json'
105
+ }
106
+ });
107
+
108
+ if (!response.ok) {
109
+ const errorData = await response.json();
110
+ throw new Error(errorData.error || 'Failed to start session');
111
+ }
112
+
113
+ const data = await response.json();
114
+ sessionId = data.session_id;
115
+ updateStatusIndicator('connected', 'Session Active');
116
+ console.log('New session started:', sessionId);
117
+ return true;
118
+ } catch (error) {
119
+ console.error('Session start error:', error);
120
+ sessionId = null;
121
+ updateStatusIndicator('error', 'Session Failed');
122
+ addMessageToUI('error', `Could not start session: ${error.message}`);
123
+ return false;
124
+ }
125
+ }
126
+
127
+ // --- EVENT LISTENERS ---
128
+ function setupEventListeners() {
129
+ // Text input handling
130
+ if (sendBtn) sendBtn.addEventListener('click', handleTextInput);
131
+ if (textInput) {
132
+ textInput.addEventListener('keydown', (e) => {
133
+ if (e.key === 'Enter' && !e.shiftKey) {
134
+ e.preventDefault();
135
+ handleTextInput();
136
+ }
137
+ });
138
+ }
139
+
140
+ // Mode switching
141
+ if (modeTextBtn) modeTextBtn.addEventListener('click', () => switchMode('text'));
142
+ if (modeVoiceBtn) modeVoiceBtn.addEventListener('click', () => switchMode('voice'));
143
+ if (modeVideoBtn) modeVideoBtn.addEventListener('click', () => switchMode('video'));
144
+
145
+ // Voice controls
146
+ if (micBtn) micBtn.addEventListener('click', toggleListening);
147
+ if (continuousToggle) {
148
+ continuousToggle.addEventListener('change', (e) => {
149
+ isContinuousMode = e.target.checked;
150
+ if (recognition) {
151
+ recognition.continuous = isContinuousMode;
152
+ }
153
+ });
154
+ }
155
+
156
+ // File upload (optional)
157
+ if (fileUploadBtn && fileInput) {
158
+ fileUploadBtn.addEventListener('click', () => fileInput.click());
159
+ fileInput.addEventListener('change', handleFileUpload);
160
+ }
161
+
162
+ // Speech recognition events
163
+ if (recognition) {
164
+ recognition.onstart = () => {
165
+ isListening = true;
166
+ if (micBtn) {
167
+ micBtn.classList.add('listening');
168
+ micBtn.innerHTML = '<i class="fas fa-stop"></i>';
169
+ }
170
+ updateStatusIndicator('listening', 'Listening...');
171
+ };
172
+
173
+ recognition.onend = () => {
174
+ isListening = false;
175
+ if (micBtn) {
176
+ micBtn.classList.remove('listening');
177
+ micBtn.innerHTML = '<i class="fas fa-microphone"></i>';
178
+ }
179
+ updateStatusIndicator('connected', 'Session Active');
180
+
181
+ if (isContinuousMode && currentMode !== 'text') {
182
+ setTimeout(() => recognition.start(), 1000);
183
+ }
184
+ };
185
+
186
+ recognition.onresult = (event) => {
187
+ const transcript = event.results[event.results.length - 1][0].transcript.trim();
188
+ if (textInput) textInput.value = transcript;
189
+ if (transcript) processUserQuery(transcript);
190
+ };
191
+
192
+ recognition.onerror = (event) => {
193
+ console.error('Speech recognition error:', event.error);
194
+ addMessageToUI('error', `Speech recognition error: ${event.error}`);
195
+ updateStatusIndicator('error', 'Speech Error');
196
+ };
197
+ }
198
+
199
+ // Image capture modal
200
+ if (closeModalBtn) {
201
+ closeModalBtn.addEventListener('click', () => {
202
+ if (imageModal) imageModal.classList.add('hidden');
203
+ addMessageToUI('ai', 'Capturing image in 2 seconds...');
204
+ setTimeout(captureAndSendImage, 2000);
205
+ });
206
+ }
207
+
208
+ // System health check interval
209
+ setInterval(checkSystemHealth, 30000); // Check every 30 seconds
210
+ }
211
+
212
// --- CORE LOGIC ---
// Read the typed query, dispatch it, then clear the input box.
function handleTextInput() {
  if (!textInput) return;
  const query = textInput.value.trim();
  if (!query) return;
  processUserQuery(query);
  textInput.value = '';
}
220
+
221
/**
 * Validate a file chosen via the hidden <input type="file"> and, after
 * asking the user what they want to know about it, submit it as an
 * image query. Rejects non-image types and files over 16 MB.
 */
async function handleFileUpload(event) {
  const input = event.target;
  const file = input.files[0];
  // Reset the input so picking the *same* file again still fires 'change'
  // (browsers suppress the event when the value is unchanged).
  input.value = '';
  if (!file) return;

  // Validate file type
  const allowedTypes = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/bmp', 'image/webp'];
  if (!allowedTypes.includes(file.type)) {
    addMessageToUI('error', 'Please select a valid image file (JPEG, PNG, GIF, BMP, or WebP).');
    return;
  }

  // Check file size (16MB limit — matches the backend's upload cap)
  if (file.size > 16 * 1024 * 1024) {
    addMessageToUI('error', 'File size too large. Please select an image under 16MB.');
    return;
  }

  const query = prompt('Please describe what you want to know about this image:');
  if (!query) return;

  await processImageQuery(query, file);
}
243
+
244
/**
 * Send a text query to the backend and render the reply.
 * Echoes the query into the chat, opens a fresh session (every query is an
 * independent transaction), then POSTs to /process_query.
 */
async function processUserQuery(query) {
  addMessageToUI('user', query);
  showLoading(true);

  // Start a new session for every query
  const sessionStarted = await startNewSession();
  if (!sessionStarted) {
    showLoading(false);
    return;
  }

  try {
    const response = await fetch(`${API_BASE_URL}/process_query`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        session_id: sessionId,
        query: query
      })
    });

    const data = await response.json();

    if (!response.ok) {
      throw new Error(data.error || `Server error: ${response.status}`);
    }

    // Two success shapes: the backend may need an image before it can
    // answer, or it returns the answer directly.
    if (data.status === 'image_required') {
      handleImageRequest(data.message, data.category);
    } else if (data.status === 'success') {
      handleApiResponse(data.response, data.category);
    } else {
      throw new Error('Unexpected response format');
    }

  } catch (error) {
    console.error('Error processing query:', error);
    handleApiResponse(`Sorry, I encountered an error: ${error.message}`, null, true);
  } finally {
    showLoading(false);
  }
}
288
+
289
/**
 * Send a user-supplied image (from the file picker) plus its question to
 * the backend's /process_with_image endpoint and render the reply.
 * Like text queries, each image query runs in its own fresh session.
 */
async function processImageQuery(query, imageFile) {
  addMessageToUI('user', query);
  showLoading(true);

  const sessionStarted = await startNewSession();
  if (!sessionStarted) {
    showLoading(false);
    return;
  }

  try {
    // multipart/form-data upload; the browser sets the Content-Type boundary.
    const formData = new FormData();
    formData.append('session_id', sessionId);
    formData.append('photo', imageFile);

    const response = await fetch(`${API_BASE_URL}/process_with_image`, {
      method: 'POST',
      body: formData
    });

    const data = await response.json();

    if (!response.ok) {
      throw new Error(data.error || `Server error: ${response.status}`);
    }

    if (data.status === 'success') {
      handleApiResponse(data.response, data.category);
    } else {
      throw new Error('Unexpected response format');
    }

  } catch (error) {
    console.error('Error processing image query:', error);
    handleApiResponse(`Sorry, I couldn't process the image: ${error.message}`, null, true);
  } finally {
    showLoading(false);
  }
}
328
+
329
/**
 * Render a backend reply (or an error) in the chat, append a category
 * label when the backend classified the query, and speak the reply
 * aloud in voice/video modes.
 */
function handleApiResponse(responseData, category, isError = false) {
  // Normalise the backend's possible payload shapes into plain text.
  const extractText = (payload) => {
    if (typeof payload === 'string') return payload;
    if (payload && payload.response) return payload.response;
    if (payload && payload.data) return payload.data;
    return JSON.stringify(payload, null, 2);
  };

  const message = isError ? responseData : extractText(responseData);
  addMessageToUI(isError ? 'error' : 'ai', message, category);

  // Add category info if available
  if (!isError && category) {
    const categoryInfo = getCategoryInfo(category);
    if (categoryInfo) {
      addMessageToUI('system', categoryInfo);
    }
  }

  // Text-to-speech for voice/video modes
  if (!isError && (currentMode === 'voice' || currentMode === 'video')) {
    speak(message);
  }
}
363
+
364
/**
 * Map a backend category code to a human-readable label.
 * Returns null for unknown (or missing) categories.
 */
function getCategoryInfo(category) {
  switch (category) {
    case 'disease_query':
      return 'Category: General Disease Information';
    case 'medicine_info':
      return 'Category: Medicine Information & Analysis';
    case 'skin_disease':
      return 'Category: Skin Condition Analysis';
    case 'report_reading':
      return 'Category: Medical Report Interpretation';
    default:
      return null;
  }
}
373
+
374
// --- UI & STATE MANAGEMENT ---
/**
 * Update the status dot text and colour. Unknown statuses fall back
 * to the neutral grey used by 'ready'.
 */
function updateStatusIndicator(status, message) {
  if (!statusIndicator) return;

  const STATUS_COLORS = {
    ready: '#cccccc',
    connected: '#76ff03',
    connecting: '#ffeb3b',
    listening: '#2196f3',
    error: '#ff4d4d',
    disconnected: '#ff9800'
  };

  statusIndicator.textContent = `● ${message}`;
  statusIndicator.style.color = STATUS_COLORS[status] || '#cccccc';
}
390
+
391
/**
 * Switch between 'text', 'voice' and 'video' modes: tear down the old
 * mode, flip the relevant UI visibility, and start the camera for video.
 */
function switchMode(newMode) {
  if (currentMode === newMode) return;

  // Cleanup current mode
  if (currentMode === 'video') stopCamera();
  if (isListening && recognition) recognition.stop();

  currentMode = newMode;

  // Highlight the active mode button.
  document.querySelectorAll('.mode-btn').forEach(btn => btn.classList.remove('active'));
  const newModeBtn = document.getElementById(`mode-${newMode}`);
  if (newModeBtn) newModeBtn.classList.add('active');

  const isText = newMode === 'text';
  const isVideo = newMode === 'video';

  // Voice and video share a layout (mic visible, text input hidden);
  // video additionally restyles the page body and shows the camera feed.
  document.body.classList.toggle('body-video-mode', isVideo);
  if (voiceControls) voiceControls.classList.toggle('hidden', isText);
  if (micBtn) micBtn.classList.toggle('hidden', isText);
  if (sendBtn) sendBtn.classList.toggle('hidden', !isText);
  if (textInput) textInput.classList.toggle('hidden', !isText);
  if (videoFeed) videoFeed.style.display = isVideo ? 'block' : 'none';

  if (isVideo) startCamera();

  addMessageToUI('system', `Switched to ${newMode} mode`);
}
438
+
439
/**
 * Append one chat bubble and persist the transcript.
 * sender: 'user' | 'ai' | 'system' | 'error' — becomes a CSS class;
 * category (optional) adds a category-* class for styling.
 */
function addMessageToUI(sender, text, category = null) {
  if (!chatBox) return;

  const messageDiv = document.createElement('div');
  messageDiv.classList.add('message', `${sender}-message`);
  if (category) messageDiv.classList.add(`category-${category}`);

  // System notices get a muted, italic inline style.
  if (sender === 'system') {
    messageDiv.style.fontStyle = 'italic';
    messageDiv.style.color = '#888';
    messageDiv.style.fontSize = '0.9em';
  }

  // textContent (not innerHTML) so message text cannot inject markup.
  messageDiv.textContent = text;
  chatBox.appendChild(messageDiv);
  chatBox.scrollTop = chatBox.scrollHeight; // keep newest message visible
  saveHistory();
}

// Show or hide the spinner/loading row.
function showLoading(show) {
  if (!loadingIndicator) return;
  loadingIndicator.style.display = show ? 'flex' : 'none';
}
468
+
469
// --- VOICE & VIDEO ---
// Mic button acts as a start/stop toggle for speech recognition.
function toggleListening() {
  if (!recognition) {
    addMessageToUI('error', 'Speech recognition is not supported in this browser.');
    return;
  }
  if (isListening) {
    recognition.stop();
  } else {
    recognition.start();
  }
}

/**
 * Speak `text` aloud using the Web Speech API, preferring a
 * female-sounding voice when the platform exposes one.
 */
function speak(text) {
  // Skip when TTS is unavailable or an utterance is already playing.
  if (!synth || synth.speaking) {
    console.warn('Speech synthesis not available or already speaking');
    return;
  }
  if (!text || text.trim() === '') return;

  const utterance = new SpeechSynthesisUtterance(text);

  // Zira/Hazel are common female voices on Windows; `gender` is a
  // non-standard property some engines expose.
  const wantedNames = ['female', 'zira', 'hazel'];
  const preferred = synth.getVoices().find((voice) => {
    const name = voice.name.toLowerCase();
    return voice.gender === 'female' || wantedNames.some((w) => name.includes(w));
  });
  if (preferred) utterance.voice = preferred;

  // Apply the user's rate/pitch slider settings.
  if (pitchSlider) utterance.pitch = parseFloat(pitchSlider.value);
  if (rateSlider) utterance.rate = parseFloat(rateSlider.value);

  utterance.onerror = (event) => {
    console.error('Speech synthesis error:', event.error);
  };

  synth.speak(utterance);
}
514
+
515
/**
 * Open the camera at a modest resolution and attach it to the video
 * element. On failure (permission denied, no device) reports the error
 * and falls back to voice mode.
 */
async function startCamera() {
  try {
    videoStream = await navigator.mediaDevices.getUserMedia({
      video: { width: 640, height: 480 },
      audio: false
    });
    if (videoFeed) videoFeed.srcObject = videoStream;
    addMessageToUI('system', 'Camera activated for image capture');
  } catch (err) {
    console.error("Error accessing camera:", err);
    addMessageToUI('error', 'Could not access the camera. Please grant permission and try again.');
    switchMode('voice'); // fall back to a mode that needs no camera
  }
}

// Stop all camera tracks and detach the stream (no-op when already off).
function stopCamera() {
  if (!videoStream) return;
  videoStream.getTracks().forEach((track) => track.stop());
  if (videoFeed) videoFeed.srcObject = null;
  videoStream = null;
  addMessageToUI('system', 'Camera deactivated');
}
538
+
539
/**
 * React to the backend asking for an image: in video mode open the
 * capture modal, otherwise tell the user how to provide one.
 * `category` is part of the call signature but currently unused here.
 */
function handleImageRequest(message, category) {
  addMessageToUI('ai', message);
  if (currentMode === 'video') {
    if (imageModal) imageModal.classList.remove('hidden');
    return;
  }
  addMessageToUI('ai', "Please switch to Video mode to capture an image, or use the file upload option in Text mode.");
}
547
+
548
/**
 * Grab a frame from the live camera feed, encode it as JPEG and upload it
 * to /process_with_image under the current session. Triggered by closing
 * the image-capture modal (after a 2 s delay).
 */
async function captureAndSendImage() {
  if (!videoStream || !sessionId) {
    addMessageToUI('error', 'Cannot capture image. Video stream or session is not active.');
    return;
  }

  if (!videoFeed || !canvas) {
    addMessageToUI('error', 'Video capture elements not found.');
    return;
  }

  // Size the canvas to the camera's actual resolution; fall back to
  // 640x480 when the track does not report its settings.
  const videoTrack = videoStream.getVideoTracks()[0];
  const settings = videoTrack.getSettings();
  canvas.width = settings.width || 640;
  canvas.height = settings.height || 480;

  const context = canvas.getContext('2d');
  context.drawImage(videoFeed, 0, 0, canvas.width, canvas.height);

  // Encode asynchronously as JPEG at 80% quality, then upload.
  canvas.toBlob(async (blob) => {
    if (!blob) {
      addMessageToUI('error', 'Failed to capture image.');
      return;
    }

    const formData = new FormData();
    formData.append('session_id', sessionId);
    formData.append('photo', blob, 'capture.jpg');

    showLoading(true);
    addMessageToUI('system', 'Processing captured image...');

    try {
      const response = await fetch(`${API_BASE_URL}/process_with_image`, {
        method: 'POST',
        body: formData
      });

      const data = await response.json();

      if (!response.ok) {
        throw new Error(data.error || 'Image processing failed');
      }

      if (data.status === 'success') {
        handleApiResponse(data.response, data.category);
      } else {
        throw new Error('Unexpected response format');
      }

    } catch (error) {
      console.error('Error processing captured image:', error);
      handleApiResponse(`Sorry, I couldn't process the captured image: ${error.message}`, null, true);
    } finally {
      showLoading(false);
    }
  }, 'image/jpeg', 0.8);
}
606
+
607
// --- LOCAL STORAGE ---
// Persist the chat transcript as raw HTML in localStorage.
function saveHistory() {
  if (!chatBox) return;
  try {
    // NOTE(review): storing innerHTML means whatever is saved here is
    // re-injected verbatim on load. Messages are added via textContent,
    // but confirm nothing else writes markup into the chat box.
    localStorage.setItem('medibotChatHistory', chatBox.innerHTML);
  } catch (error) {
    // localStorage can throw (quota, private browsing) — non-fatal.
    console.warn('Could not save chat history:', error);
  }
}

// Restore a previously saved transcript, scrolled to the newest message.
function loadHistory() {
  try {
    const history = localStorage.getItem('medibotChatHistory');
    if (history && chatBox) {
      chatBox.innerHTML = history;
      chatBox.scrollTop = chatBox.scrollHeight;
    }
  } catch (error) {
    console.warn('Could not load chat history:', error);
  }
}

// Wipe both the on-screen transcript and the stored copy.
function clearHistory() {
  if (!chatBox) return;
  chatBox.innerHTML = '';
  localStorage.removeItem('medibotChatHistory');
  addMessageToUI('system', 'Chat history cleared');
}
637
+
638
// --- UTILITY FUNCTIONS ---
// Placeholder — not wired to any UI element; informs the user where
// knowledge-base files are managed.
function downloadKnowledgeBase() {
  addMessageToUI('system', 'Use the upload endpoints to add knowledge base files to the system.');
}

// --- EXPOSE FUNCTIONS TO GLOBAL SCOPE (for debugging) ---
// Lets the browser console poke at closure state, e.g.
// `medibotDebug.getSessionId()`.
window.medibotDebug = {
  clearHistory,
  checkSystemHealth,
  switchMode,
  getCurrentMode: () => currentMode,
  getSessionId: () => sessionId,
  getSystemStatus: () => systemStatus
};

// --- START THE APP ---
initializeApp();
655
  });