NitinBot001 committed
Commit 10cca2f · verified · 1 Parent(s): 8240c7e

Update script.js

Files changed (1): script.js (+62 -16)
script.js CHANGED
@@ -37,7 +37,7 @@ document.addEventListener('DOMContentLoaded', () => {
     let systemStatus = 'disconnected';
 
     // Updated API Base URL for local development
-    const API_BASE_URL = 'https://nitinbot001-medbot-backend.hf.space';
+    const API_BASE_URL = 'http://localhost:5000';
 
     // Speech Recognition (STT) Setup
     const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
@@ -57,14 +57,17 @@ document.addEventListener('DOMContentLoaded', () => {
 
     // --- INITIALIZATION ---
     function initializeApp() {
+        console.log('MediBot initializing...');
         checkSystemHealth();
         loadHistory();
         setupEventListeners();
         updateStatusIndicator('ready', 'Ready');
         addMessageToUI('ai', 'Hello! I am your MediBot Assistant. I can help you with medical information, analyze images of medicines, and answer questions about diseases. How can I assist you today?');
+        console.log('MediBot initialization complete');
     }
 
     async function checkSystemHealth() {
+        console.log('Checking system health...');
         try {
             const response = await fetch(`${API_BASE_URL}/health`);
             const data = await response.json();
@@ -74,10 +77,9 @@ document.addEventListener('DOMContentLoaded', () => {
                 updateStatusIndicator('connected', 'System Ready');
                 console.log('System health check passed:', data);
 
-                // Display system info
+                // Display system info only if there are loaded files
                 if (data.disease_fact_sheets > 0 || data.medicine_knowledge_files > 0) {
-                    const info = `System loaded with ${data.disease_fact_sheets} disease fact sheets and ${data.medicine_knowledge_files} medicine knowledge files.`;
-                    addMessageToUI('system', info);
+                    console.log(`System loaded with ${data.disease_fact_sheets} disease fact sheets and ${data.medicine_knowledge_files} medicine knowledge files`);
                 }
             } else {
                 throw new Error('System health check failed');
@@ -92,10 +94,12 @@ document.addEventListener('DOMContentLoaded', () => {
 
     async function startNewSession() {
         if (systemStatus !== 'connected') {
+            console.error('Cannot start session - system not connected');
             addMessageToUI('error', 'System is not connected. Please refresh the page and try again.');
             return false;
         }
 
+        console.log('Starting new session...');
         updateStatusIndicator('connecting', 'Starting session...');
         try {
             const response = await fetch(`${API_BASE_URL}/start_session`, {
@@ -126,6 +130,8 @@ document.addEventListener('DOMContentLoaded', () => {
 
     // --- EVENT LISTENERS ---
     function setupEventListeners() {
+        console.log('Setting up event listeners...');
+
         // Text input handling
         if (sendBtn) sendBtn.addEventListener('click', handleTextInput);
         if (textInput) {
@@ -147,6 +153,7 @@ document.addEventListener('DOMContentLoaded', () => {
         if (continuousToggle) {
             continuousToggle.addEventListener('change', (e) => {
                 isContinuousMode = e.target.checked;
+                console.log('Continuous mode toggled:', isContinuousMode);
                 if (recognition) {
                     recognition.continuous = isContinuousMode;
                 }
@@ -162,6 +169,7 @@ document.addEventListener('DOMContentLoaded', () => {
         // Speech recognition events
         if (recognition) {
             recognition.onstart = () => {
+                console.log('Speech recognition started');
                 isListening = true;
                 if (micBtn) {
                     micBtn.classList.add('listening');
@@ -171,6 +179,7 @@ document.addEventListener('DOMContentLoaded', () => {
             };
 
             recognition.onend = () => {
+                console.log('Speech recognition ended');
                 isListening = false;
                 if (micBtn) {
                     micBtn.classList.remove('listening');
@@ -179,12 +188,14 @@ document.addEventListener('DOMContentLoaded', () => {
                 updateStatusIndicator('connected', 'Session Active');
 
                 if (isContinuousMode && currentMode !== 'text') {
+                    console.log('Restarting speech recognition in continuous mode');
                     setTimeout(() => recognition.start(), 1000);
                 }
             };
 
             recognition.onresult = (event) => {
                 const transcript = event.results[event.results.length - 1][0].transcript.trim();
+                console.log('Speech recognition result:', transcript);
                 if (textInput) textInput.value = transcript;
                 if (transcript) processUserQuery(transcript);
             };
@@ -199,20 +210,22 @@ document.addEventListener('DOMContentLoaded', () => {
         // Image capture modal
         if (closeModalBtn) {
             closeModalBtn.addEventListener('click', () => {
+                console.log('Image capture modal closed, starting capture...');
                 if (imageModal) imageModal.classList.add('hidden');
-                addMessageToUI('ai', 'Capturing image in 2 seconds...');
                 setTimeout(captureAndSendImage, 2000);
             });
         }
 
         // System health check interval
         setInterval(checkSystemHealth, 30000); // Check every 30 seconds
+        console.log('Event listeners setup complete');
     }
 
     // --- CORE LOGIC ---
     function handleTextInput() {
         const query = textInput ? textInput.value.trim() : '';
         if (query) {
+            console.log('Processing text input:', query);
            processUserQuery(query);
            if (textInput) textInput.value = '';
        }
@@ -222,15 +235,19 @@ document.addEventListener('DOMContentLoaded', () => {
         const file = event.target.files[0];
         if (!file) return;
 
+        console.log('File selected for upload:', file.name, file.type, file.size);
+
         // Validate file type
         const allowedTypes = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/bmp', 'image/webp'];
         if (!allowedTypes.includes(file.type)) {
+            console.error('Invalid file type:', file.type);
             addMessageToUI('error', 'Please select a valid image file (JPEG, PNG, GIF, BMP, or WebP).');
             return;
         }
 
         // Check file size (16MB limit)
         if (file.size > 16 * 1024 * 1024) {
+            console.error('File size too large:', file.size);
             addMessageToUI('error', 'File size too large. Please select an image under 16MB.');
             return;
         }
@@ -238,10 +255,12 @@ document.addEventListener('DOMContentLoaded', () => {
         const query = prompt('Please describe what you want to know about this image:');
         if (!query) return;
 
+        console.log('Processing file upload with query:', query);
         await processImageQuery(query, file);
     }
 
     async function processUserQuery(query) {
+        console.log('Processing user query:', query);
         addMessageToUI('user', query);
         showLoading(true);
 
@@ -253,6 +272,7 @@ document.addEventListener('DOMContentLoaded', () => {
         }
 
         try {
+            console.log('Sending query to API...');
             const response = await fetch(`${API_BASE_URL}/process_query`, {
                 method: 'POST',
                 headers: {
@@ -265,14 +285,17 @@ document.addEventListener('DOMContentLoaded', () => {
             });
 
             const data = await response.json();
+            console.log('API response received:', data);
 
             if (!response.ok) {
                 throw new Error(data.error || `Server error: ${response.status}`);
             }
 
             if (data.status === 'image_required') {
+                console.log('Image required for query');
                 handleImageRequest(data.message, data.category);
             } else if (data.status === 'success') {
+                console.log('Query processed successfully');
                 handleApiResponse(data.response, data.category);
             } else {
                 throw new Error('Unexpected response format');
@@ -287,6 +310,7 @@ document.addEventListener('DOMContentLoaded', () => {
     }
 
     async function processImageQuery(query, imageFile) {
+        console.log('Processing image query:', query, 'with file:', imageFile.name);
         addMessageToUI('user', query);
         showLoading(true);
 
@@ -297,6 +321,7 @@ document.addEventListener('DOMContentLoaded', () => {
         }
 
         try {
+            console.log('Sending image to API...');
             const formData = new FormData();
             formData.append('session_id', sessionId);
             formData.append('photo', imageFile);
@@ -307,12 +332,14 @@ document.addEventListener('DOMContentLoaded', () => {
             });
 
             const data = await response.json();
+            console.log('Image API response received:', data);
 
             if (!response.ok) {
                 throw new Error(data.error || `Server error: ${response.status}`);
             }
 
             if (data.status === 'success') {
+                console.log('Image processed successfully');
                 handleApiResponse(data.response, data.category);
             } else {
                 throw new Error('Unexpected response format');
@@ -331,7 +358,9 @@ document.addEventListener('DOMContentLoaded', () => {
 
         if (isError) {
             message = responseData;
+            console.error('API Error:', responseData);
         } else {
+            console.log('Handling API response:', responseData, 'Category:', category);
             // Handle different response formats from the integrated backend
             if (typeof responseData === 'string') {
                 message = responseData;
@@ -349,14 +378,12 @@ document.addEventListener('DOMContentLoaded', () => {
 
         // Add category info if available
         if (category && !isError) {
-            const categoryInfo = getCategoryInfo(category);
-            if (categoryInfo) {
-                addMessageToUI('system', categoryInfo);
-            }
+            console.log('Response category:', category);
         }
 
         // Text-to-speech for voice/video modes
         if (!isError && (currentMode === 'voice' || currentMode === 'video')) {
+            console.log('Speaking response in', currentMode, 'mode');
             speak(message);
         }
     }
@@ -386,11 +413,14 @@ document.addEventListener('DOMContentLoaded', () => {
 
         statusIndicator.textContent = `● ${message}`;
         statusIndicator.style.color = colors[status] || '#cccccc';
+        console.log('Status updated:', status, message);
     }
 
     function switchMode(newMode) {
         if (currentMode === newMode) return;
 
+        console.log('Switching mode from', currentMode, 'to', newMode);
+
         // Cleanup current mode
         if (currentMode === 'video') stopCamera();
         if (isListening && recognition) recognition.stop();
@@ -433,7 +463,7 @@ document.addEventListener('DOMContentLoaded', () => {
                 break;
         }
 
-        addMessageToUI('system', `Switched to ${newMode} mode`);
+        console.log('Mode switch completed to:', newMode);
     }
 
     function addMessageToUI(sender, text, category = null) {
@@ -464,15 +494,18 @@ document.addEventListener('DOMContentLoaded', () => {
         if (loadingIndicator) {
             loadingIndicator.style.display = show ? 'flex' : 'none';
         }
+        console.log('Loading indicator:', show ? 'shown' : 'hidden');
     }
 
     // --- VOICE & VIDEO ---
     function toggleListening() {
         if (!recognition) {
+            console.error('Speech recognition not supported');
            addMessageToUI('error', 'Speech recognition is not supported in this browser.');
            return;
        }
 
+        console.log('Toggling listening, current state:', isListening);
         if (isListening) {
             recognition.stop();
         } else {
@@ -487,6 +520,7 @@ document.addEventListener('DOMContentLoaded', () => {
         }
 
         if (text && text.trim() !== '') {
+            console.log('Speaking text:', text.substring(0, 50) + '...');
             const utterance = new SpeechSynthesisUtterance(text);
 
             // Try to find a female voice
@@ -498,7 +532,10 @@ document.addEventListener('DOMContentLoaded', () => {
                 voice.name.toLowerCase().includes('hazel')
             );
 
-            if (femaleVoice) utterance.voice = femaleVoice;
+            if (femaleVoice) {
+                utterance.voice = femaleVoice;
+                console.log('Using voice:', femaleVoice.name);
+            }
 
             // Apply voice settings
             if (pitchSlider) utterance.pitch = parseFloat(pitchSlider.value);
@@ -513,13 +550,14 @@ document.addEventListener('DOMContentLoaded', () => {
     }
 
     async function startCamera() {
+        console.log('Starting camera...');
         try {
             videoStream = await navigator.mediaDevices.getUserMedia({
                 video: { width: 640, height: 480 },
                 audio: false
             });
             if (videoFeed) videoFeed.srcObject = videoStream;
-            addMessageToUI('system', 'Camera activated for image capture');
+            console.log('Camera started successfully');
         } catch (err) {
             console.error("Error accessing camera:", err);
             addMessageToUI('error', 'Could not access the camera. Please grant permission and try again.');
@@ -528,15 +566,17 @@ document.addEventListener('DOMContentLoaded', () => {
     }
 
     function stopCamera() {
+        console.log('Stopping camera...');
         if (videoStream) {
             videoStream.getTracks().forEach(track => track.stop());
             if (videoFeed) videoFeed.srcObject = null;
             videoStream = null;
-            addMessageToUI('system', 'Camera deactivated');
+            console.log('Camera stopped');
         }
     }
 
     function handleImageRequest(message, category) {
+        console.log('Image request received:', message, 'Category:', category);
         addMessageToUI('ai', message);
         if (currentMode !== 'video') {
             addMessageToUI('ai', "Please switch to Video mode to capture an image, or use the file upload option in Text mode.");
@@ -546,12 +586,15 @@ document.addEventListener('DOMContentLoaded', () => {
     }
 
     async function captureAndSendImage() {
+        console.log('Capturing image...');
         if (!videoStream || !sessionId) {
+            console.error('Cannot capture image - missing video stream or session');
             addMessageToUI('error', 'Cannot capture image. Video stream or session is not active.');
             return;
         }
 
         if (!videoFeed || !canvas) {
+            console.error('Video capture elements not found');
             addMessageToUI('error', 'Video capture elements not found.');
             return;
         }
@@ -566,16 +609,17 @@ document.addEventListener('DOMContentLoaded', () => {
 
         canvas.toBlob(async (blob) => {
             if (!blob) {
+                console.error('Failed to create image blob');
                 addMessageToUI('error', 'Failed to capture image.');
                 return;
             }
 
+            console.log('Image captured, sending to server...');
             const formData = new FormData();
             formData.append('session_id', sessionId);
             formData.append('photo', blob, 'capture.jpg');
 
             showLoading(true);
-            addMessageToUI('system', 'Processing captured image...');
 
             try {
                 const response = await fetch(`${API_BASE_URL}/process_with_image`, {
@@ -584,6 +628,7 @@ document.addEventListener('DOMContentLoaded', () => {
                 });
 
                 const data = await response.json();
+                console.log('Captured image processed:', data);
 
                 if (!response.ok) {
                     throw new Error(data.error || 'Image processing failed');
@@ -621,6 +666,7 @@ document.addEventListener('DOMContentLoaded', () => {
             if (history && chatBox) {
                 chatBox.innerHTML = history;
                 chatBox.scrollTop = chatBox.scrollHeight;
+                console.log('Chat history loaded');
             }
         } catch (error) {
             console.warn('Could not load chat history:', error);
@@ -631,13 +677,13 @@ document.addEventListener('DOMContentLoaded', () => {
         if (chatBox) {
             chatBox.innerHTML = '';
             localStorage.removeItem('medibotChatHistory');
-            addMessageToUI('system', 'Chat history cleared');
+            console.log('Chat history cleared');
        }
    }
 
    // --- UTILITY FUNCTIONS ---
    function downloadKnowledgeBase() {
-        addMessageToUI('system', 'Use the upload endpoints to add knowledge base files to the system.');
+        console.log('Knowledge base download requested');
    }
 
    // --- EXPOSE FUNCTIONS TO GLOBAL SCOPE (for debugging) ---
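The first hunk hardcodes the local backend URL, and the remaining hunks log directly to the console. A minimal sketch of an environment-aware variant (the hostname check and the debugLog helper are hypothetical and not part of this commit; both URLs are taken from the first hunk):

    // Choose the backend by page host: localhost during development,
    // the deployed Space otherwise.
    const API_BASE_URL =
        ['localhost', '127.0.0.1'].includes(window.location.hostname)
            ? 'http://localhost:5000'
            : 'https://nitinbot001-medbot-backend.hf.space';

    // Route the new instrumentation through one gated helper instead of
    // bare console.log calls; enable it by adding ?debug to the page URL.
    const DEBUG = new URLSearchParams(window.location.search).has('debug');
    function debugLog(...args) {
        if (DEBUG) console.log('[MediBot]', ...args);
    }

    // Example usage, mirroring a log line added in this commit:
    debugLog('Checking system health...');

Under these assumptions, production pages stay quiet while the debugging signal added by the commit remains available on demand.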