// NOTE: The lines "Spaces: / Sleeping / Sleeping" below this file's original header were
// Hugging Face Space status-banner text captured alongside the source during extraction;
// they are not part of the program and are preserved here only as this comment.
document.addEventListener('DOMContentLoaded', () => {
  // MediBot front-end controller: a chat UI with three input modes
  // (text / voice / video) backed by a remote Hugging Face Space API.
  // Everything is scoped to this closure so nothing leaks to globals.

  // DOM Elements
  const chatBox = document.getElementById('chat-box');                 // transcript container
  const textInput = document.getElementById('text-input');             // typed-query box
  const sendBtn = document.getElementById('send-btn');
  const micBtn = document.getElementById('mic-btn');
  const loadingIndicator = document.getElementById('loading-indicator');
  const statusIndicator = document.getElementById('status-indicator'); // connection status pill
  // Mode Buttons
  const modeTextBtn = document.getElementById('mode-text');
  const modeVoiceBtn = document.getElementById('mode-voice');
  const modeVideoBtn = document.getElementById('mode-video');
  // Voice Controls
  const voiceControls = document.getElementById('voice-controls');
  const continuousToggle = document.getElementById('continuous-toggle'); // keep-listening checkbox
  const rateSlider = document.getElementById('rate');                    // TTS speaking rate
  const pitchSlider = document.getElementById('pitch');                  // TTS pitch
  // Video Elements
  const videoFeed = document.getElementById('video-feed');
  const canvas = document.getElementById('canvas');                      // off-screen capture target
  const imageModal = document.getElementById('image-capture-modal');
  const closeModalBtn = document.getElementById('close-modal-btn');
  // State Variables
  let sessionId = null; // Will be set before each new query
  let currentMode = 'text'; // 'text', 'voice', 'video'
  let isListening = false;       // mirrors SpeechRecognition start/end events
  let isContinuousMode = false;  // driven by the continuous-toggle checkbox
  let videoStream = null;        // active MediaStream while in video mode
  const API_BASE_URL = 'https://nitinbot001-medbot-backend.hf.space';
  // Speech Recognition (STT) Setup — prefixed fallback for WebKit browsers.
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
  let recognition;
  if (SpeechRecognition) {
    recognition = new SpeechRecognition();
    recognition.continuous = false;
    recognition.interimResults = false;
    recognition.lang = 'en-US'; // You can change this
  } else {
    micBtn.disabled = true;
    // Safe to call before its declaration: addMessageToUI is a hoisted
    // function declaration defined later in this closure.
    addMessageToUI('error', 'Speech Recognition is not supported in this browser.');
  }
  // Speech Synthesis (TTS) Setup
  const synth = window.speechSynthesis;
| // --- INITIALIZATION --- | |
| function initializeApp() { | |
| loadHistory(); | |
| setupEventListeners(); | |
| statusIndicator.textContent = '● Ready'; | |
| statusIndicator.style.color = '#cccccc'; | |
| addMessageToUI('ai', 'Hello! I am your MediBot Assistant. How can I help you today?'); | |
| } | |
| async function startNewSession() { | |
| statusIndicator.textContent = '● Connecting...'; | |
| statusIndicator.style.color = 'orange'; | |
| try { | |
| const response = await fetch(`${API_BASE_URL}/start_session`, { method: 'POST' }); | |
| if (!response.ok) throw new Error('Failed to start session'); | |
| const data = await response.json(); | |
| sessionId = data.session_id; // Set the session ID for the current transaction | |
| statusIndicator.textContent = '● Connected'; | |
| statusIndicator.style.color = '#76ff03'; | |
| console.log('New transaction session started:', sessionId); | |
| return true; // Indicate success | |
| } catch (error) { | |
| console.error('Session start error:', error); | |
| sessionId = null; // Ensure session ID is null on failure | |
| statusIndicator.textContent = '● Connection Failed'; | |
| statusIndicator.style.color = '#ff4d4d'; | |
| addMessageToUI('error', `Could not connect to the server. ${error.message}`); | |
| return false; // Indicate failure | |
| } | |
| } | |
  // --- EVENT LISTENERS ---
  // Wires every UI control and speech-recognition callback. Called once
  // from initializeApp(); registration order is load-time only, but the
  // recognition onend/onresult handlers below are order-sensitive at runtime.
  function setupEventListeners() {
    sendBtn.addEventListener('click', handleTextInput);
    textInput.addEventListener('keydown', (e) => {
      if (e.key === 'Enter') handleTextInput();
    });
    // Mode Switching
    modeTextBtn.addEventListener('click', () => switchMode('text'));
    modeVoiceBtn.addEventListener('click', () => switchMode('voice'));
    modeVideoBtn.addEventListener('click', () => switchMode('video'));
    // Voice Controls
    micBtn.addEventListener('click', toggleListening);
    continuousToggle.addEventListener('change', (e) => {
      isContinuousMode = e.target.checked;
      if (recognition) {
        recognition.continuous = isContinuousMode;
      }
    });
    // STT Events — recognition is undefined when the browser lacks the API.
    if (recognition) {
      recognition.onstart = () => {
        // Swap the mic button into its "stop" affordance while listening.
        isListening = true;
        micBtn.classList.add('listening');
        micBtn.innerHTML = '<i class="fas fa-stop"></i>';
      };
      recognition.onend = () => {
        isListening = false;
        micBtn.classList.remove('listening');
        micBtn.innerHTML = '<i class="fas fa-microphone"></i>';
        if (isContinuousMode && currentMode !== 'text') {
          // NOTE(review): start() throws InvalidStateError if recognition is
          // already running — presumed safe here because onend fires only
          // after it has stopped, but verify on target browsers.
          recognition.start(); // Keep listening in continuous mode
        }
      };
      recognition.onresult = (event) => {
        // Only the latest result matters (interimResults is off).
        const transcript = event.results[event.results.length - 1][0].transcript.trim();
        textInput.value = transcript;
        processUserQuery(transcript);
      };
      recognition.onerror = (event) => {
        console.error('Speech recognition error:', event.error);
        addMessageToUI('error', `Speech recognition error: ${event.error}`);
      };
    }
    // Image Capture Modal — closing the modal is the "I'm ready" signal,
    // so the capture is scheduled one second after dismissal.
    closeModalBtn.addEventListener('click', () => {
      imageModal.classList.add('hidden');
      addMessageToUI('ai', 'Capturing image in 1 second...');
      setTimeout(captureAndSendImage, 1000);
    });
  }
| // --- CORE LOGIC --- | |
| function handleTextInput() { | |
| const query = textInput.value.trim(); | |
| if (query) { | |
| processUserQuery(query); | |
| textInput.value = ''; | |
| } | |
| } | |
| async function processUserQuery(query) { | |
| addMessageToUI('user', query); | |
| showLoading(true); | |
| // *** CHANGED LOGIC: Start a new session for every query *** | |
| const sessionStarted = await startNewSession(); | |
| if (!sessionStarted) { | |
| showLoading(false); | |
| return; // Stop if session could not be created | |
| } | |
| try { | |
| const response = await fetch(`${API_BASE_URL}/process_query`, { | |
| method: 'POST', | |
| headers: { 'Content-Type': 'application/json' }, | |
| body: JSON.stringify({ session_id: sessionId, query: query }) | |
| }); | |
| const data = await response.json(); | |
| if (!response.ok) { | |
| throw new Error(data.error || 'API request failed'); | |
| } | |
| if (data.status === 'image_required') { | |
| handleImageRequest(data.message); | |
| } else { | |
| const message = data.response?.response || data.data; | |
| handleApiResponse(message); | |
| } | |
| } catch (error) { | |
| console.error('Error processing query:', error); | |
| handleApiResponse(`Sorry, I encountered an error: ${error.message}`, true); | |
| } finally { | |
| showLoading(false); | |
| } | |
| } | |
| function handleApiResponse(message, isError = false) { | |
| const type = isError ? 'error' : 'ai'; | |
| addMessageToUI(type, message); | |
| if (!isError && (currentMode === 'voice' || currentMode === 'video')) { | |
| speak(message); | |
| } | |
| } | |
| // --- UI & STATE MANAGEMENT --- | |
| function switchMode(newMode) { | |
| if (currentMode === newMode) return; | |
| if (currentMode === 'video') stopCamera(); | |
| if (isListening && recognition) recognition.stop(); | |
| currentMode = newMode; | |
| document.querySelectorAll('.mode-btn').forEach(btn => btn.classList.remove('active')); | |
| document.getElementById(`mode-${newMode}`).classList.add('active'); | |
| if (newMode === 'text') { | |
| document.body.classList.remove('body-video-mode'); | |
| voiceControls.classList.add('hidden'); | |
| micBtn.classList.add('hidden'); | |
| sendBtn.classList.remove('hidden'); | |
| textInput.classList.remove('hidden'); | |
| videoFeed.style.display = 'none'; | |
| } else if (newMode === 'voice') { | |
| document.body.classList.remove('body-video-mode'); | |
| voiceControls.classList.remove('hidden'); | |
| micBtn.classList.remove('hidden'); | |
| sendBtn.classList.add('hidden'); | |
| textInput.classList.add('hidden'); | |
| videoFeed.style.display = 'none'; | |
| } else if (newMode === 'video') { | |
| document.body.classList.add('body-video-mode'); | |
| voiceControls.classList.remove('hidden'); | |
| micBtn.classList.remove('hidden'); | |
| sendBtn.classList.add('hidden'); | |
| textInput.classList.add('hidden'); | |
| videoFeed.style.display = 'block'; | |
| startCamera(); | |
| } | |
| } | |
| function addMessageToUI(sender, text) { | |
| const messageDiv = document.createElement('div'); | |
| messageDiv.classList.add('message', `${sender}-message`); | |
| messageDiv.textContent = text; | |
| chatBox.appendChild(messageDiv); | |
| chatBox.scrollTop = chatBox.scrollHeight; | |
| saveHistory(); | |
| } | |
| function showLoading(show) { | |
| loadingIndicator.style.display = show ? 'flex' : 'none'; | |
| } | |
| // --- VOICE & VIDEO --- | |
| function toggleListening() { | |
| if (!recognition) return; | |
| if (isListening) { | |
| recognition.stop(); | |
| } else { | |
| recognition.start(); | |
| } | |
| } | |
| function speak(text) { | |
| if (synth.speaking) { | |
| console.error('SpeechSynthesis is already speaking.'); | |
| return; | |
| } | |
| if (text !== '') { | |
| const utterance = new SpeechSynthesisUtterance(text); | |
| const femaleVoice = synth.getVoices().find(voice => voice.name.includes('Female') || voice.gender === 'female'); | |
| if(femaleVoice) utterance.voice = femaleVoice; | |
| utterance.pitch = pitchSlider.value; | |
| utterance.rate = rateSlider.value; | |
| synth.speak(utterance); | |
| } | |
| } | |
| async function startCamera() { | |
| try { | |
| videoStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false }); | |
| videoFeed.srcObject = videoStream; | |
| } catch (err) { | |
| console.error("Error accessing camera: ", err); | |
| addMessageToUI('error', 'Could not access the camera. Please grant permission.'); | |
| switchMode('voice'); | |
| } | |
| } | |
| function stopCamera() { | |
| if (videoStream) { | |
| videoStream.getTracks().forEach(track => track.stop()); | |
| videoFeed.srcObject = null; | |
| videoStream = null; | |
| } | |
| } | |
| function handleImageRequest(message) { | |
| addMessageToUI('ai', message); | |
| if (currentMode !== 'video') { | |
| addMessageToUI('ai', "Please switch to Video mode to provide an image."); | |
| } else { | |
| imageModal.classList.remove('hidden'); | |
| } | |
| } | |
| async function captureAndSendImage() { | |
| if (!videoStream || !sessionId) { | |
| addMessageToUI('error', 'Cannot capture image. Video stream or session is not active.'); | |
| return; | |
| }; | |
| const videoTrack = videoStream.getVideoTracks()[0]; | |
| const settings = videoTrack.getSettings(); | |
| canvas.width = settings.width; | |
| canvas.height = settings.height; | |
| const context = canvas.getContext('2d'); | |
| context.drawImage(videoFeed, 0, 0, canvas.width, canvas.height); | |
| canvas.toBlob(async (blob) => { | |
| const formData = new FormData(); | |
| formData.append('session_id', sessionId); | |
| formData.append('photo', blob, 'capture.jpg'); | |
| showLoading(true); | |
| try { | |
| const response = await fetch(`${API_BASE_URL}/process_with_image`, { | |
| method: 'POST', | |
| body: formData | |
| }); | |
| const data = await response.json(); | |
| if (!response.ok) throw new Error(data.message || 'Image processing failed'); | |
| const message = data.response?.response || data.data; | |
| handleApiResponse(message); | |
| } catch (error) { | |
| console.error('Error sending image:', error); | |
| handleApiResponse(`Sorry, I couldn't process the image: ${error.message}`, true); | |
| } finally { | |
| showLoading(false); | |
| } | |
| }, 'image/jpeg'); | |
| } | |
| // --- LOCAL STORAGE --- | |
| function saveHistory() { | |
| localStorage.setItem('medibotChatHistory', chatBox.innerHTML); | |
| } | |
| function loadHistory() { | |
| const history = localStorage.getItem('medibotChatHistory'); | |
| if (history) { | |
| chatBox.innerHTML = history; | |
| chatBox.scrollTop = chatBox.scrollHeight; | |
| } | |
| } | |
  // --- START THE APP ---
  // Runs once the DOM is ready (we are inside the DOMContentLoaded handler).
  initializeApp();
});