Spaces:
Running
Running
Update index.html
Browse files- index.html +139 -125
index.html
CHANGED
|
@@ -1,136 +1,150 @@
|
|
| 1 |
<!DOCTYPE html>
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
</head>
|
| 31 |
<body>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
if (voices.length) return resolve(voices);
|
| 48 |
-
synth.onvoiceschanged = () => {
|
| 49 |
-
voices = synth.getVoices();
|
| 50 |
-
resolve(voices);
|
| 51 |
-
};
|
| 52 |
-
});
|
| 53 |
-
}
|
| 54 |
-
|
| 55 |
-
// Unlock Japanese TTS on startup
|
| 56 |
-
async function unlockJapaneseTTS() {
|
| 57 |
-
await loadVoices();
|
| 58 |
-
const testUtter = new SpeechSynthesisUtterance("準備ができました");
|
| 59 |
-
const jpVoice = voices.find(v => v.lang === "ja-JP");
|
| 60 |
-
if (jpVoice) {
|
| 61 |
-
testUtter.voice = jpVoice;
|
| 62 |
-
testUtter.lang = "ja-JP";
|
| 63 |
-
} else {
|
| 64 |
-
testUtter.lang = "ja-JP";
|
| 65 |
-
}
|
| 66 |
-
testUtter.volume = 0;
|
| 67 |
-
synth.speak(testUtter); // Silent utterance to unlock audio in Safari
|
| 68 |
-
statusDiv.textContent = "Ready. Tap the button and speak in Japanese.";
|
| 69 |
-
}
|
| 70 |
-
|
| 71 |
-
// Speak Japanese text
|
| 72 |
-
async function speakJapanese(text) {
|
| 73 |
-
await loadVoices();
|
| 74 |
-
const utterance = new SpeechSynthesisUtterance(text);
|
| 75 |
-
const jpVoice = voices.find(v => v.lang === "ja-JP");
|
| 76 |
-
if (jpVoice) {
|
| 77 |
-
utterance.voice = jpVoice;
|
| 78 |
-
utterance.lang = "ja-JP";
|
| 79 |
-
} else {
|
| 80 |
-
utterance.lang = "ja-JP";
|
| 81 |
-
}
|
| 82 |
-
synth.cancel();
|
| 83 |
-
synth.speak(utterance);
|
| 84 |
-
}
|
| 85 |
-
|
| 86 |
-
// When user clicks the button: request mic, then listen
|
| 87 |
-
async function handleSpeak() {
|
| 88 |
-
try {
|
| 89 |
-
await navigator.mediaDevices.getUserMedia({ audio: true });
|
| 90 |
-
} catch (err) {
|
| 91 |
-
statusDiv.textContent = "Microphone permission denied.";
|
| 92 |
-
return;
|
| 93 |
-
}
|
| 94 |
-
|
| 95 |
-
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
|
| 96 |
-
if (!SpeechRecognition) {
|
| 97 |
-
statusDiv.textContent = "Speech recognition not supported.";
|
| 98 |
-
return;
|
| 99 |
-
}
|
| 100 |
-
|
| 101 |
-
const recognition = new SpeechRecognition();
|
| 102 |
-
recognition.lang = "ja-JP";
|
| 103 |
-
recognition.interimResults = false;
|
| 104 |
-
|
| 105 |
-
recognition.onstart = () => {
|
| 106 |
-
statusDiv.textContent = "Listening...";
|
| 107 |
-
};
|
| 108 |
-
|
| 109 |
-
recognition.onresult = (event) => {
|
| 110 |
-
const transcript = event.results[0][0].transcript;
|
| 111 |
-
statusDiv.textContent = `You said: "${transcript}"`;
|
| 112 |
-
speakJapanese(transcript);
|
| 113 |
-
};
|
| 114 |
-
|
| 115 |
-
recognition.onerror = (event) => {
|
| 116 |
-
statusDiv.textContent = `Error: ${event.error}`;
|
| 117 |
-
};
|
| 118 |
-
|
| 119 |
-
recognition.onend = () => {
|
| 120 |
-
if (!synth.speaking) {
|
| 121 |
-
statusDiv.textContent += " | Tap again to speak.";
|
| 122 |
}
|
| 123 |
-
};
|
| 124 |
|
| 125 |
-
|
| 126 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
|
| 133 |
-
|
| 134 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 135 |
</body>
|
| 136 |
-
</html>
|
|
|
|
| 1 |
<!DOCTYPE html>
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
+
<meta charset="UTF-8">
<title>Voice Command | Chatbot</title>
<style>
  /* Card-style wrapper around the whole chat UI */
  .chat-container {
    max-width: 400px;
    margin: 20px auto;
    padding: 10px;
    border: 1px solid #ccc;
    border-radius: 5px;
    box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
    font-family: Arial, sans-serif;
  }
  /* Right-aligned bubble: what the user said */
  .user-message {
    background-color: #f0f0f0;
    border-radius: 5px;
    padding: 5px 10px;
    margin: 5px 0;
    text-align: right;
  }
  /* Left-aligned bubble: the bot's reply */
  .bot-message {
    background-color: #d3e9ff;
    border-radius: 5px;
    padding: 5px 10px;
    margin: 5px 0;
  }
  /* Drop-down that selects the speech-recognition language */
  #languageSelector {
    width: 100%;
    margin-top: 10px;
    padding: 5px;
    border-radius: 5px;
    border: 1px solid #ccc;
  }
</style>
</head>
<body>
  <div class="chat-container">
    <!-- Conversation transcript; bubbles are appended here by the script -->
    <div id="chat-box"></div>

    <!-- Language Selection -->
    <select id="languageSelector">
      <option value="en-US">English (US)</option>
      <option value="hi-IN">Hindi (India)</option>
      <option value="es-ES">Spanish (Spain)</option>
      <option value="fr-FR">French (France)</option>
      <option value="de-DE">German (Germany)</option>
      <option value="ar-SA">Arabic (Saudi Arabia)</option>
      <!-- Add more as needed -->
    </select>

    <!-- Status line + mic trigger -->
    <div class="speaker" style="display: flex; justify-content: space-between; width: 100%; box-shadow: 0 0 13px #0000003d; border-radius: 5px; margin-top: 10px;">
      <p id="action" style="color: grey; font-weight: 800; padding: 0; padding-left: 2rem;"></p>
      <button id="speech" onclick="runSpeechRecog()" style="border: transparent; padding: 0 0.5rem;">
        Tap to Speak
      </button>
    </div>
  </div>
|
| 60 |
+
|
| 61 |
+
<script>
  // Shared handle to the Web Speech synthesis engine; never reassigned,
  // so it is declared const. Used by speakResponse() and its cleanup hooks.
  const synth = window.speechSynthesis;
| 63 |
+
|
| 64 |
+
// Start one-shot speech recognition in the user-selected language and
// hand the final transcript to sendMessage(). Wired to the
// "Tap to Speak" button via its onclick attribute.
function runSpeechRecog() {
  const selectedLang = document.getElementById("languageSelector").value;
  const action = document.getElementById('action');

  // Bug fix: the original called `new webkitSpeechRecognition()` directly,
  // which throws a ReferenceError on browsers that don't expose the
  // WebKit-prefixed constructor. Feature-detect both spellings instead.
  const SpeechRecognitionCtor =
    window.SpeechRecognition || window.webkitSpeechRecognition;
  if (!SpeechRecognitionCtor) {
    action.innerHTML = "Speech recognition is not supported in this browser.";
    return;
  }

  const recognition = new SpeechRecognitionCtor();
  recognition.lang = selectedLang;
  recognition.interimResults = false; // deliver only finalized results
  recognition.continuous = false;     // stop after a single utterance

  recognition.onstart = () => {
    action.innerHTML = "Listening...";
  };

  recognition.onresult = (event) => {
    const transcript = event.results[0][0].transcript;
    action.innerHTML = "";
    sendMessage(transcript);
  };

  recognition.onerror = (event) => {
    action.innerHTML = "Error: " + event.error;
  };

  recognition.onend = () => {
    // Clear the status line whether recognition succeeded or not.
    action.innerHTML = "";
  };

  recognition.start();
}
|
| 93 |
+
|
| 94 |
+
// Entry point for a completed utterance: echo it into the chat box,
// then forward it to the backend for a reply.
function sendMessage(message) {
  showUserMessage(message);
  sendToFlaskAPI(message);
}
|
|
|
|
| 98 |
|
| 99 |
+
// POST the recognized text to the Flask backend as JSON and route the
// decoded reply to handleResponse(). Network and HTTP failures are
// logged to the console.
function sendToFlaskAPI(message) {
  fetch('/api/process_text', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ text: message })
  })
    .then(response => {
      // Bug fix: the original parsed every response as a successful bot
      // reply; fetch() does NOT reject on HTTP 4xx/5xx, so error pages
      // ended up rendered in the chat. Fail explicitly instead.
      if (!response.ok) {
        throw new Error('Flask API returned HTTP ' + response.status);
      }
      return response.json();
    })
    .then(data => {
      console.log('Response from Flask API:', data);
      handleResponse(data);
    })
    .catch(error => {
      console.error('Error sending data to Flask API:', error);
    });
}
|
| 116 |
|
| 117 |
+
// Render the backend's reply in the chat box and read it aloud.
// NOTE(review): `data` is whatever /api/process_text returns as JSON —
// presumably a plain string; confirm against the Flask handler.
function handleResponse(data) {
  showBotMessage(data);
  speakResponse(data);
}
|
| 121 |
|
| 122 |
+
// Append the user's message to the chat box as a right-aligned bubble.
// Security fix: the original did `chatBox.innerHTML += ...` with the raw
// speech transcript, which both allows HTML injection and re-parses every
// existing bubble (dropping their event state). Build a node and set
// textContent instead.
function showUserMessage(message) {
  var chatBox = document.getElementById('chat-box');
  var bubble = document.createElement('div');
  bubble.className = 'user-message';
  bubble.textContent = message;
  chatBox.appendChild(bubble);
}
|
| 127 |
+
|
| 128 |
+
// Append the bot's reply to the chat box as a left-aligned bubble.
// Security fix: the original injected the API response via
// `chatBox.innerHTML += ...`, allowing any markup in the server reply to
// execute as HTML and re-parsing all prior bubbles. Use a created node
// with textContent so reply text is rendered literally.
function showBotMessage(message) {
  var chatBox = document.getElementById('chat-box');
  var bubble = document.createElement('div');
  bubble.className = 'bot-message';
  bubble.textContent = message;
  chatBox.appendChild(bubble);
}
|
| 133 |
+
|
| 134 |
+
// Speak the bot's reply aloud via the Web Speech synthesis engine.
// Bug fixes vs. the original:
//  1. It added a `beforeunload` listener and a click listener on #speech
//     INSIDE this function, so every reply attached another copy of each
//     handler (listener leak). They are now registered at most once.
//  2. It never cancelled in-progress speech, so fast consecutive replies
//     queued up/overlapped. Any current utterance is cancelled first.
function speakResponse(response) {
  if (synth.speaking) {
    synth.cancel(); // stop the previous reply before starting this one
  }
  var utterance = new SpeechSynthesisUtterance(response);
  synth.speak(utterance);

  // One-time registration of cleanup hooks, guarded by a function property.
  if (!speakResponse._listenersAttached) {
    speakResponse._listenersAttached = true;

    // Stop speaking when the page is being closed/navigated away.
    window.addEventListener('beforeunload', function () {
      if (synth.speaking) {
        synth.cancel();
      }
    });

    // Tapping the mic button interrupts any ongoing speech.
    document.getElementById('speech').addEventListener('click', function () {
      if (synth.speaking) {
        synth.cancel();
      }
    });
  }
}
|
| 148 |
+
</script>
|
| 149 |
</body>
|
| 150 |
+
</html>
|