<!--
  willammonster / voice_chat_standalone.html
  artecnosomatic's picture
  Deploy Willam-viv-peg
  6077461
-->
<!DOCTYPE html>
<html>
<head>
<title>Voice Chat with Dolphin Mistral</title>
<style>
body { font-family: Arial, sans-serif; max-width: 600px; margin: 50px auto; padding: 20px; }
.status { font-size: 18px; margin: 20px 0; padding: 10px; background: #f0f0f0; border-radius: 5px; }
.chat-box { height: 300px; border: 1px solid #ccc; padding: 10px; overflow-y: scroll; margin: 20px 0; }
.controls { text-align: center; margin: 20px 0; }
button { padding: 10px 20px; margin: 5px; font-size: 16px; border: none; border-radius: 5px; cursor: pointer; }
.listen-btn { background: #4CAF50; color: white; }
.stop-btn { background: #f44336; color: white; }
.mute-btn { background: #2196F3; color: white; }
.muted-btn { background: #ff9800; color: white; }
.clear-btn { background: #9E9E9E; color: white; }
.message { margin: 10px 0; padding: 8px; border-radius: 5px; }
.user-msg { background: #e3f2fd; text-align: right; }
.ai-msg { background: #f3e5f5; }
</style>
</head>
<body>
<h1>🎀 Voice Chat with Dolphin Mistral</h1>
<div class="status" id="status">🎀 Ready - Click Listen to start</div>
<div class="chat-box" id="chatBox"></div>
<div class="controls">
<button id="listenBtn" class="listen-btn" onclick="toggleListening()">🎀 Listen</button>
<button id="muteBtn" class="mute-btn" onclick="toggleMute()">πŸ”Š Unmuted</button>
<button class="clear-btn" onclick="clearChat()">Clear</button>
<button class="clear-btn" onclick="testVoice()">Test Voice</button>
</div>
<script>
// ---- Shared UI / speech state ----
// NOTE(review): several emoji literals in this file look mojibake-damaged
// (e.g. 'πŸ”Š', 'βœ…'); they are preserved byte-for-byte here — confirm the
// intended glyphs and fix HTML + JS together.
let isListening = false;                 // true while the mic button is in "Stop" mode
let isMuted = false;                     // suppresses spoken AI replies when true
let recognition;                         // SpeechRecognition instance (undefined if unsupported)
let synthesis = window.speechSynthesis;  // text-to-speech engine
let selectedVoice = null;                // voice chosen via showVoices(), if any

// Wire up one-shot speech recognition when the browser provides it
// (standard or webkit-prefixed constructor).
if ('webkitSpeechRecognition' in window || 'SpeechRecognition' in window) {
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
  recognition = new SpeechRecognition();
  recognition.continuous = false;      // stop after a single utterance
  recognition.interimResults = false;  // deliver final transcripts only
  recognition.lang = 'en-US';

  recognition.onstart = () => updateStatus('🎀 Listening...');

  // On a final transcript: append it to the chat log and forward to the AI.
  recognition.onresult = (event) => {
    const transcript = event.results[0][0].transcript;
    addMessage('You', transcript);
    sendToAI(transcript);
  };

  recognition.onerror = (event) => {
    if (event.error === 'not-allowed') {
      updateStatus('❌ Microphone access denied. Please allow microphone access.');
    } else {
      updateStatus('Speech error: ' + event.error);
    }
    stopListening();
  };

  // Fires whenever a session ends, including after errors.
  recognition.onend = () => {
    if (isListening) updateStatus('Click Listen to speak again');
    stopListening();
  };
}
// Single button handler: start a mic session when idle, stop it when active.
function toggleListening() {
  if (isListening) {
    stopListening();
  } else {
    startListening();
  }
}
// Ask for microphone permission, then start a one-shot recognition session.
// Updates the Listen button to its "Stop" state on success.
function startListening() {
  if (!recognition) {
    updateStatus('❌ Speech recognition not available');
    return;
  }
  // navigator.mediaDevices is undefined on insecure origins; report instead
  // of throwing an uncaught TypeError.
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    updateStatus('❌ Microphone access denied');
    return;
  }
  navigator.mediaDevices.getUserMedia({ audio: true })
    .then((stream) => {
      // BUG FIX: the original never released this permission-probe stream,
      // so each click kept the mic open. SpeechRecognition captures audio
      // itself, so stop the probe tracks immediately.
      stream.getTracks().forEach((track) => track.stop());
      isListening = true;
      document.getElementById('listenBtn').textContent = '⏹ Stop';
      document.getElementById('listenBtn').className = 'stop-btn';
      recognition.start();
    })
    .catch(() => {
      updateStatus('❌ Microphone access denied');
    });
}
// Reset listening state, restore the Listen button, and halt recognition.
function stopListening() {
  isListening = false;
  const listenBtn = document.getElementById('listenBtn');
  listenBtn.textContent = '🎀 Listen';
  listenBtn.className = 'listen-btn';
  if (recognition) recognition.stop();
  updateStatus('βœ… Ready - Click Listen to start');
}
// Flip the mute flag and refresh the mute button's label and color.
function toggleMute() {
  isMuted = !isMuted;
  const muteBtn = document.getElementById('muteBtn');
  if (isMuted) {
    muteBtn.textContent = 'πŸ”‡ Muted';
    muteBtn.className = 'muted-btn';
  } else {
    muteBtn.textContent = 'πŸ”Š Unmuted';
    muteBtn.className = 'mute-btn';
  }
}
// Append a chat bubble to the log and scroll it into view.
// sender: 'You' (right-aligned user style) or anything else (AI style).
function addMessage(sender, message) {
  const chatBox = document.getElementById('chatBox');
  const msgDiv = document.createElement('div');
  msgDiv.className = 'message ' + (sender === 'You' ? 'user-msg' : 'ai-msg');
  // SECURITY FIX: the original interpolated the transcript / AI reply into
  // innerHTML, so any markup in the text executed as HTML (XSS). Build the
  // bubble from text nodes instead.
  const label = document.createElement('strong');
  label.textContent = sender + ':';
  msgDiv.appendChild(label);
  msgDiv.appendChild(document.createTextNode(' ' + message));
  chatBox.appendChild(msgDiv);
  chatBox.scrollTop = chatBox.scrollHeight;
}
// Replace the text shown in the status banner at the top of the page.
function updateStatus(status) {
  const banner = document.getElementById('status');
  banner.textContent = status;
}
// Remove every message from the chat log.
function clearChat() {
  const log = document.getElementById('chatBox');
  log.innerHTML = '';
}
// Let the user pick a preferred English synthesis voice via prompt().
// Stores the pick in the module-level `selectedVoice`.
function showVoices() {
  const voices = synthesis.getVoices();
  // Keep only higher-quality English voices (enhanced/premium tiers plus the
  // well-known macOS defaults).
  const modernVoices = voices.filter((voice) =>
    voice.lang.startsWith('en') &&
    (voice.name.includes('Enhanced') ||
     voice.name.includes('Premium') ||
     voice.name === 'Samantha' ||
     voice.name === 'Alex')
  );
  // Robustness: don't show an empty picker (voices may not be loaded yet).
  if (modernVoices.length === 0) {
    updateStatus('❌ No matching English voices found');
    return;
  }
  let voiceList = 'Modern voices:\n\n';
  modernVoices.forEach((voice, index) => {
    voiceList += `${index}: ${voice.name}\n`;
  });
  const choice = prompt(voiceList + '\nEnter voice number:');
  if (choice !== null && choice !== '') {
    const voiceIndex = Number.parseInt(choice, 10); // explicit radix
    // Reject non-numeric and out-of-range input.
    if (!Number.isNaN(voiceIndex) && voiceIndex >= 0 && voiceIndex < modernVoices.length) {
      selectedVoice = modernVoices[voiceIndex];
      updateStatus(`βœ… Voice: ${selectedVoice.name}`);
    }
  }
}
// Speak a short fixed phrase so the user can verify audio output works.
function testVoice() {
  if (!synthesis) {
    updateStatus('❌ Speech synthesis not available');
    return;
  }
  const utterance = new SpeechSynthesisUtterance('Testing voice output. Can you hear me?');
  utterance.rate = 1.1;
  utterance.volume = 1.0;
  // Prefer the explicitly chosen voice; otherwise fall back to any English one.
  let voice = selectedVoice;
  if (!voice) {
    voice = synthesis.getVoices().find((v) => v.lang.startsWith('en')) || null;
  }
  if (voice) utterance.voice = voice;
  utterance.onstart = () => updateStatus('πŸ”Š Testing voice...');
  utterance.onend = () => updateStatus('βœ… Voice test complete');
  utterance.onerror = (e) => updateStatus('❌ Voice error: ' + e.error);
  synthesis.speak(utterance);
}
// Send the transcript to the local Ollama server and handle the reply:
// append it to the chat log and (unless muted) speak it aloud.
async function sendToAI(text) {
  updateStatus('πŸ€– AI thinking...');
  try {
    console.log('Sending to AI:', text);
    const response = await fetch('http://localhost:11434/api/generate', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: 'dolphin-mistral:latest',
        prompt: text,
        stream: false,
        keep_alive: -1 // keep the model resident in memory
      })
    });
    console.log('Response status:', response.status);
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}`);
    }
    const data = await response.json();
    console.log('AI response:', data);
    if (data.response) {
      addMessage('AI', data.response);
      if (!isMuted && synthesis) {
        speakReply(data.response);
      }
      updateStatus('βœ… Ready - Click Listen to start');
    } else {
      updateStatus('❌ No response from AI');
    }
  } catch (error) {
    console.error('Error:', error);
    updateStatus('❌ Error: ' + error.message + ' - Check console (F12)');
  }
}

// Helper: speak `replyText` aloud, deferring until the voice list has loaded.
function speakReply(replyText) {
  const speakNow = () => {
    const utterance = new SpeechSynthesisUtterance(replyText);
    utterance.rate = 1.1;
    utterance.pitch = 1.0;
    utterance.volume = 1.0;
    const voices = synthesis.getVoices();
    console.log('Available voices:', voices.length);
    let bestVoice = selectedVoice;
    if (!bestVoice && voices.length > 0) {
      // Prefer a high-quality English voice; fall back to any English one.
      bestVoice = voices.find((voice) =>
        voice.lang.startsWith('en') &&
        (voice.name.includes('Enhanced') ||
         voice.name === 'Samantha' ||
         voice.name === 'Alex')
      ) || voices.find((voice) => voice.lang.startsWith('en'));
    }
    if (bestVoice) {
      utterance.voice = bestVoice;
      console.log('Using voice:', bestVoice.name);
    }
    utterance.onstart = () => console.log('Speech started');
    utterance.onend = () => console.log('Speech ended');
    utterance.onerror = (e) => console.error('Speech error:', e);
    synthesis.speak(utterance);
  };
  if (synthesis.getVoices().length === 0) {
    // BUG FIX: the original installed the handler permanently, so any later
    // voiceschanged event re-spoke this reply. Fire once, then detach.
    synthesis.onvoiceschanged = () => {
      synthesis.onvoiceschanged = null;
      speakNow();
    };
  } else {
    speakNow();
  }
}
</script>
</body>
</html>