// interview-bot — static/interview.js (commit c038460)
// Browser-side logic for the voice-interview UI: camera preview,
// speech recognition of the candidate, backend Q&A via /ask,
// and spoken replies from the interviewer bot.
// Cached DOM references for the interview UI.
const video = document.getElementById("localVideo"); // <video> element previewing the user's camera
const messages = document.getElementById("messages"); // scrollable transcript container
const startBtn = document.getElementById("startInterview"); // button that kicks off the interview
/**
 * Request camera + microphone access and attach the live stream to the
 * local <video> preview element. If permission is denied or no device is
 * available, an alert is shown and the function returns normally.
 */
async function startCamera() {
  let stream;
  try {
    stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
  } catch (mediaError) {
    alert("Could not access camera/mic: " + mediaError.message);
    return;
  }
  video.srcObject = stream;
}
/**
 * Append a single "Sender: text" line to the transcript panel and keep
 * the panel scrolled to the newest message.
 */
function appendMessage(sender, text) {
  const line = document.createElement("div");
  line.textContent = sender + ": " + text;
  messages.appendChild(line);
  // Pin the scroll position to the bottom so the latest line stays visible.
  messages.scrollTop = messages.scrollHeight;
}
/**
 * Send the candidate's message to the backend interviewer and return its
 * textual reply.
 *
 * @param {string} message - The user's utterance (or a control prompt).
 * @returns {Promise<string>} The interviewer's reply text (`data.response`).
 * @throws {Error} If the server responds with a non-2xx status, or if the
 *   response body is not valid JSON.
 */
async function askBot(message) {
  const res = await fetch("/ask", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ message })
  });
  // Fail loudly on HTTP errors instead of trying to parse an error page as JSON.
  if (!res.ok) {
    throw new Error(`/ask failed with status ${res.status}`);
  }
  const data = await res.json();
  return data.response;
}
/**
 * Read the given text aloud using the browser's speech synthesis
 * (US English voice, slightly raised pitch, normal rate).
 */
function speak(text) {
  const utterance = new SpeechSynthesisUtterance(text);
  Object.assign(utterance, { lang: "en-US", pitch: 1.1, rate: 1 });
  speechSynthesis.speak(utterance);
}
/**
 * Start listening for the candidate's spoken answers.
 *
 * Each final transcript is echoed into the transcript panel, sent to the
 * backend interviewer, and the reply is both displayed and spoken aloud.
 * Because non-continuous recognition ends after every utterance, it is
 * restarted in `onend` so the conversation can continue past the first
 * answer.
 */
function startSpeechRecognition() {
  const SpeechRecognitionCtor = window.SpeechRecognition || window.webkitSpeechRecognition;
  if (!SpeechRecognitionCtor) {
    // Feature-detect instead of crashing with "undefined is not a constructor".
    appendMessage("System", "Speech recognition is not supported in this browser.");
    return;
  }

  const recognition = new SpeechRecognitionCtor();
  recognition.lang = "en-US";
  recognition.interimResults = false;
  recognition.maxAlternatives = 1;

  let fatalError = false;

  recognition.onresult = async (event) => {
    const transcript = event.results[0][0].transcript;
    appendMessage("You", transcript);
    try {
      const reply = await askBot(transcript);
      appendMessage("Interviewer", reply);
      speak(reply);
    } catch (err) {
      // Surface backend failures in the transcript instead of leaving an
      // unhandled promise rejection.
      console.error("askBot failed:", err);
      appendMessage("System", "Sorry, something went wrong. Please try again.");
    }
  };

  recognition.onerror = (event) => {
    console.error("Speech recognition error:", event.error);
    // Permission problems will not fix themselves — stop the restart loop.
    if (event.error === "not-allowed" || event.error === "service-not-allowed") {
      fatalError = true;
    }
  };

  // One-shot recognition stops after each utterance; restart it so the
  // interview keeps listening for the next answer.
  recognition.onend = () => {
    if (!fatalError) {
      recognition.start();
    }
  };

  recognition.start();
}
// Kick off the interview: camera preview, opening question, then start
// listening for spoken answers. The button is disabled while starting so a
// double-click cannot launch two parallel interview sessions, and failures
// (e.g. the /ask request rejecting) are reported instead of becoming an
// unhandled promise rejection.
startBtn.addEventListener("click", async () => {
  startBtn.disabled = true;
  try {
    await startCamera();
    const opening = await askBot("Start the interview.");
    appendMessage("Interviewer", opening);
    speak(opening);
    startSpeechRecognition();
  } catch (err) {
    console.error("Failed to start interview:", err);
    appendMessage("System", "Could not start the interview. Please try again.");
    startBtn.disabled = false; // re-enable so the user can retry
  }
});