import {
  AfterViewChecked,
  ChangeDetectorRef,
  Component,
  ElementRef,
  Inject,
  OnDestroy,
  OnInit,
  PLATFORM_ID,
  Renderer2,
  ViewChild
} from '@angular/core';
import { CommonModule, isPlatformBrowser } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { Router, RouterModule } from '@angular/router';
import { Subscription } from 'rxjs';

import { ApiService } from './api.service';

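// Standalone chat component: free-text questions with grammar suggestions,
// speech-to-text input, and spoken (text-to-speech) AI responses.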
@Component({
  selector: 'app-chat',
  standalone: true,
  imports: [FormsModule, CommonModule, RouterModule],
  templateUrl: './chat.component.html',
  styleUrl: './chat.component.css'
})
export class ChatComponent implements OnInit, AfterViewChecked, OnDestroy {
  // Chat state
  showQuestions: boolean = false;
  userInput: string = '';
  messages: { from: string; text: string; timestamp: string; isPlaying?: boolean }[] = [];
  isTyping: boolean = false;

  @ViewChild('chatBox') chatBox!: ElementRef;

  // Speech recognition / synthesis state
  isLoadingSpeech: boolean = false;
  selectedVoice: SpeechSynthesisVoice | null = null;
  errorMessage: string = "";
  recognition: any;
  speechSynthesisInstance: SpeechSynthesisUtterance | null = null;
  isListening: boolean = false;
  isProcessingSpeech: boolean = false;
  isSpeaking: boolean = false;
  isAudioPaused: boolean = false;
  isInputValid: boolean = false;
  suggestions: string[] = [];
  showMicPopup: boolean = false;
  isSubmitting: boolean = false;

  private responseSub?: Subscription;
  private lastFullAiText: string = '';

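  // Keep the chat view scrolled to the latest message after each change-detection pass.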
  ngAfterViewChecked() {
    setTimeout(() => {
      this.scrollToBottom();
    }, 100);
  }

  private scrollToBottom(): void {
    try {
      this.chatBox.nativeElement.scrollTop = this.chatBox.nativeElement.scrollHeight;
    } catch (err) { }
  }

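  // Browser-only setup: log available speech-synthesis voices, configure the
  // Web Speech API recognizer, and register cleanup for page unload.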
  constructor(
    private apiService: ApiService,
    private cdr: ChangeDetectorRef,
    @Inject(PLATFORM_ID) private platformId: object,
    private router: Router,
    private renderer: Renderer2
  ) {
    if (isPlatformBrowser(this.platformId)) {
      if (window.speechSynthesis) {
        window.speechSynthesis.onvoiceschanged = () => {
          console.log("Available Voices:", window.speechSynthesis.getVoices());
        };
      }

      const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;
      if (SpeechRecognition) {
        this.recognition = new SpeechRecognition();
        this.recognition.continuous = false;
        this.recognition.lang = 'en-US';
        this.recognition.interimResults = false;

        this.recognition.onresult = (event: any) => {
          if (event.results && event.results[0]) {
            const transcript = event.results[0][0].transcript.trim();
            console.log('Recognized speech:', transcript);
            this.userInput = transcript;
            this.sendMessage();
            this.recognition.stop();
            this.isListening = false;
          }
        };

        this.recognition.onerror = (event: any) => {
          console.error('Speech Recognition Error:', event.error);
          this.isProcessingSpeech = false;
        };
      } else {
        console.warn('Speech Recognition is not supported in this browser.');
      }

      window.addEventListener('beforeunload', this.handleUnload);
    }
  }

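  // Cancel any ongoing speech when the page is unloaded or the component is destroyed.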
  private handleUnload = (): void => {
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
  };

  ngOnDestroy(): void {
    if (isPlatformBrowser(this.platformId)) {
      if (window.speechSynthesis) {
        window.speechSynthesis.cancel();
      }
      window.removeEventListener('beforeunload', this.handleUnload);
    }
  }

  openMicrophonePopup(): void {
    this.showMicPopup = true;
  }

  closeMicrophonePopup(): void {
    this.showMicPopup = false;
  }

  showHardcodedQuestions(): void {
    setTimeout(() => {
      this.showQuestions = true;
    }, 100);
  }

  hideHardcodedQuestions(): void {
    setTimeout(() => {
      this.showQuestions = false;
    }, 200);
  }

  selectHardcodedQuestion(question: string): void {
    this.userInput = question;
    this.showQuestions = false;
    setTimeout(() => {
      this.sendMessage();
      this.userInput = '';
    }, 100);
  }

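  // Ask the backend for grammar suggestions for the current input; suggestions are
  // cleared while the input is empty or the AI is speaking.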
  getSuggestions(): void {
    if (!this.userInput || this.userInput.trim().length < 1 || this.isSpeaking) {
      this.suggestions = [];
      return;
    }

    this.apiService.getGrammarSuggestions(this.userInput).subscribe(
      (response) => {
        console.log("API Response:", response);
        if (response.suggestions) {
          this.suggestions = response.suggestions
            .filter((s: string) => s && s.trim().length > 0)
            .map((s: string) => s.replace(/^\d+\.\s*/, ""));
        } else {
          this.suggestions = [];
        }
      },
      (error) => {
        console.error("Error fetching suggestions:", error);
        this.suggestions = [];
      }
    );
  }

  selectSuggestion(suggestion: string): void {
    this.userInput = suggestion;
    this.suggestions = [];
    this.sendMessage();
  }

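  // Send the user's message to the API, render the reply, and read it aloud.
  // The backend session id is cached in localStorage so follow-up questions reuse the same session.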
  sendMessage(inputText?: string): void {
    const message = inputText ? inputText.trim() : this.userInput.trim();
    if (!message) {
      return;
    }

    const sessionId = localStorage.getItem('session_id');

    this.messages.push({ from: 'user', text: message, timestamp: new Date().toLocaleTimeString() });
    this.userInput = '';
    this.isTyping = true;
    this.cdr.detectChanges();
    this.scrollToBottom();

    this.responseSub = this.apiService.askQuestion(message, sessionId).subscribe(
      (response) => {
        this.isTyping = false;

        const explanation = (response?.response || 'No explanation available.').trim();

        // Persist the session id returned by the backend.
        if (response.session_id && !sessionId) {
          localStorage.setItem('session_id', response.session_id);
        }

        const lines: string[] = String(explanation).split('\n');
        const formatted: string = lines.map((line: string) => line.trim()).join('\n');
        this.messages.push({
          from: 'ai',
          text: formatted,
          timestamp: new Date().toLocaleTimeString(),
        });
        this.cdr.detectChanges();
        this.scrollToBottom();

        this.lastFullAiText = formatted;

        this.speakResponse(explanation);
      },
      (error) => {
        this.isTyping = false;
        const errorMessage = 'Error: Could not get a response from the server.';
        console.error('API Error:', error);

        this.messages.push({
          from: 'ai',
          text: errorMessage,
          timestamp: new Date().toLocaleTimeString(),
        });
        this.cdr.detectChanges();
        this.scrollToBottom();

        this.lastFullAiText = errorMessage;
        this.speakResponse(errorMessage);
      }
    );
  }

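  // Convert a plain-text response into simple HTML (line breaks, numbered items, bullets, bold).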
  formatStructuredResponse(text: string): string {
    return text
      .replace(/\n/g, '<br>')
      .replace(/(\d+)\.\s/g, '<b>$1.</b> ')
      .replace(/•\s/g, '✔️ ')
      .replace(/-\s/g, '🔹 ')
      .replace(/(\*\*)(.*?)\1/g, '<b>$2</b>');
  }

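  // Speak the response aloud and reveal the last AI message word by word as it is spoken.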
  speakResponse(responseText: string): void {
    if (!responseText) {
      console.warn('No response text provided for speech.');
      return;
    }

    console.log('Initiating text-to-speech with response:', responseText);

    let lastAiMessage = this.messages.slice().reverse().find((msg) => msg.from === 'ai');

    if (!lastAiMessage) {
      lastAiMessage = { from: 'ai', text: '', timestamp: new Date().toLocaleTimeString() };
      this.messages.push(lastAiMessage);
    } else {
      lastAiMessage.text = '';
    }

    this.cdr.detectChanges();

    const words = responseText.split(' ');
    let currentWordIndex = 0;

    const speech = new SpeechSynthesisUtterance();
    speech.text = responseText;
    speech.lang = 'en-US';
    speech.pitch = 1;
    speech.rate = 1;
    this.isSpeaking = true;

    const voices = window.speechSynthesis.getVoices();
    const femaleVoice = voices.find(voice => voice.name === "Microsoft Zira - English (United States)");

    if (femaleVoice) {
      speech.voice = femaleVoice;
      console.log("Using voice:", femaleVoice.name);
    } else if (this.selectedVoice) {
      // Fall back to the voice chosen in loadVoices().
      speech.voice = this.selectedVoice;
      console.log("Using voice:", this.selectedVoice.name);
    } else {
      console.warn("Microsoft Zira not found, using the browser default voice.");
    }

    speech.onboundary = (event) => {
      if (event.name === 'word' && currentWordIndex < words.length) {
        lastAiMessage!.text = words.slice(0, currentWordIndex + 1).join(' ');
        currentWordIndex++;
        this.cdr.detectChanges();
      }
    };

    speech.onend = () => {
      console.log('Speech ended.');
      this.isSpeaking = false;
      lastAiMessage!.text = responseText;
      this.cdr.detectChanges();
    };

    console.log('Starting speech synthesis...');
    window.speechSynthesis.speak(speech);
  }

  ngOnInit(): void {
    if (!isPlatformBrowser(this.platformId)) {
      return;
    }

    // Voices may load asynchronously; refresh the list when the browser reports changes.
    if (window.speechSynthesis.onvoiceschanged !== undefined) {
      window.speechSynthesis.onvoiceschanged = () => {
        this.loadVoices();
      };
    }

    this.loadVoices();
  }

  loadVoices(): void {
    const voices = window.speechSynthesis.getVoices();

    if (!voices.length) {
      console.warn("No voices available yet, retrying...");
      setTimeout(() => this.loadVoices(), 500);
      return;
    }

    console.log("Available Voices:", voices.map(v => v.name));

    const preferredVoices = [
      "Google UK English Female",
      "Google US English Female",
      "Microsoft Zira - English (United States)",
      "Microsoft Hazel - English (United Kingdom)",
      "Google en-GB Female",
      "Google en-US Female"
    ];

    for (const voiceName of preferredVoices) {
      const foundVoice = voices.find(voice => voice.name === voiceName);
      if (foundVoice) {
        this.selectedVoice = foundVoice;
        break;
      }
    }

    if (!this.selectedVoice) {
      this.selectedVoice = voices.find(voice => voice.name.toLowerCase().includes("female")) || voices[0];
    }

    console.log("Selected AI Voice:", this.selectedVoice?.name);
  }

  pauseAudio(): void {
    if (window.speechSynthesis.speaking && !window.speechSynthesis.paused) {
      window.speechSynthesis.pause();
      this.isAudioPaused = true;
      console.log('AI Speech Paused');
      this.cdr.detectChanges();
    }
  }

  resumeAudio(): void {
    if (window.speechSynthesis.paused) {
      window.speechSynthesis.resume();
      this.isAudioPaused = false;
      console.log('AI Speech Resumed');
      this.cdr.detectChanges();
    }
  }

  muteMicrophone(): void {
    console.log("Microphone muted");
  }

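  // Request microphone access, start speech recognition, and auto-send the recognized transcript.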
  startListening(): void {
    this.isListening = true;
    this.isProcessingSpeech = false;

    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
      navigator.mediaDevices
        .getUserMedia({ audio: true })
        .then(() => {
          if (this.recognition) {
            console.log('Starting speech recognition...');
            this.recognition.start();

            this.recognition.onaudiostart = () => console.log('Audio capturing started.');
            this.recognition.onspeechstart = () => console.log('Speech has been detected.');
            this.recognition.onspeechend = () => console.log('Speech ended, processing...');
            this.recognition.onaudioend = () => console.log('Audio capturing ended.');

            this.recognition.onresult = (event: any) => {
              if (event.results && event.results[0]) {
                const transcript = event.results[0][0].transcript.trim();
                console.log('Recognized speech:', transcript);

                this.userInput = transcript;

                if (this.userInput.trim()) {
                  console.log('Sending question automatically:', this.userInput);
                  this.sendMessage();
                }

                this.recognition.stop();
                this.isListening = false;
              }
            };

            this.recognition.onnomatch = () =>
              alert('No speech detected. Please try again.');
            this.recognition.onend = () => {
              console.log('Speech recognition service disconnected.');
              this.isListening = false;
            };
            this.recognition.onerror = (error: any) => {
              console.error('Speech Recognition Error:', error);
              this.isListening = false;
              if (error.error === 'not-allowed') {
                alert('Microphone permission denied.');
              } else if (error.error === 'no-speech') {
                alert('No speech detected. Please try speaking clearly.');
              }
            };
          } else {
            alert('Speech Recognition is not supported in this browser.');
          }
        })
        .catch((error) => {
          console.error('Microphone access denied:', error);
          this.errorMessage = 'Please enable microphone access to use this feature.';
          this.isListening = false;
        });
    } else {
      alert('Microphone access is not supported in this browser.');
    }
  }

  stopListening(): void {
    this.isListening = false;
    if (this.recognition) {
      this.recognition.stop();
    }
  }

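  // Play, pause, or resume text-to-speech for an individual message bubble.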
  toggleAudio(message: { text: string; isPlaying?: boolean }): void {
    if (this.speechSynthesisInstance && this.speechSynthesisInstance.text === message.text) {
      if (message.isPlaying) {
        window.speechSynthesis.pause();
        message.isPlaying = false;
      } else {
        window.speechSynthesis.resume();
        message.isPlaying = true;
      }
    } else {
      if (this.speechSynthesisInstance) {
        window.speechSynthesis.cancel();
      }
      this.messages.forEach((msg) => (msg.isPlaying = false));

      message.isPlaying = true;
      this.speechSynthesisInstance = new SpeechSynthesisUtterance(message.text);
      this.speechSynthesisInstance.lang = 'en-US';
      this.speechSynthesisInstance.pitch = 1;
      this.speechSynthesisInstance.rate = 1;

      this.speechSynthesisInstance.onend = () => {
        message.isPlaying = false;
        this.speechSynthesisInstance = null;
      };

      window.speechSynthesis.speak(this.speechSynthesisInstance);
    }
  }

  goToHome() {
    this.router.navigate(['/home']);
  }

  copySuccessIndex: number | null = null;

  copyToClipboard(text: string, index: number): void {
    navigator.clipboard.writeText(text).then(() => {
      this.copySuccessIndex = index;
      setTimeout(() => {
        this.copySuccessIndex = null;
      }, 2000);
    }).catch(err => {
      console.error('Failed to copy: ', err);
    });
  }

  checkInput() {
    this.isInputValid = this.userInput.trim().length > 0;
  }

  // The main action button sends, pauses, resumes, or starts listening depending on state.
  handleButtonClick(): void {
    if (this.userInput.trim().length > 0) {
      this.showQuestions = false;
      const messageToSend = this.userInput;
      this.userInput = '';
      this.sendMessage(messageToSend);
    } else if (this.isSpeaking) {
      this.pauseAudio();
    } else if (this.isAudioPaused) {
      this.resumeAudio();
    } else {
      this.startListening();
    }
  }

  getButtonIcon(): string {
    if (this.userInput.trim().length > 0) {
      return 'assets/images/chat/send-icon.png';
    } else if (this.isSpeaking) {
      return 'assets/images/chat/pause-icon.png';
    } else if (this.isAudioPaused) {
      return 'assets/images/chat/resume-icon.png';
    } else {
      return 'assets/images/chat/microphone-icon.png';
    }
  }

  addNewLine(event: KeyboardEvent): void {
    if (event.key === 'Enter' && event.shiftKey) {
      event.preventDefault();
      this.userInput += '\n';
    }
  }

  adjustTextareaHeight(event: Event): void {
    const textarea = event.target as HTMLTextAreaElement;
    textarea.style.height = 'auto';
    textarea.style.height = `${textarea.scrollHeight}px`;
  }

  getButtonIconClass(): string {
    return this.userInput.trim().length > 0
      ? 'send-icon'
      : this.isSpeaking
        ? 'pause-icon'
        : this.isAudioPaused
          ? 'resume-icon'
          : 'microphone-icon';
  }

  openMicrophoneSettings(): void {
    const userAgent = navigator.userAgent;

    // Note: most browsers block script navigation to internal settings pages, so these
    // window.open calls may be ignored. Check for Edge first, since its user agent
    // string also contains "Chrome".
    if (userAgent.includes("Edg")) {
      window.open("edge://settings/content/microphone", "_blank");
    } else if (userAgent.includes("Chrome")) {
      window.open("chrome://settings/content/microphone", "_blank");
    } else if (userAgent.includes("Firefox")) {
      window.open("about:preferences#privacy", "_blank");
    } else {
      alert("Please check your browser's settings to enable the microphone.");
    }
  }

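  // Stop all speech output and abandon any in-flight API request.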
  stopSpeaking(): void {
    try {
      if (window.speechSynthesis.speaking || window.speechSynthesis.paused) {
        window.speechSynthesis.cancel();
      }
    } catch { }

    this.speechSynthesisInstance = null;

    if (this.responseSub && !this.responseSub.closed) {
      this.responseSub.unsubscribe();
    }

    this.isSpeaking = false;
    this.isAudioPaused = false;
    this.isTyping = false;
  }

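  // Enter sends the message; Shift+Enter is handled by addNewLine; input is ignored while the AI is speaking.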
  handleEnterPress(event: KeyboardEvent): void {
    if (this.isSpeaking) {
      event.preventDefault();
      return;
    }
    if (event.key === 'Enter' && !event.shiftKey) {
      event.preventDefault();
      const text = (this.userInput || '').trim();
      if (text) this.sendMessage();
    }
  }
}