import { Component, OnDestroy, OnInit, signal, ViewChild, ElementRef, ChangeDetectorRef } from '@angular/core';
import { CommonModule } from '@angular/common';
import { Router, NavigationStart } from '@angular/router';
import { Subscription } from 'rxjs';
import { FormsModule } from '@angular/forms';
import { PyDetectService } from '../services/pydetect.service';

declare global {
  interface Window {
    webkitSpeechRecognition?: any;
    SpeechRecognition?: any;
  }
}

type QAResult = {
  question: string;
  transcript: string;
  language: string;
  avgPitchHz: number | null;
  avgVolume: number | null; // 0..1 (rough RMS)
  audioUrl: string;
  startedAt: number;
  endedAt: number;
  skipped?: boolean;
};

@Component({
  standalone: true,
  selector: 'app-py-detect',
  imports: [CommonModule, FormsModule],
  templateUrl: './py-detect.component.html',
  styleUrls: ['./py-detect.component.css']
})
export class PyDetectComponent implements OnInit, OnDestroy {
  // Body language explanation shown in the UI
  public bodyLanguageExplanation: string | null = null;
  public bodyLanguageMeaning: string | null = null;

  // Fetch the explanation for a body language cue from the backend
  public fetchBodyLanguageExplanation(cue: string) {
    this.bodyLanguageExplanation = null;
    this.bodyLanguageMeaning = null;
    this.pyDetectService.bodyLanguageExplain(cue).subscribe({
      next: (resp) => {
        if (resp?.explanation) {
          this.bodyLanguageExplanation = resp.explanation;
        }
        if (resp?.meaning) {
          this.bodyLanguageMeaning = resp.meaning;
        }
        console.log('[PyDetect] Body Language:', { meaning: resp?.meaning, explanation: resp?.explanation });
      },
      error: () => {
        this.bodyLanguageExplanation = 'No explanation available.';
        this.bodyLanguageMeaning = null;
        console.warn('[PyDetect] No body language explanation available.');
      }
    });
  }

  // FER emotion result for UI display
  public ferEmotion: string | null = null;
  // Face detection score for UI display
  public faceDetectionScore: number | null = null;

  // --- Patch: Add missing properties for template and logic ---
  public currentQuestionIndex: number = -1;
  public totalQuestions: number = 0;
  public currentQuestionText: string = '';
  public evidenceSummary: string = '';
  // Truth score for the last submitted answer
  public truthScore: number | null = null;

  // Timing & frame streaming additions
  public questionWindowStartAt: number | null = null;
  public answerStartAt: number | null = null;
  public answerEndAt: number | null = null;
  public answerMode: 'voice' | 'text' | 'mixed' = 'text';
  private frameIntervalId: any;
  private frameStreamingActive: boolean = false;
  public involvementScore: number | null = null;
  public involvementCues: string[] = [];
  public dominantInvestigativeExpression: string | null = null;
  public behaviorTagDistribution: Record<string, number> | null = null; // tag -> count (bare `Record` is invalid TypeScript)
  public guidanceCommand: string | null = null;

  // --- Patch: Add missing stub methods for template bindings ---
  public async speakQuestion(question: string) {
    // Use TTS to speak the question (delegates to speak())
    await this.speak(question);
  }

  public startRecognitionWithRecording(index: number) {
    // Stub: start voice recording and speech recognition for the question at `index`
  }
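  // A minimal sketch of what the stub above might do, assuming the Web Speech API
  // and MediaRecorder. It reuses the `recognition`, `mediaStream`, `mediaRecorder`,
  // `audioChunks`, and `transcriptSoFar` fields declared in this class; the method
  // name itself is hypothetical and not part of the original file.
  private async startRecognitionWithRecordingSketch(index: number) {
    // `index` identifies the question being answered; unused in this sketch.
    // Capture microphone audio and buffer chunks for later playback/upload.
    this.mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
    this.mediaRecorder = new MediaRecorder(this.mediaStream);
    this.audioChunks = [];
    this.mediaRecorder.ondataavailable = (e) => this.audioChunks.push(e.data);
    this.mediaRecorder.start();

    // Start browser speech recognition in parallel and accumulate final results.
    const SR = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (!SR) { return; }
    this.recognition = new SR();
    this.recognition.continuous = true;
    this.recognition.interimResults = true;
    this.recognition.onresult = (event: any) => {
      let finalText = '';
      for (let i = event.resultIndex; i < event.results.length; i++) {
        if (event.results[i].isFinal) {
          finalText += event.results[i][0].transcript;
        }
      }
      if (finalText) { this.transcriptSoFar += finalText; }
    };
    this.recognition.start();
  }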
  public async navigateToValidationPage() {
    // Stop video recording and release the camera
    this.stopVideoRecording();
    if (this.videoStream) {
      this.videoStream.getTracks().forEach(t => t.stop());
      this.videoStream = undefined;
    }
    this.isRecording = false;
    // Give the recorder's onstop time to fire and recordedVideoUrl to be set
    await this.sleep(500);

    // Automatically download the recorded video if available
    if (this.recordedVideoUrl) {
      const anchor = document.createElement('a');
      anchor.href = this.recordedVideoUrl;
      anchor.download = 'investigation-video.webm';
      anchor.style.display = 'none';
      document.body.appendChild(anchor);
      anchor.click();
      setTimeout(() => {
        document.body.removeChild(anchor);
        // Optionally revoke the object URL after download
        // URL.revokeObjectURL(this.recordedVideoUrl);
      }, 100);
    }

    // Then navigate to the validation page
    this.router.navigate(['/validationpage']);
  }

  public uploadDocument() {
    // Stub: handle document upload here
  }

  public onEvidenceFileSelect(event: any, type: string) {
    // Stub: process the selected evidence files here
  }

  // Manual answer submission for testing
  public submitTextAnswer() {
    if (!this.textAnswer || !this.sessionId || this.currentQuestionIndex < 0 || !this.questions[this.currentQuestionIndex]) {
      this.infoText = 'Please enter an answer and ensure a question is active.';
      return;
    }
    // Submit the response to the backend
    this.pyDetectService.submitResponse(
      this.sessionId,
      this.textAnswer,
      this.questions[this.currentQuestionIndex]
    ).subscribe({
      next: async (res) => {
        // Extract the truth score if present (?? keeps a legitimate score of 0)
        const score = res?.truth_score ?? res?.score;
        this.truthScore = score != null ? Number(score) : null;
        this.infoText = 'Answer submitted.' + (this.truthScore !== null ? ` Truth Score: ${this.truthScore}` : '');
        this.textAnswer = '';
        // Fetch the body language explanation for the first involvement cue
        if (this.involvementCues.length) {
          this.fetchBodyLanguageExplanation(this.involvementCues[0]);
        }
        const response = await this.pyDetectService.askQuestion(
          this.sessionId,
          this.crimeType,
          this.briefDescription
        ).toPromise();
        if (response && response.question) {
          this.questions.push(response.question);
          this.currentQuestionIndex++;
          this.cdr.detectChanges();
          await this.speakQuestion(response.question);
        } else {
          this.infoText = 'No more questions.';
        }
      },
      error: () => {
        this.infoText = 'Error submitting answer.';
      }
    });
  }

  public showDetailsPanel: boolean = false;
  public metadata: any = null;

  // Backend-driven session and investigation state
  sessionId: string = '';
  caseData: any = null;
  briefDescription: string = '';
  isSessionStarted: boolean = false;
  isLoading: boolean = false;
  currentQuestion: string = '';
  textAnswer: string = '';
  lastAnalysisResult: any = null;
  questionCount: number = 0;
  currentInvestigationStage: string = 'Initial Investigation';
  questionNumber: number = 1;
  cameraActive: boolean = false;
  voiceRecordingActive: boolean = false;
  investigationActive: boolean = false;
  investigationStarted: boolean = false;
  caseSummary: any = null;
  processingResponse: boolean = false;
  videoStatus: string = 'Camera Ready';
  ttsEnabled: boolean = false;
  isListening: boolean = false;
  speechRecognition: any = null;
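  // `stopFrameStreaming()` is called below but defined elsewhere in the file; a
  // minimal sketch of the start/stop pair implied by `frameIntervalId` and
  // `frameStreamingActive`, assuming the backend accepts periodic JPEG frames.
  // The method names and the `sendFrame` service call are hypothetical.
  private startFrameStreamingSketch(intervalMs = 1000) {
    const video = this.videoElement?.nativeElement;
    if (!video || this.frameStreamingActive) { return; }
    const canvas = document.createElement('canvas');
    this.frameStreamingActive = true;
    this.frameIntervalId = setInterval(() => {
      // Snapshot the current video frame onto an offscreen canvas
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      canvas.getContext('2d')?.drawImage(video, 0, 0);
      // e.g. this.pyDetectService.sendFrame(this.sessionId, canvas.toDataURL('image/jpeg', 0.7));
    }, intervalMs);
  }

  private stopFrameStreamingSketch() {
    if (this.frameIntervalId) {
      clearInterval(this.frameIntervalId);
      this.frameIntervalId = undefined;
    }
    this.frameStreamingActive = false;
  }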
  // Combined answer submission: prefer the text box, fall back to the transcript
  public submitCombinedAnswer() {
    // Accept the answer from the text box or the voice transcript
    const answerText = (this.textAnswer && this.textAnswer.trim())
      ? this.textAnswer.trim()
      : (this.transcriptSoFar && this.transcriptSoFar.trim())
        ? this.transcriptSoFar.trim()
        : '';
    if (!answerText || !this.sessionId || !this.questions[this.currentQuestionIndex]) {
      this.infoText = 'Please provide your answer before submitting.';
      return;
    }
    this.stopAudioRecording();
    this.infoText = 'Submitting answer...';
    this.textAnswer = '';
    this.transcriptSoFar = '';
    const endTs = Date.now();
    this.answerEndAt = endTs;
    if (!this.answerStartAt) this.answerStartAt = this.questionWindowStartAt || endTs;
    const durationMs = this.answerEndAt - this.answerStartAt;
    this.stopFrameStreaming();
    this.pyDetectService.submitResponse(
      this.sessionId,
      answerText,
      this.questions[this.currentQuestionIndex],
      {
        answer_start_at: this.answerStartAt,
        answer_end_at: this.answerEndAt,
        duration_ms: durationMs,
        mode: this.answerMode
      }
    ).subscribe({
      next: async (res) => {
        // Extract the truth score if present (?? keeps a legitimate score of 0)
        const score = res?.truth_score ?? res?.score;
        this.truthScore = score != null ? Number(score) : null;
        this.infoText = 'Answer submitted.' + (this.truthScore !== null ? ` Truth Score: ${this.truthScore}` : '');
        // Pull involvement metrics
        this.fetchLatestInvolvement();
        // Fetch the next question from the backend
        const response = await this.pyDetectService.askQuestion(
          this.sessionId,
          this.crimeType,
          this.briefDescription
        ).toPromise();
        if (response && response.question) {
          this.questions.push(response.question);
          this.currentQuestionIndex++;
          this.questionNumber = this.currentQuestionIndex + 1;
          this.cdr.detectChanges();
          // await this.startCamera();
          await this.startVideoRecording();
          await this.speakQuestion(response.question);
          // Restart the window for the next question
          this.startQuestionWindow();
          // Reset answer timing
          this.answerStartAt = null;
          this.answerEndAt = null;
          this.answerMode = 'text';
        } else {
          this.infoText = 'No more questions.';
          this.showSummary = true;
        }
      },
      error: () => {
        this.infoText = 'Error submitting answer.';
      }
    });
  }

  public stopAudioRecording() {
    if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') {
      this.mediaRecorder.stop();
      // Show the transcribed answer in the text box
      this.textAnswer = this.transcriptSoFar;
      this.infoText = 'Voice recording stopped.';
      // Stop speech recognition when recording stops
      if (this.recognition) {
        try { this.recognition.stop(); } catch { }
      }
    }
  }

  speechSynthesis: any = null;
  voiceSupported: boolean = false;
  microphoneSupported: boolean = false;
  microphonePermissionDenied: boolean = false;
  permissionStatus: string = 'unknown';

  // ---- TTS active flag ----
  private isActive = false;

  // ---- Q/A data ----
  log: QAResult[] = [];

  // ---- Constructor with Router injection ----
  private routerSubscription?: Subscription;

  constructor(
    private router: Router,
    private cdr: ChangeDetectorRef,
    private pyDetectService: PyDetectService
  ) {
    // Cancel TTS on any navigation away
    this.routerSubscription = this.router.events.subscribe(event => {
      if (event instanceof NavigationStart) {
        if (window.speechSynthesis) {
          window.speechSynthesis.cancel();
        }
      }
    });
  }

  // ---- Recording/analysis handles ----
  private mediaStream?: MediaStream;
  private mediaRecorder?: MediaRecorder;
  private audioChunks: Blob[] = [];
  private audioCtx?: AudioContext;
  private analyser?: AnalyserNode;
  private sourceNode?: MediaStreamAudioSourceNode;
  private pitchSamples: number[] = [];
  private volumeSamples: number[] = [];
  private analyserBuffer: Float32Array = new Float32Array(2048);
  private analyserTimer?: any;

  // ---- Speech Recognition ----
  private recognition?: any; // webkitSpeechRecognition
  private transcriptSoFar = '';
  private detectedLang = 'auto';
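  // A minimal sketch of the per-frame analysis these fields imply: a loop driven
  // by `analyserTimer` every `analyserWindowMs` that reads time-domain data,
  // records an RMS volume in 0..1, and estimates pitch by naive autocorrelation
  // over the human voice range. The method name is hypothetical.
  private sampleAnalyserFrameSketch() {
    if (!this.analyser || !this.audioCtx) { return; }
    this.analyser.getFloatTimeDomainData(this.analyserBuffer);
    const buf = this.analyserBuffer;

    // Rough RMS volume (0..1), matching QAResult.avgVolume
    let sumSquares = 0;
    for (let i = 0; i < buf.length; i++) { sumSquares += buf[i] * buf[i]; }
    this.volumeSamples.push(Math.sqrt(sumSquares / buf.length));

    // Naive autocorrelation: the lag with maximal self-similarity gives the period
    const sampleRate = this.audioCtx.sampleRate;
    const minLag = Math.floor(sampleRate / 500); // ~500 Hz upper bound
    const maxLag = Math.floor(sampleRate / 70);  // ~70 Hz lower bound
    let bestLag = 0, bestCorr = 0;
    for (let lag = minLag; lag <= maxLag && lag < buf.length; lag++) {
      let corr = 0;
      for (let i = 0; i + lag < buf.length; i++) { corr += buf[i] * buf[i + lag]; }
      if (corr > bestCorr) { bestCorr = corr; bestLag = lag; }
    }
    if (bestLag > 0) { this.pitchSamples.push(sampleRate / bestLag); }
  }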
  // ---- Settings ----
  private maxAnswerMs = 10_000; // per-answer recording window
  private silenceTimeout?: any; // timer handle used to end a quiet answer window
  private analyserWindowMs = 100; // analyser sampling interval

  // Question source (legacy seed questions removed; now populated from the backend)
  public questions: string[] = [];

  // --- Patch: Add missing public methods and properties for template binding ---
  // videoStatus is already declared above for the backend workflow
  public videoStream?: MediaStream;
  @ViewChild('videoElement', { static: false }) videoElement?: ElementRef<HTMLVideoElement>;
  public videoChunks: Blob[] = [];
  public videoAnswers: Blob[] = [];
  public videoRecorder?: MediaRecorder;
  public recordedVideoUrl: string = '';

  // UI properties for the template
  caseId: string = '';
  crimeType: string = '';
  dateTime: string = '';
  location: string = '';
  suspectName: string = '';
  statusText: string = '';
  investigationOfficer: string = '';
  progress: number = 0;
  progressStage: string = '';
  sessionTime: string = '';
  isRecording: boolean = false;
  isProcessing: boolean = false;
  transcriptLines: string[] = [];
  showSummary: boolean = false;
  summaryData: { question: string; answer: string; duration: number }[] = [];

  // Evidence panel state and placeholder data
  public showEvidencePanel: boolean = false;
  public showSummaryPanel: boolean = false;
  public uploadedDocuments: string[] = ['Report.pdf', 'Statement.docx'];
  public capturedPhotos: string[] = ['Photo1.jpg', 'Photo2.png'];
  public previousRecordings: string[] = ['Recording1.webm', 'Recording2.mp3'];

  // Info text for the HUD below the question
  public floatingInfoText: string | null = null;
  public infoText: string | null = null;

  // Elapsed and remaining time for the recording status
  public elapsedTime: string = '00:00';
  public remainingTime: string = '00:00';
  private recordingTimerInterval: any;

  // Repeat the current question using TTS
  public async repeatQuestion() {
    await this.speakQuestion(this.currentQuestionText);
    await this.sleep(1000);
    this.infoText = 'Recording in progress – Listening to answer.';
  }

  // Navigate back to the homepage
  navigateHome() {
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
    this.router.navigate(['/']);
  }

  goToInfoPage() {
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
    this.router.navigate(['/infopage']);
  }

  navigateBackToCaseDetails() {
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
    // Stop any