# NOTE: stray "Buckets:" table-extraction artifact removed (not valid Python).
| """ | |
| AI Interview Conduct System - ENHANCED VERSION | |
| Advanced Features: Face Orientation Detection + Eye Tracking + Pose Estimation | |
| Detects: Left/Right/Up/Down face turns + Eye movements + Multiple people | |
| """ | |
| from flask import Flask, Response, request, jsonify, render_template_string | |
| import cv2 | |
| import numpy as np | |
| from datetime import datetime | |
| import threading | |
| import queue | |
| import os | |
| import re | |
| import time | |
| import speech_recognition as sr | |
| import google.generativeai as genai | |
| from werkzeug.utils import secure_filename | |
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = '/tmp/uploads'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # reject uploads over 16 MB
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
# ========== GEMINI API KEY ==========
# SECURITY: the key was previously hardcoded here and is now exposed in
# version control — it should be revoked and rotated. Prefer the
# GEMINI_API_KEY environment variable; the literal below is kept only as a
# backward-compatible fallback.
GEMINI_API_KEY = os.environ.get(
    "GEMINI_API_KEY",
    "AIzaSyDXyaUnpoOgwI2RM85mxIz1gIrTtcKt3Ik",  # ← leaked key, rotate it
)
# ====================================
genai.configure(api_key=GEMINI_API_KEY)
# Global interview state
# Shared mutable state for the single active interview session.
# NOTE(review): mutated from both Flask request handlers and the video
# generator without a lock — presumably acceptable for a single-user demo;
# confirm before multi-user deployment.
interview_state = {
    'questions': [],              # generated question strings
    'current_question_index': 0,  # index of the question being asked
    'answers': [],                # per-question dicts: {question, answer, result}
    'scores': [],                 # per-question marks (0-10)
    'total_score': 0,             # running sum of marks
    'cheating_alerts': 0,         # total violations of any type
    'face_turn_alerts': 0,
    'multiple_face_alerts': 0,
    'no_face_alerts': 0,
    'interview_started': False,
    'interview_completed': False,
    'violations': []              # chronological log: {timestamp, type, details}
}
class AudioPlayer:
    """Fire-and-forget audio alerts, played by a background daemon thread."""

    def __init__(self):
        # Pending alert tokens; the worker thread drains this forever.
        self.sound_queue = queue.Queue()
        self.thread = threading.Thread(target=self._play_sounds, daemon=True)
        self.thread.start()

    def _play_sounds(self):
        """Worker loop: emit one beep per queued token, polling every second."""
        while True:
            try:
                _token = self.sound_queue.get(timeout=1)
            except queue.Empty:
                continue
            # `beep` where available, terminal bell otherwise.
            os.system('beep -f 800 -l 100 2>/dev/null || echo -e "\a"')

    def play_warning(self):
        """Queue a warning sound unless a backlog has already built up."""
        if self.sound_queue.qsize() < 3:
            self.sound_queue.put("warning")
class AdvancedEyeDetector:
    """
    Advanced eye tracking and face orientation detection.

    Features:
    - Face pose estimation via frontal/profile Haar cascades
    - Multiple face detection
    - Eye gaze tracking (pupil centroid from thresholded contours)
    - Head position monitoring against a calibrated baseline
    """

    def __init__(self):
        # Everything is wrapped in one try so any OpenCV/camera failure
        # degrades to camera_available=False instead of crashing the app.
        try:
            # Load the Haar cascades bundled with OpenCV.
            self.face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
            self.eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
            self.profile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_profileface.xml')
            # Camera setup: device 0 at 640x480, 30fps requested.
            self.cap = cv2.VideoCapture(0)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            self.cap.set(cv2.CAP_PROP_FPS, 30)
            if not self.cap.isOpened():
                raise Exception("Camera not available")
            # Detection parameters
            self.audio_player = AudioPlayer()
            self.eye_positions = []   # NOTE(review): never written below — appears unused
            self.face_positions = []  # NOTE(review): never written below — appears unused
            self.max_history = 15
            # Thresholds
            self.face_turn_threshold = 80  # px horizontal deviation from baseline
            self.vertical_face_threshold = 60  # px vertical deviation from baseline
            self.eye_center_threshold = 0.28  # relative pupil offset (0..1)
            self.eye_vertical_threshold = 0.25
            # Alert management
            self.last_alert_time = 0
            self.alert_cooldown = 1.5  # seconds between audible alerts
            self.cheating_detected = False
            self.current_violation = None
            # Face tracking / calibration state
            self.baseline_face_center = None
            self.calibration_frames = 0
            self.calibrated = False
            # Debounce counters (counted in frames, not seconds)
            self.no_face_counter = 0
            self.multiple_face_counter = 0
            self.face_turn_counter = 0
            self.camera_available = True
            print("✅ Advanced Eye Detector initialized")
        except Exception as e:
            print(f"⚠️ Camera error: {e}")
            self.camera_available = False

    def calibrate_baseline(self, face_center):
        """Calibrate the baseline face position over the first 30 frames.

        Uses an exponential moving average (alpha=0.1) so a momentary jitter
        during calibration does not skew the baseline.
        """
        if not self.calibrated:
            if self.baseline_face_center is None:
                self.baseline_face_center = np.array(face_center, dtype=float)
            else:
                # Moving average for a stable baseline
                alpha = 0.1
                self.baseline_face_center = (1 - alpha) * self.baseline_face_center + alpha * np.array(face_center)
            self.calibration_frames += 1
            if self.calibration_frames >= 30:
                self.calibrated = True
                print(f"✅ Calibrated baseline face position: {self.baseline_face_center}")

    def detect_faces_and_orientation(self, frame, gray):
        """
        Detect faces and determine orientation.

        `frame` is accepted but unused here; detection runs on `gray` only.
        Returns: (frontal_faces, profile_faces, profile_faces_right,
                  total_faces, orientation) where orientation is one of
                  "FRONTAL" / "LEFT_PROFILE" / "RIGHT_PROFILE".
        """
        # Detect frontal faces
        frontal_faces = self.face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(80, 80)
        )
        # Detect profile faces (cascade only matches one side)
        profile_faces = self.profile_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(80, 80)
        )
        # Flip the image horizontally to detect the opposite profile
        gray_flipped = cv2.flip(gray, 1)
        profile_faces_right = self.profile_cascade.detectMultiScale(
            gray_flipped,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(80, 80)
        )
        # NOTE(review): the same physical face can be counted more than once
        # here (frontal + profile hits) — confirm whether over-counting is OK.
        total_faces = len(frontal_faces) + len(profile_faces) + len(profile_faces_right)
        orientation = "FRONTAL"
        if len(profile_faces) > 0:
            orientation = "LEFT_PROFILE"
        elif len(profile_faces_right) > 0:
            orientation = "RIGHT_PROFILE"
        return frontal_faces, profile_faces, profile_faces_right, total_faces, orientation

    def detect_face_position(self, face, frame_center):
        """
        Detect whether the face has moved significantly from the baseline.

        `frame_center` is accepted but unused — deviation is measured against
        the calibrated baseline instead.
        Returns: (is_suspicious, direction, deviation_in_pixels)
        """
        if not self.calibrated:
            return False, "CALIBRATING", 0
        x, y, w, h = face
        face_center = np.array([x + w//2, y + h//2])
        # Deviation vector from the calibrated baseline
        deviation = face_center - self.baseline_face_center
        horizontal_dev = deviation[0]
        vertical_dev = deviation[1]
        # Classify direction; vertical can append to a horizontal label
        direction = "CENTER"
        is_suspicious = False
        if abs(horizontal_dev) > self.face_turn_threshold:
            if horizontal_dev > 0:
                direction = "TURNED_RIGHT"
            else:
                direction = "TURNED_LEFT"
            is_suspicious = True
        if abs(vertical_dev) > self.vertical_face_threshold:
            if vertical_dev > 0:
                direction = "LOOKING_DOWN" if direction == "CENTER" else direction + "+DOWN"
            else:
                direction = "LOOKING_UP" if direction == "CENTER" else direction + "+UP"
            is_suspicious = True
        total_deviation = np.linalg.norm(deviation)
        return is_suspicious, direction, total_deviation

    def detect_eyes_in_face(self, gray_frame, face_region):
        """Detect eyes within a face region; returns boxes in frame coordinates."""
        x, y, w, h = face_region
        roi_gray = gray_frame[y:y+h, x:x+w]
        eyes = self.eye_cascade.detectMultiScale(
            roi_gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(20, 20),
            maxSize=(80, 80)
        )
        # Convert ROI-relative boxes back to full-frame coordinates
        eyes_in_frame = []
        for (ex, ey, ew, eh) in eyes:
            eyes_in_frame.append((x + ex, y + ey, ew, eh))
        return eyes_in_frame

    def detect_pupil_in_eye(self, gray_frame, eye_region):
        """Locate the pupil inside an eye box via dark-blob thresholding.

        Returns (abs_x, abs_y, rel_x, rel_y) where rel_* are the pupil's
        position relative to the eye box (0..1, 0.5 = centered). Falls back
        to the box center when no dark blob is found.
        """
        x, y, w, h = eye_region
        eye_img = gray_frame[y:y+h, x:x+w]
        eye_img = cv2.GaussianBlur(eye_img, (7, 7), 0)
        # Pupil is the darkest region; invert-threshold makes it foreground
        _, threshold = cv2.threshold(eye_img, 50, 255, cv2.THRESH_BINARY_INV)
        contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            largest_contour = max(contours, key=cv2.contourArea)
            moments = cv2.moments(largest_contour)
            if moments['m00'] != 0:
                # Contour centroid from image moments
                cx = int(moments['m10'] / moments['m00'])
                cy = int(moments['m01'] / moments['m00'])
                rel_x = cx / w if w > 0 else 0.5
                rel_y = cy / h if h > 0 else 0.5
                return (x + cx, y + cy, rel_x, rel_y)
        return (x + w//2, y + h//2, 0.5, 0.5)

    def analyze_eye_gaze(self, eyes, gray_frame):
        """Classify gaze direction from the first two detected eyes.

        Returns (pupil_positions, gaze_direction, is_suspicious). Requires at
        least two eye boxes; otherwise reports "NO_EYES".
        """
        if len(eyes) < 2:
            return [], "NO_EYES", False
        pupil_positions = []
        for eye in eyes[:2]:
            pupil_pos = self.detect_pupil_in_eye(gray_frame, eye)
            pupil_positions.append(pupil_pos)
        # Average relative pupil position across both eyes
        avg_rel_x = sum(p[2] for p in pupil_positions) / len(pupil_positions)
        avg_rel_y = sum(p[3] for p in pupil_positions) / len(pupil_positions)
        # First matching branch wins: UP takes priority, then LEFT/RIGHT, then DOWN
        gaze_direction = "CENTER"
        is_suspicious = False
        if avg_rel_y < (0.5 - self.eye_vertical_threshold):
            gaze_direction = "EYES_UP"
            is_suspicious = True
        elif avg_rel_x < (0.5 - self.eye_center_threshold):
            gaze_direction = "EYES_LEFT"
            is_suspicious = True
        elif avg_rel_x > (0.5 + self.eye_center_threshold):
            gaze_direction = "EYES_RIGHT"
            is_suspicious = True
        elif avg_rel_y > (0.5 + self.eye_vertical_threshold):
            gaze_direction = "EYES_DOWN"
            is_suspicious = True
        return pupil_positions, gaze_direction, is_suspicious

    def process_violations(self, violation_type, details):
        """Log a violation and raise an audible alert, rate-limited by cooldown.

        Updates the global interview_state counters and violation log.
        """
        global interview_state
        current_time = datetime.now().timestamp()
        if (current_time - self.last_alert_time) > self.alert_cooldown:
            self.audio_player.play_warning()
            self.cheating_detected = True
            self.current_violation = f"{violation_type}: {details}"
            self.last_alert_time = current_time
            # Update global state
            interview_state['cheating_alerts'] += 1
            if violation_type == "FACE_TURN":
                interview_state['face_turn_alerts'] += 1
            elif violation_type == "MULTIPLE_FACES":
                interview_state['multiple_face_alerts'] += 1
            elif violation_type == "NO_FACE":
                interview_state['no_face_alerts'] += 1
            # Log violation
            interview_state['violations'].append({
                'timestamp': datetime.now().isoformat(),
                'type': violation_type,
                'details': details
            })
        # Reset the on-screen flag once 2s have passed since the last alert.
        # (Only reachable on calls where the cooldown branch above did not
        # fire, since that branch refreshes last_alert_time.)
        if self.cheating_detected and (current_time - self.last_alert_time) > 2.0:
            self.cheating_detected = False
            self.current_violation = None

    def draw_advanced_overlay(self, frame, faces, eyes, gray_frame, orientation, face_count):
        """Draw the full detection overlay onto `frame` (mutated) and run checks.

        Handles calibration progress, no-face / multiple-face debouncing, face
        position and eye-gaze analysis, and the violation banner.
        Returns the annotated frame.
        """
        frame_height, frame_width = frame.shape[:2]
        frame_center = (frame_width // 2, frame_height // 2)
        # Draw calibration status and feed frames into calibration
        if not self.calibrated:
            cv2.putText(frame, f'CALIBRATING... {self.calibration_frames}/30',
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
            if len(faces) > 0:
                self.calibrate_baseline((faces[0][0] + faces[0][2]//2, faces[0][1] + faces[0][3]//2))
        # Initialize status
        status_color = (0, 255, 0)  # Green
        status_text = "✓ NORMAL"
        violation_detected = False
        # Check face count: no face present (debounced ~0.5s at 30fps)
        if face_count == 0:
            self.no_face_counter += 1
            if self.no_face_counter > 15:
                status_text = "⚠ NO FACE DETECTED"
                status_color = (0, 0, 255)
                violation_detected = True
                self.process_violations("NO_FACE", "Face not visible")
        else:
            self.no_face_counter = 0
        # Check face count: more than one person (debounced 10 frames)
        if face_count > 1:
            self.multiple_face_counter += 1
            if self.multiple_face_counter > 10:
                status_text = f"⚠ {face_count} FACES DETECTED"
                status_color = (0, 0, 255)
                violation_detected = True
                self.process_violations("MULTIPLE_FACES", f"{face_count} people detected")
        else:
            self.multiple_face_counter = 0
        # Draw faces and analyze each one
        for i, (x, y, w, h) in enumerate(faces):
            # Face rectangle (red while a violation is active)
            face_color = (100, 200, 255) if not violation_detected else (0, 0, 255)
            cv2.rectangle(frame, (x, y), (x+w, y+h), face_color, 2)
            # Face center marker
            face_center = (x + w//2, y + h//2)
            cv2.circle(frame, face_center, 5, (255, 255, 0), -1)
            # Check face position (primary face only)
            if self.calibrated and i == 0:
                is_suspicious, direction, deviation = self.detect_face_position((x, y, w, h), frame_center)
                if is_suspicious:
                    status_text = f"⚠ FACE {direction}"
                    status_color = (0, 0, 255)
                    violation_detected = True
                    self.process_violations("FACE_TURN", direction)
                # Draw baseline reference and a line to the current center
                if self.baseline_face_center is not None:
                    baseline_int = tuple(self.baseline_face_center.astype(int))
                    cv2.circle(frame, baseline_int, 3, (0, 255, 255), -1)
                    cv2.line(frame, baseline_int, face_center, (255, 0, 255), 1)
                # Show deviation in pixels next to the face box
                cv2.putText(frame, f'Dev: {int(deviation)}px',
                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            # Detect eyes in this face
            face_eyes = self.detect_eyes_in_face(gray_frame, (x, y, w, h))
            # Analyze eye gaze (needs both eyes)
            if len(face_eyes) >= 2:
                pupil_positions, gaze_direction, gaze_suspicious = self.analyze_eye_gaze(face_eyes, gray_frame)
                # Draw eyes and pupils
                for j, (ex, ey, ew, eh) in enumerate(face_eyes):
                    eye_color = (0, 255, 0) if not gaze_suspicious else (0, 0, 255)
                    cv2.rectangle(frame, (ex, ey), (ex+ew, ey+eh), eye_color, 2)
                    # Draw pupil marker (only the first two have pupil data)
                    if j < len(pupil_positions):
                        pupil_x, pupil_y = pupil_positions[j][:2]
                        cv2.circle(frame, (pupil_x, pupil_y), 3, (255, 255, 0), -1)
                # Eye movement is a soft (orange) warning — it does not
                # override an already-detected hard violation.
                if gaze_suspicious and not violation_detected:
                    status_text = f"⚠ {gaze_direction}"
                    status_color = (0, 165, 255)  # Orange
                    self.process_violations("EYE_MOVEMENT", gaze_direction)
        # Display orientation
        cv2.putText(frame, f'Orientation: {orientation}',
                    (10, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        # Display face count
        cv2.putText(frame, f'Faces: {face_count}',
                    (10, frame_height - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        # Main status display
        cv2.putText(frame, status_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2)
        # Warning banner while a violation is active
        if self.cheating_detected and self.current_violation:
            cv2.rectangle(frame, (0, 50), (frame_width, 100), (0, 0, 255), -1)
            cv2.putText(frame, f'⚠ VIOLATION: {self.current_violation}',
                        (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        return frame

    def generate_frames(self):
        """Yield MJPEG frame chunks for the Flask streaming response.

        Without a camera, streams a static placeholder forever; otherwise
        reads, analyzes, annotates and encodes frames until read failure.
        """
        if not self.camera_available:
            # Placeholder loop: never exits, throttled to ~10fps.
            while True:
                frame = np.zeros((480, 640, 3), dtype=np.uint8)
                cv2.putText(frame, 'Camera Not Available', (150, 240),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                ret, buffer = cv2.imencode('.jpg', frame)
                yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
                time.sleep(0.1)
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Detect faces and orientation
            frontal_faces, profile_left, profile_right, face_count, orientation = \
                self.detect_faces_and_orientation(frame, gray)
            # Use frontal faces for detailed (eye-level) analysis
            all_eyes = []
            if len(frontal_faces) > 0:
                for face in frontal_faces:
                    face_eyes = self.detect_eyes_in_face(gray, face)
                    all_eyes.extend(face_eyes)
            # Draw all detections
            frame = self.draw_advanced_overlay(
                frame, frontal_faces, all_eyes, gray, orientation, face_count
            )
            # Draw profile faces (magenta)
            for (x, y, w, h) in profile_left:
                cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 255), 2)
                cv2.putText(frame, 'LEFT', (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
            for (x, y, w, h) in profile_right:
                # These were detected on the mirrored image; flip x back
                x_flipped = frame.shape[1] - x - w
                cv2.rectangle(frame, (x_flipped, y), (x_flipped+w, y+h), (255, 0, 255), 2)
                cv2.putText(frame, 'RIGHT', (x_flipped, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
            ret, buffer = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')

    def release(self):
        """Release the capture device (no-op when camera never opened)."""
        if self.camera_available:
            self.cap.release()
# Lazily-created singleton detector; constructed on first /video_feed request.
detector = None
# PDF Processing
def upload_pdf_to_gemini(pdf_path):
    """Upload a local PDF to the Gemini Files API and return its handle."""
    print(f"📤 Uploading: {pdf_path}")
    handle = genai.upload_file(pdf_path)
    print(f"✅ Upload complete: {handle.uri}")
    return handle
def generate_questions_from_pdf(pdf_path, num_questions=5):
    """Ask Gemini for `num_questions` interview questions based on a PDF.

    Uploads the PDF, prompts for a strictly numbered list, then parses
    lines of the form "1. ..." / "1) ..." into plain question strings.
    """
    uploaded_file = upload_pdf_to_gemini(pdf_path)
    prompt = f"""Generate exactly {num_questions} questions from this document.
RULES:
- Output ONLY numbered list from 1 to {num_questions}
- Format: "1. Question text here?"
- Each question on new line with question mark
- NO headings, explanations, or extra text
- Start with "1." end with "{num_questions}."
Generate {num_questions} questions now:"""
    model = genai.GenerativeModel("gemini-2.5-flash")
    response = model.generate_content([uploaded_file, prompt])
    # Keep only lines that start with "N." or "N)".
    numbered = re.compile(r'^\d+[\.\)]\s+(.+?)$')
    matches = (numbered.match(line.strip()) for line in response.text.strip().split('\n'))
    questions = [m.group(1).strip() for m in matches if m]
    return questions[:num_questions]
| # Speech Recognition | |
def recognize_speech(timeout=30):
    """Capture one spoken phrase from the default microphone and transcribe it.

    Uses Google's free speech API via the speech_recognition package.
    Returns the recognized text, or "" on silence, service error, or any
    microphone failure.
    """
    recognizer = sr.Recognizer()
    try:
        with sr.Microphone() as source:
            print("🎤 Adjusting for noise...")
            recognizer.adjust_for_ambient_noise(source, duration=1)
            print("🎤 Listening...")
            audio = recognizer.listen(source, timeout=timeout, phrase_time_limit=30)
            try:
                transcript = recognizer.recognize_google(audio)
            except sr.UnknownValueError:
                # Speech was unintelligible — treat as no answer.
                return ""
            except sr.RequestError as e:
                print(f"❌ Speech error: {e}")
                return ""
            print(f"✅ Recognized: {transcript}")
            return transcript
    except Exception as e:
        # Microphone unavailable / OS audio failure.
        print(f"❌ Microphone error: {e}")
        return ""
| # AI Detection | |
def check_ai_generated(text):
    """Return a 0-100 likelihood that `text` is AI-generated.

    Fails open: any API/parse error yields 0 ("human") so a flaky model
    call cannot block the interview.
    Fixes over the original: narrowed the bare `except:`, guarded the
    `re.search(...).group()` None-dereference, and clamped the score into
    the documented 0-100 range.
    """
    try:
        model = genai.GenerativeModel('gemini-2.5-flash')
        prompt = f"""Analyze if this text is AI-generated.
Text: {text}
Respond with ONLY a number 0-100:
0-20: Human
21-40: Likely human
41-60: Unclear
61-80: Likely AI
81-100: Definitely AI
Number only:"""
        response = model.generate_content(prompt)
        match = re.search(r'\d+', response.text)
        if match is None:
            # Model returned no number at all.
            return 0
        # Clamp: the model can return out-of-range values.
        return max(0, min(100, int(match.group())))
    except Exception:
        return 0
| # Answer Validation | |
def validate_answer(question, answer):
    """Grade `answer` against `question` using Gemini.

    Returns {'verdict': 'Correct'|'Incorrect'|'Error',
             'marks': int 0-10, 'explanation': str}.
    Defaults to Incorrect/0 when the model response cannot be parsed.
    """
    try:
        model = genai.GenerativeModel('gemini-2.5-flash')
        prompt = f"""Question: {question}
Answer: {answer}
Respond in exact format:
Verdict: [Correct/Incorrect]
Marks: [0-10]"""
        raw = model.generate_content(prompt).text
        verdict = re.search(r'Verdict:\s*(Correct|Incorrect)', raw, re.IGNORECASE)
        marks = re.search(r'Marks:\s*(\d+)', raw)
        graded = {'verdict': 'Incorrect', 'marks': 0, 'explanation': raw}
        if verdict:
            graded['verdict'] = verdict.group(1)
        if marks:
            graded['marks'] = int(marks.group(1))
        return graded
    except Exception as e:
        return {'verdict': 'Error', 'marks': 0, 'explanation': str(e)}
| # Flask Routes | |
def index():
    """Serve the embedded single-page UI (HTML_TEMPLATE, defined later in file).

    NOTE(review): no @app.route decorator is visible on any handler in this
    file — presumably stripped during formatting; confirm routes are
    registered (decorators or app.add_url_rule) before deploying.
    """
    return render_template_string(HTML_TEMPLATE)
def video_feed():
    """Stream MJPEG frames from the webcam with the proctoring overlay.

    Lazily constructs the module-level singleton detector on first call so
    the camera is not grabbed at import time.
    NOTE(review): no @app.route decorator is visible — presumably stripped
    during formatting; confirm route registration.
    """
    global detector
    if detector is None:
        detector = AdvancedEyeDetector()
    return Response(detector.generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
def upload_pdf():
    """Handle a PDF upload: save it, generate questions, reset interview state.

    Expects multipart field 'pdf' and optional form field 'num_questions'.
    Returns JSON {'success': bool, 'message': str, ['questions': [...]]}.
    Fixes over the original: case-insensitive '.pdf' check, non-numeric
    'num_questions' no longer raises an unhandled ValueError, and the saved
    temp file is cleaned up after processing.
    """
    global interview_state
    if 'pdf' not in request.files:
        return jsonify({'success': False, 'message': 'No PDF uploaded'})
    file = request.files['pdf']
    try:
        num_questions = int(request.form.get('num_questions', 5))
    except (TypeError, ValueError):
        return jsonify({'success': False, 'message': 'Invalid file'})
    # Case-insensitive extension check (was: .endswith('.pdf') only).
    if not file.filename or not file.filename.lower().endswith('.pdf'):
        return jsonify({'success': False, 'message': 'Invalid file'})
    filename = secure_filename(file.filename)
    filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(filepath)
    try:
        questions = generate_questions_from_pdf(filepath, num_questions)
        # Full reset of the interview session for the new question set.
        interview_state.update({
            'questions': questions,
            'current_question_index': 0,
            'answers': [],
            'scores': [],
            'total_score': 0,
            'cheating_alerts': 0,
            'face_turn_alerts': 0,
            'multiple_face_alerts': 0,
            'no_face_alerts': 0,
            'interview_started': False,
            'interview_completed': False,
            'violations': []
        })
        return jsonify({
            'success': True,
            'message': f'{len(questions)} questions generated',
            'questions': questions
        })
    except Exception as e:
        # Boundary handler: surface the error to the client as JSON.
        return jsonify({'success': False, 'message': f'Error: {str(e)}'})
    finally:
        # Best-effort cleanup of the uploaded temp file.
        try:
            os.remove(filepath)
        except OSError:
            pass
def start_interview():
    """Mark the interview as started and rewind to the first question."""
    global interview_state
    interview_state.update({
        'interview_started': True,
        'current_question_index': 0,
    })
    return jsonify({'success': True})
def get_current_question():
    """Return the pending question plus progress info, or failure when done."""
    global interview_state
    idx = interview_state['current_question_index']
    questions = interview_state['questions']
    # Guard clause: nothing left to ask.
    if idx >= len(questions):
        return jsonify({'success': False, 'message': 'No more questions'})
    return jsonify({
        'success': True,
        'question': questions[idx],
        'question_number': idx + 1,
        'total_questions': len(questions)
    })
def start_recording():
    """Record one spoken answer from the mic; success=False when empty."""
    spoken = recognize_speech(timeout=30)
    return jsonify({'success': bool(spoken), 'answer': spoken})
def submit_answer():
    """Grade a submitted answer, record it, and advance the interview.

    Body: JSON {'answer': str}. Returns the grading result and whether the
    interview is now complete.
    Fixes over the original: a missing/invalid JSON body no longer raises
    (request.json could be None), and submitting with no pending question
    returns a JSON error instead of an IndexError.
    """
    global interview_state
    data = request.get_json(silent=True) or {}
    answer = data.get('answer', '')
    idx = interview_state['current_question_index']
    if idx >= len(interview_state['questions']):
        return jsonify({'success': False, 'message': 'No question pending'})
    question = interview_state['questions'][idx]
    # AI-generated answers score zero outright at >= 80% confidence.
    ai_score = check_ai_generated(answer)
    if ai_score >= 80:
        result = {
            'verdict': 'AI-Generated',
            'marks': 0,
            'explanation': f'AI-generated detected ({ai_score}%)',
            'ai_score': ai_score
        }
    else:
        result = validate_answer(question, answer)
        result['ai_score'] = ai_score
    # Record the answer and advance.
    interview_state['answers'].append({
        'question': question,
        'answer': answer,
        'result': result
    })
    interview_state['scores'].append(result['marks'])
    interview_state['total_score'] += result['marks']
    interview_state['current_question_index'] += 1
    if interview_state['current_question_index'] >= len(interview_state['questions']):
        interview_state['interview_completed'] = True
    return jsonify({
        'success': True,
        'result': result,
        'interview_completed': interview_state['interview_completed']
    })
def get_interview_status():
    """Snapshot of interview progress and all proctoring counters."""
    state = interview_state
    payload = {
        'started': state['interview_started'],
        'completed': state['interview_completed'],
        # 1-based position of the question currently being asked.
        'current_question': state['current_question_index'] + 1,
        'total_questions': len(state['questions']),
        'total_score': state['total_score'],
        'cheating_alerts': state['cheating_alerts'],
        'face_turn_alerts': state['face_turn_alerts'],
        'multiple_face_alerts': state['multiple_face_alerts'],
        'no_face_alerts': state['no_face_alerts'],
    }
    return jsonify(payload)
def get_results():
    """Final report: score, violation counters, per-answer results, and log."""
    state = interview_state
    report = {
        'success': True,
        'total_score': state['total_score'],
        # Each question is graded out of 10.
        'max_score': len(state['questions']) * 10,
        'cheating_alerts': state['cheating_alerts'],
        'face_turn_alerts': state['face_turn_alerts'],
        'multiple_face_alerts': state['multiple_face_alerts'],
        'no_face_alerts': state['no_face_alerts'],
        'answers': state['answers'],
        'violations': state['violations'],
    }
    return jsonify(report)
| # HTML Template | |
| HTML_TEMPLATE = '''<!DOCTYPE html> | |
| <html lang="en"> | |
| <head> | |
| <meta charset="UTF-8"> | |
| <meta name="viewport" content="width=device-width, initial-scale=1.0"> | |
| <title>AI Interview System - Enhanced</title> | |
| <link href="https://fonts.googleapis.com/css2?family=Outfit:wght@300;400;600;700;900&family=JetBrains+Mono:wght@400;700&display=swap" rel="stylesheet"> | |
| <style> | |
| * { margin: 0; padding: 0; box-sizing: border-box; } | |
| :root { | |
| --primary: #6366f1; | |
| --primary-dark: #4f46e5; | |
| --secondary: #ec4899; | |
| --success: #10b981; | |
| --warning: #f59e0b; | |
| --danger: #ef4444; | |
| } | |
| body { | |
| font-family: 'Outfit', sans-serif; | |
| background: linear-gradient(135deg, #667eea 0%, #764ba2 50%, #f093fb 100%); | |
| min-height: 100vh; | |
| color: #1e293b; | |
| } | |
| body::before { | |
| content: ''; | |
| position: fixed; | |
| inset: 0; | |
| background: | |
| radial-gradient(circle at 20% 30%, rgba(99, 102, 241, 0.1) 0%, transparent 50%), | |
| radial-gradient(circle at 80% 70%, rgba(236, 72, 153, 0.1) 0%, transparent 50%); | |
| pointer-events: none; | |
| } | |
| .container { | |
| max-width: 1400px; | |
| margin: 0 auto; | |
| padding: 20px; | |
| position: relative; | |
| z-index: 1; | |
| } | |
| .header { | |
| background: rgba(255, 255, 255, 0.95); | |
| backdrop-filter: blur(20px); | |
| border-radius: 24px; | |
| padding: 32px; | |
| margin-bottom: 24px; | |
| box-shadow: 0 20px 60px rgba(0, 0, 0, 0.15); | |
| } | |
| h1 { | |
| font-size: 3em; | |
| font-weight: 900; | |
| background: linear-gradient(135deg, var(--primary), var(--secondary)); | |
| -webkit-background-clip: text; | |
| -webkit-text-fill-color: transparent; | |
| margin-bottom: 12px; | |
| letter-spacing: -1px; | |
| } | |
| .subtitle { | |
| font-size: 1.2em; | |
| color: #64748b; | |
| } | |
| .badge { | |
| display: inline-block; | |
| background: linear-gradient(135deg, var(--success), #059669); | |
| color: white; | |
| padding: 6px 14px; | |
| border-radius: 20px; | |
| font-size: 0.85em; | |
| font-weight: 600; | |
| margin-left: 12px; | |
| } | |
| .main-grid { | |
| display: grid; | |
| grid-template-columns: 1fr 380px; | |
| gap: 24px; | |
| } | |
| .card { | |
| background: rgba(255, 255, 255, 0.95); | |
| backdrop-filter: blur(20px); | |
| border-radius: 20px; | |
| padding: 28px; | |
| box-shadow: 0 10px 40px rgba(0, 0, 0, 0.1); | |
| } | |
| .card-title { | |
| font-size: 1.5em; | |
| font-weight: 700; | |
| margin-bottom: 20px; | |
| display: flex; | |
| align-items: center; | |
| gap: 12px; | |
| } | |
| .video-container { | |
| position: relative; | |
| border-radius: 16px; | |
| overflow: hidden; | |
| background: #000; | |
| box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2); | |
| } | |
| #video-stream { | |
| width: 100%; | |
| height: auto; | |
| display: block; | |
| } | |
| .status-badge { | |
| position: absolute; | |
| top: 16px; | |
| right: 16px; | |
| background: var(--danger); | |
| color: white; | |
| padding: 8px 16px; | |
| border-radius: 24px; | |
| font-weight: 600; | |
| font-size: 0.85em; | |
| display: flex; | |
| align-items: center; | |
| gap: 8px; | |
| animation: pulse 2s infinite; | |
| } | |
| @keyframes pulse { | |
| 0%, 100% { opacity: 1; transform: scale(1); } | |
| 50% { opacity: 0.8; transform: scale(1.05); } | |
| } | |
| .live-dot { | |
| width: 8px; | |
| height: 8px; | |
| background: white; | |
| border-radius: 50%; | |
| animation: blink 1s infinite; | |
| } | |
| @keyframes blink { | |
| 0%, 50% { opacity: 1; } | |
| 51%, 100% { opacity: 0.3; } | |
| } | |
| .upload-section { | |
| border: 3px dashed #e2e8f0; | |
| border-radius: 16px; | |
| padding: 32px; | |
| text-align: center; | |
| background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%); | |
| transition: all 0.3s ease; | |
| cursor: pointer; | |
| } | |
| .upload-section:hover { | |
| border-color: var(--primary); | |
| transform: translateY(-2px); | |
| } | |
| .upload-icon { | |
| font-size: 4em; | |
| margin-bottom: 16px; | |
| } | |
| .btn { | |
| background: linear-gradient(135deg, var(--primary), var(--primary-dark)); | |
| color: white; | |
| border: none; | |
| padding: 14px 28px; | |
| border-radius: 12px; | |
| font-size: 1em; | |
| font-weight: 600; | |
| cursor: pointer; | |
| transition: all 0.3s ease; | |
| font-family: 'Outfit', sans-serif; | |
| box-shadow: 0 4px 12px rgba(99, 102, 241, 0.3); | |
| } | |
| .btn:hover { | |
| transform: translateY(-2px); | |
| box-shadow: 0 6px 20px rgba(99, 102, 241, 0.4); | |
| } | |
| .btn-success { | |
| background: linear-gradient(135deg, var(--success), #059669); | |
| } | |
| .btn:disabled { | |
| opacity: 0.5; | |
| cursor: not-allowed; | |
| transform: none; | |
| } | |
| .question-display { | |
| background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%); | |
| padding: 28px; | |
| border-radius: 16px; | |
| margin-bottom: 24px; | |
| border-left: 5px solid var(--primary); | |
| font-size: 1.2em; | |
| font-weight: 500; | |
| min-height: 120px; | |
| display: flex; | |
| align-items: center; | |
| } | |
| .answer-box { | |
| background: white; | |
| border: 2px solid #e2e8f0; | |
| border-radius: 12px; | |
| padding: 16px; | |
| min-height: 120px; | |
| font-family: 'JetBrains Mono', monospace; | |
| font-size: 0.95em; | |
| margin-bottom: 16px; | |
| width: 100%; | |
| resize: vertical; | |
| } | |
| .stat-grid { | |
| display: grid; | |
| grid-template-columns: repeat(2, 1fr); | |
| gap: 16px; | |
| margin-bottom: 24px; | |
| } | |
| .stat-grid-4 { | |
| display: grid; | |
| grid-template-columns: repeat(2, 1fr); | |
| gap: 12px; | |
| margin-bottom: 24px; | |
| } | |
/* --- Stat tiles --------------------------------------------------------- */
/* Large stat tile used in the 2-column stat grid. */
.stat-card {
    background: linear-gradient(135deg, var(--primary), var(--primary-dark));
    color: white;
    padding: 20px;
    border-radius: 16px;
    text-align: center;
    box-shadow: 0 4px 16px rgba(99, 102, 241, 0.3);
}
/* Compact stat tile used in the 4-column stat grid. */
.stat-card-small {
    background: linear-gradient(135deg, var(--primary), var(--primary-dark));
    color: white;
    padding: 16px;
    border-radius: 12px;
    text-align: center;
    box-shadow: 0 4px 12px rgba(99, 102, 241, 0.3);
}
/* Color variants shared by both tile sizes. */
.stat-card.warning, .stat-card-small.warning {
    background: linear-gradient(135deg, var(--warning), #d97706);
}
.stat-card.success, .stat-card-small.success {
    background: linear-gradient(135deg, var(--success), #059669);
}
.stat-card.danger, .stat-card-small.danger {
    background: linear-gradient(135deg, var(--danger), #dc2626);
}
.stat-value {
    font-size: 2.5em;
    font-weight: 900;
}
.stat-value-small {
    font-size: 1.8em;
    font-weight: 700;
}
.stat-label {
    font-size: 0.9em;
    opacity: 0.9;
}
.stat-label-small {
    font-size: 0.75em;
    opacity: 0.9;
    margin-top: 4px;
}
/* --- Interview progress -------------------------------------------------- */
.progress-bar {
    background: #e2e8f0;
    height: 8px;
    border-radius: 8px;
    overflow: hidden;
    margin-bottom: 24px;
}
/* Width is driven from JS (updateProgress); starts collapsed at 0%. */
.progress-fill {
    background: linear-gradient(90deg, var(--primary), var(--secondary));
    height: 100%;
    width: 0%;
    transition: width 0.5s ease;
}
/* --- Inline alerts (hidden until showAlert() sets display: block) -------- */
.alert {
    padding: 16px 20px;
    border-radius: 12px;
    margin-bottom: 16px;
    font-weight: 500;
    display: none;
    animation: slideIn 0.3s ease;
}
@keyframes slideIn {
    from { opacity: 0; transform: translateY(-10px); }
    to { opacity: 1; transform: translateY(0); }
}
.alert.success { background: #d1fae5; color: #065f46; border: 2px solid var(--success); }
.alert.error { background: #fee2e2; color: #991b1b; border: 2px solid var(--danger); }
.alert.warning { background: #fef3c7; color: #92400e; border: 2px solid var(--warning); }
.alert.info { background: #dbeafe; color: #1e40af; border: 2px solid var(--primary); }
/* --- Recording banner (toggled via the .active class from JS) ------------ */
.recording-indicator {
    display: none;
    align-items: center;
    gap: 12px;
    padding: 16px;
    background: var(--danger);
    color: white;
    border-radius: 12px;
    margin-bottom: 16px;
    font-weight: 600;
}
.recording-indicator.active { display: flex; }
/* Blinking dot inside the recording banner.
   NOTE(review): the 'pulse' keyframes are not defined in this chunk —
   presumably declared earlier in the stylesheet; confirm. */
.recording-pulse {
    width: 12px;
    height: 12px;
    background: white;
    border-radius: 50%;
    animation: pulse 1s infinite;
}
/* --- Per-answer rows on the results card --------------------------------- */
.result-item {
    background: #f8fafc;
    padding: 20px;
    border-radius: 12px;
    margin-bottom: 16px;
    border-left: 4px solid var(--primary);
}
.result-item.ai-detected {
    border-left-color: var(--danger);
    background: #fef2f2;
}
.result-item.correct {
    border-left-color: var(--success);
    background: #f0fdf4;
}
/* --- Form controls -------------------------------------------------------- */
.input-group {
    margin-bottom: 20px;
}
.input-group label {
    display: block;
    margin-bottom: 8px;
    font-weight: 600;
}
.input-group select {
    width: 100%;
    padding: 12px 16px;
    border: 2px solid #e2e8f0;
    border-radius: 10px;
    font-size: 1em;
    font-family: 'Outfit', sans-serif;
}
/* --- Utilities ------------------------------------------------------------ */
.hidden { display: none !important; }
/* Inline spinner appended to button labels while awaiting the server. */
.loader {
    border: 3px solid #f3f3f3;
    border-top: 3px solid var(--primary);
    border-radius: 50%;
    width: 24px;
    height: 24px;
    animation: spin 1s linear infinite;
    display: inline-block;
    margin-left: 10px;
}
@keyframes spin {
    0% { transform: rotate(0deg); }
    100% { transform: rotate(360deg); }
}
/* --- Violation log on the results card ------------------------------------ */
.violation-log {
    max-height: 300px;
    overflow-y: auto;
    background: #f8fafc;
    padding: 16px;
    border-radius: 12px;
    margin-top: 16px;
}
.violation-item {
    background: white;
    padding: 12px;
    border-radius: 8px;
    margin-bottom: 8px;
    border-left: 3px solid var(--danger);
    font-size: 0.9em;
}
/* Collapse to a single column on narrow viewports. */
@media (max-width: 1024px) {
    .main-grid { grid-template-columns: 1fr; }
    h1 { font-size: 2.5em; }
}
| </style> | |
| </head> | |
<body>
<div class="container">
    <!-- Page banner -->
    <div class="header">
        <h1>🎯 AI Interview System <span class="badge">ENHANCED</span></h1>
        <p class="subtitle">Advanced Face Tracking + Eye Detection + Multi-Person Alert</p>
    </div>
    <div class="main-grid">
        <!-- Left column: upload -> interview -> results workflow.
             The three cards are shown/hidden in sequence by the page script. -->
        <div>
            <!-- Step 1: PDF upload and question-count selection -->
            <div class="card" id="uploadCard">
                <h2 class="card-title"><span>📄</span> Upload PDF</h2>
                <div class="upload-section" id="uploadSection">
                    <span class="upload-icon">📤</span>
                    <h3>Click or drag PDF here</h3>
                    <input type="file" id="pdfInput" accept=".pdf" style="display: none;">
                </div>
                <div class="input-group" style="margin-top: 20px;">
                    <label for="numQuestions">Questions:</label>
                    <select id="numQuestions">
                        <option value="3">3</option>
                        <option value="5" selected>5</option>
                        <option value="8">8</option>
                        <option value="10">10</option>
                    </select>
                </div>
                <div id="uploadAlert" class="alert"></div>
                <button class="btn" id="generateBtn" style="width: 100%; margin-top: 16px;" disabled>
                    Generate Questions
                </button>
            </div>
            <!-- Step 2: question display, voice recording, answer submission -->
            <div class="card hidden" id="interviewCard">
                <h2 class="card-title"><span>💬</span> Interview</h2>
                <div class="progress-bar">
                    <div class="progress-fill" id="progressBar"></div>
                </div>
                <div id="questionDisplay" class="question-display">
                    Click "Start Interview" to begin...
                </div>
                <div id="recordingIndicator" class="recording-indicator">
                    <div class="recording-pulse"></div>
                    <span>🎤 Recording...</span>
                </div>
                <div id="answerAlert" class="alert"></div>
                <!-- Read-only: filled with the speech-recognition transcript -->
                <textarea id="answerBox" class="answer-box" placeholder="Answer appears here..." readonly></textarea>
                <div style="display: flex; gap: 12px;">
                    <button class="btn btn-success hidden" id="startInterviewBtn" style="flex: 1;">
                        🎬 Start
                    </button>
                    <button class="btn" id="recordBtn" style="flex: 1;" disabled>
                        🎤 Record
                    </button>
                    <button class="btn btn-success" id="submitBtn" style="flex: 1;" disabled>
                        ✅ Submit
                    </button>
                </div>
            </div>
            <!-- Step 3: final score, per-category alert counts, transcript -->
            <div class="card hidden" id="resultsCard">
                <h2 class="card-title"><span>📊</span> Results</h2>
                <div class="stat-grid">
                    <div class="stat-card success">
                        <div class="stat-value" id="finalScore">0</div>
                        <div class="stat-label">Total Score</div>
                    </div>
                    <div class="stat-card danger">
                        <div class="stat-value" id="finalCheating">0</div>
                        <div class="stat-label">Total Alerts</div>
                    </div>
                </div>
                <div class="stat-grid-4">
                    <div class="stat-card-small danger">
                        <div class="stat-value-small" id="finalFaceTurn">0</div>
                        <div class="stat-label-small">Face Turns</div>
                    </div>
                    <div class="stat-card-small warning">
                        <div class="stat-value-small" id="finalMultiFace">0</div>
                        <div class="stat-label-small">Multi-Person</div>
                    </div>
                    <div class="stat-card-small danger">
                        <div class="stat-value-small" id="finalNoFace">0</div>
                        <div class="stat-label-small">No Face</div>
                    </div>
                    <div class="stat-card-small">
                        <div class="stat-value-small" id="finalEyeMove">0</div>
                        <div class="stat-label-small">Eye Movements</div>
                    </div>
                </div>
                <div id="resultsContainer"></div>
                <button class="btn" onclick="location.reload()" style="width: 100%; margin-top: 16px;">
                    🔄 New Interview
                </button>
            </div>
        </div>
        <!-- Right column: live camera stream and live statistics -->
        <div>
            <div class="card">
                <h2 class="card-title"><span>👁️</span> Advanced Tracking</h2>
                <div class="video-container">
                    <!-- MJPEG stream served by the Flask /video_feed route -->
                    <img id="video-stream" src="/video_feed">
                    <div class="status-badge">
                        <div class="live-dot"></div>
                        <span>LIVE</span>
                    </div>
                </div>
                <div style="margin-top: 16px; padding: 12px; background: #f8fafc; border-radius: 8px; font-size: 0.85em;">
                    <strong>Detection Features:</strong><br>
                    ✓ Face Left/Right/Up/Down turns<br>
                    ✓ Eye gaze tracking<br>
                    ✓ Multiple person detection<br>
                    ✓ Face disappearance alerts
                </div>
            </div>
            <!-- Counters refreshed by updateStats() every 2 s -->
            <div class="card" style="margin-top: 24px;">
                <h2 class="card-title"><span>📈</span> Live Statistics</h2>
                <div class="stat-grid">
                    <div class="stat-card">
                        <div class="stat-value" id="currentQ">0</div>
                        <div class="stat-label">Current</div>
                    </div>
                    <div class="stat-card success">
                        <div class="stat-value" id="scoreDisplay">0</div>
                        <div class="stat-label">Score</div>
                    </div>
                    <div class="stat-card warning">
                        <div class="stat-value" id="totalQ">0</div>
                        <div class="stat-label">Total Q</div>
                    </div>
                    <div class="stat-card danger">
                        <div class="stat-value" id="cheatingAlerts">0</div>
                        <div class="stat-label">Alerts</div>
                    </div>
                </div>
                <div class="stat-grid-4">
                    <div class="stat-card-small danger">
                        <div class="stat-value-small" id="faceTurnAlerts">0</div>
                        <div class="stat-label-small">Face Turns</div>
                    </div>
                    <div class="stat-card-small warning">
                        <div class="stat-value-small" id="multiFaceAlerts">0</div>
                        <div class="stat-label-small">Multi-Face</div>
                    </div>
                    <div class="stat-card-small danger">
                        <div class="stat-value-small" id="noFaceAlerts">0</div>
                        <div class="stat-label-small">No Face</div>
                    </div>
                    <div class="stat-card-small">
                        <div class="stat-value-small" id="eyeMoveAlerts">0</div>
                        <div class="stat-label-small">Eye Moves</div>
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>
| <script> | |
// Upload-flow state and element handles.
let currentAnswer = '';           // transcript of the most recent recording
let questionsGenerated = false;   // gates the periodic stats polling
const uploadSection = document.getElementById('uploadSection');
const pdfInput = document.getElementById('pdfInput');
const generateBtn = document.getElementById('generateBtn');
const uploadAlert = document.getElementById('uploadAlert');
// Clicking the drop zone proxies to the hidden file input.
uploadSection.addEventListener('click', () => pdfInput.click());
uploadSection.addEventListener('dragover', (e) => {
    e.preventDefault();
    uploadSection.style.borderColor = 'var(--primary)';
});
uploadSection.addEventListener('dragleave', () => {
    uploadSection.style.borderColor = '#e2e8f0';
});
uploadSection.addEventListener('drop', (e) => {
    e.preventDefault();
    uploadSection.style.borderColor = '#e2e8f0';
    if (e.dataTransfer.files.length) {
        pdfInput.files = e.dataTransfer.files;
        handleFileSelect();
    }
});
pdfInput.addEventListener('change', handleFileSelect);
// Reflect the chosen file in the drop zone and enable generation.
function handleFileSelect() {
    if (pdfInput.files.length > 0) {
        const file = pdfInput.files[0];
        // FIX: accept=".pdf" only filters the file picker — it does NOT
        // constrain drag-and-drop. Validate the extension here so both
        // selection paths reject non-PDF files before upload.
        if (!file.name.toLowerCase().endsWith('.pdf')) {
            showAlert(uploadAlert, 'error', '❌ Please choose a PDF file');
            generateBtn.disabled = true;
            return;
        }
        uploadSection.querySelector('h3').textContent = `📄 ${file.name}`;
        generateBtn.disabled = false;
    }
}
// Upload the chosen PDF plus the desired question count, then swap the
// upload card for the interview card once questions come back.
generateBtn.addEventListener('click', async () => {
    const form = new FormData();
    form.append('pdf', pdfInput.files[0]);
    form.append('num_questions', document.getElementById('numQuestions').value);
    generateBtn.disabled = true;
    generateBtn.innerHTML = '⏳ Generating... <span class="loader"></span>';
    showAlert(uploadAlert, 'info', '🔄 Processing...');
    // Re-arm the button after any failure so the user can retry.
    const restoreButton = () => {
        generateBtn.disabled = false;
        generateBtn.innerHTML = 'Generate Questions';
    };
    try {
        const resp = await fetch('/upload_pdf', { method: 'POST', body: form });
        const payload = await resp.json();
        if (!payload.success) {
            showAlert(uploadAlert, 'error', `❌ ${payload.message}`);
            restoreButton();
            return;
        }
        showAlert(uploadAlert, 'success', `✅ ${payload.message}`);
        questionsGenerated = true;
        document.getElementById('totalQ').textContent = payload.questions.length;
        document.getElementById('uploadCard').classList.add('hidden');
        document.getElementById('interviewCard').classList.remove('hidden');
        document.getElementById('startInterviewBtn').classList.remove('hidden');
    } catch (err) {
        showAlert(uploadAlert, 'error', `❌ ${err.message}`);
        restoreButton();
    }
});
// Interview-panel element handles (shared by the handlers below).
const startInterviewBtn = document.getElementById('startInterviewBtn');
const recordBtn = document.getElementById('recordBtn');
const submitBtn = document.getElementById('submitBtn');
const answerBox = document.getElementById('answerBox');
const questionDisplay = document.getElementById('questionDisplay');
const recordingIndicator = document.getElementById('recordingIndicator');
const answerAlert = document.getElementById('answerAlert');
// Kick off the interview server-side, then load the first question and
// enable the Record button.
startInterviewBtn.addEventListener('click', async () => {
    await fetch('/start_interview', { method: 'POST' });
    startInterviewBtn.classList.add('hidden');
    loadNextQuestion();
    recordBtn.disabled = false;
    updateStats();
});
// Fetch the active question from the server and reset the answer UI for it.
async function loadNextQuestion() {
    const res = await fetch('/get_current_question');
    const q = await res.json();
    if (!q.success) return;
    questionDisplay.innerHTML = `<div><strong>Q${q.question_number}:</strong><br>${q.question}</div>`;
    answerBox.value = '';
    currentAnswer = '';
    updateProgress(q.question_number, q.total_questions);
    updateStats();
}
// Trigger server-side speech capture; on success the transcript lands in
// the answer box and Submit unlocks, otherwise Record is re-armed.
recordBtn.addEventListener('click', async () => {
    recordBtn.disabled = true;
    recordingIndicator.classList.add('active');
    answerBox.value = '🎤 Listening...';
    try {
        const res = await fetch('/start_recording', { method: 'POST' });
        const payload = await res.json();
        recordingIndicator.classList.remove('active');
        if (payload.success && payload.answer) {
            currentAnswer = payload.answer;
            answerBox.value = payload.answer;
            submitBtn.disabled = false;
            showAlert(answerAlert, 'success', '✅ Recorded!');
            return;
        }
        answerBox.value = '';
        showAlert(answerAlert, 'error', '❌ Try again');
        recordBtn.disabled = false;
    } catch (err) {
        recordingIndicator.classList.remove('active');
        answerBox.value = '';
        showAlert(answerAlert, 'error', '❌ Error');
        recordBtn.disabled = false;
    }
});
// Submit the recorded answer for scoring, show the verdict, then either
// advance to the next question or (after the last one) show results.
submitBtn.addEventListener('click', async () => {
    if (!currentAnswer) {
        showAlert(answerAlert, 'warning', '⚠️ Record first!');
        return;
    }
    submitBtn.disabled = true;
    submitBtn.innerHTML = '⏳ Validating... <span class="loader"></span>';
    const resetSubmit = () => {
        submitBtn.disabled = false;
        submitBtn.innerHTML = '✅ Submit';
    };
    try {
        const response = await fetch('/submit_answer', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ answer: currentAnswer })
        });
        const data = await response.json();
        if (data.success) {
            const result = data.result;
            if (result.verdict === 'AI-Generated') {
                showAlert(answerAlert, 'error', `🤖 AI DETECTED! 0/10 (${result.ai_score}%)`);
            } else {
                // Color the verdict by score band: >=7 good, 4-6 middling, else bad.
                const type = result.marks >= 7 ? 'success' : result.marks >= 4 ? 'warning' : 'error';
                showAlert(answerAlert, type, `📊 ${result.verdict} - ${result.marks}/10`);
            }
            updateStats();
            if (data.interview_completed) {
                setTimeout(showResults, 2000);
            } else {
                setTimeout(() => {
                    loadNextQuestion();
                    recordBtn.disabled = false;
                    submitBtn.disabled = true;
                    submitBtn.innerHTML = '✅ Submit';
                    answerAlert.style.display = 'none';
                }, 3000);
            }
        } else {
            // FIX: a {success: false} response previously fell through with no
            // handling, leaving the button stuck on "Validating..." forever.
            showAlert(answerAlert, 'error', `❌ ${data.message || 'Submission failed'}`);
            resetSubmit();
        }
    } catch (error) {
        showAlert(answerAlert, 'error', `❌ ${error.message}`);
        resetSubmit();
    }
});
// Render the final results card: headline stats, per-answer details, and the
// violation log. FIX: transcript text (question/answer strings) is now
// HTML-escaped before injection — previously any '<' in an answer broke the
// markup (stored-XSS pattern) — and the markup is assembled in one string and
// assigned once instead of `innerHTML +=` per item (which re-parses the whole
// container on every iteration).
async function showResults() {
    const esc = (s) => String(s)
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;');
    document.getElementById('interviewCard').classList.add('hidden');
    document.getElementById('resultsCard').classList.remove('hidden');
    const response = await fetch('/get_results');
    const data = await response.json();
    document.getElementById('finalScore').textContent = `${data.total_score}/${data.max_score}`;
    document.getElementById('finalCheating').textContent = data.cheating_alerts;
    document.getElementById('finalFaceTurn').textContent = data.face_turn_alerts || 0;
    document.getElementById('finalMultiFace').textContent = data.multiple_face_alerts || 0;
    document.getElementById('finalNoFace').textContent = data.no_face_alerts || 0;
    // Eye-movement alerts are not reported directly; derive them as the
    // remainder of the total after the three itemized categories.
    const eyeMoveAlerts = data.cheating_alerts - (data.face_turn_alerts + data.multiple_face_alerts + data.no_face_alerts);
    document.getElementById('finalEyeMove').textContent = Math.max(0, eyeMoveAlerts);
    let html = '<h3 style="margin-bottom: 16px;">Answer Details:</h3>';
    data.answers.forEach((item, index) => {
        const isAI = item.result.verdict === 'AI-Generated';
        const isCorrect = item.result.marks >= 7;
        const className = isAI ? 'ai-detected' : isCorrect ? 'correct' : '';
        html += `
            <div class="result-item ${className}">
                <strong>Q${index + 1}:</strong> ${esc(item.question)}<br>
                <strong>Answer:</strong> ${esc(item.answer)}<br>
                <strong>Verdict:</strong> ${esc(item.result.verdict)}<br>
                <strong>Score:</strong> ${item.result.marks}/10
                ${isAI ? `<br><strong>AI:</strong> ${item.result.ai_score}%` : ''}
            </div>
        `;
    });
    // Append the violation log if the server recorded any.
    if (data.violations && data.violations.length > 0) {
        html += '<h3 style="margin-top: 24px; margin-bottom: 12px;">Violation Log:</h3>';
        html += '<div class="violation-log">';
        data.violations.forEach((v, i) => {
            const time = new Date(v.timestamp).toLocaleTimeString();
            html += `
                <div class="violation-item">
                    <strong>${i + 1}. ${time}</strong> - ${esc(v.type)}: ${esc(v.details)}
                </div>
            `;
        });
        html += '</div>';
    }
    document.getElementById('resultsContainer').innerHTML = html;
}
// Pull live counters from the server and refresh the dashboard tiles.
async function updateStats() {
    const res = await fetch('/get_interview_status');
    const stats = await res.json();
    const setText = (id, value) => {
        document.getElementById(id).textContent = value;
    };
    setText('currentQ', stats.current_question);
    setText('scoreDisplay', stats.total_score);
    setText('cheatingAlerts', stats.cheating_alerts);
    setText('faceTurnAlerts', stats.face_turn_alerts || 0);
    setText('multiFaceAlerts', stats.multiple_face_alerts || 0);
    setText('noFaceAlerts', stats.no_face_alerts || 0);
    // Eye movements are not reported directly; derive them as the remainder
    // of the total after the three itemized categories.
    const eyeMoveAlerts = stats.cheating_alerts - (stats.face_turn_alerts + stats.multiple_face_alerts + stats.no_face_alerts);
    setText('eyeMoveAlerts', Math.max(0, eyeMoveAlerts));
}
// Stretch the progress bar to the fraction of questions reached so far.
function updateProgress(current, total) {
    const pct = (current / total) * 100;
    document.getElementById('progressBar').style.width = `${pct}%`;
}
// Reveal an inline alert element styled by type ('success'|'error'|'warning'|'info').
function showAlert(element, type, message) {
    element.textContent = message;
    element.className = `alert ${type}`;
    element.style.display = 'block';
}
// If the MJPEG stream errors (camera busy, server restart), retry after 1 s
// with a cache-busting timestamp query so the browser re-requests the feed.
document.getElementById('video-stream').onerror = function() {
    setTimeout(() => {
        this.src = this.src.split('?')[0] + '?' + new Date().getTime();
    }, 1000);
};
// Poll the live statistics every 2 s once questions have been generated.
setInterval(() => {
    if (questionsGenerated) updateStats();
}, 2000);
| </script> | |
| </body> | |
| </html>''' | |
if __name__ == '__main__':
    # Operator-facing startup banner.
    print("=" * 80)
    print("🎯 AI INTERVIEW CONDUCT SYSTEM - ENHANCED VERSION")
    print("=" * 80)
    print("\n📱 Browser: http://localhost:5000")
    print("\n✅ ADVANCED Features:")
    print("   • PDF Question Generation")
    print("   • Speech Recognition + AI Detection")
    print("   • Real-time Eye Gaze Tracking")
    print("   • 🆕 Face Orientation Detection (Left/Right/Up/Down)")
    print("   • 🆕 Multiple Person Detection")
    print("   • 🆕 Face Disappearance Alerts")
    print("   • 🆕 Baseline Calibration System")
    print("   • 🆕 Detailed Violation Logging")
    print("\n📊 Detection Categories:")
    print("   1. Face Turns (Left/Right/Up/Down)")
    print("   2. Eye Movements (All directions)")
    print("   3. Multiple Faces")
    print("   4. No Face Detected")
    print("\n⚙️ Setup:")
    print("   1. Add GEMINI_API_KEY (line 28)")
    print("   2. pip install flask opencv-python google-generativeai")
    print("      SpeechRecognition pyaudio werkzeug numpy")
    print("\n🎯 Ctrl+C to stop")
    print("=" * 80 + "\n")
    try:
        # threaded=True so the MJPEG video feed and the JSON endpoints can be
        # served concurrently; debug=False avoids the reloader double-starting
        # the camera thread.
        app.run(host='0.0.0.0', port=5000, debug=False, threaded=True)
    except KeyboardInterrupt:
        print("\n\n✅ Stopped")
    finally:
        # FIX: release the camera on *every* exit path, not only Ctrl+C —
        # previously any other exception left the webcam locked.
        # NOTE(review): `detector` is assumed to be the module-level
        # camera/detector object created earlier in the file (not visible in
        # this chunk) — confirm.
        if detector:
            detector.release()
# NOTE(review): the lines below are not source code — they are Hugging Face
# "Xet Storage" page chrome accidentally captured when this file was saved.
# Commented out so the module remains valid Python; preserved verbatim.
# Xet Storage Details
# - Size:
# - 62.8 kB
# - Xet hash:
# - 0bd7c17d7313c45ff80cb0ad40bf45a75feb5a8dde71a1bef7f8fc31af016b70
# ·
# Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.