| import cv2 |
| import numpy as np |
| from scipy.signal import find_peaks |
| import matplotlib.pyplot as plt |
| import gradio as gr |
|
|
def extract_forehead_region(frame, face_cascade):
    """Return the forehead strip of the first detected face in *frame*.

    The frame is converted to grayscale for Haar-cascade detection; the
    forehead is approximated as the top 20% of the first face bounding box.
    Returns None when no face is found.
    """
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(grayscale, 1.1, 4)
    if not len(detections):
        return None
    x, y, w, h = detections[0]
    # Top fifth of the face box is used as a forehead proxy for rPPG sampling.
    return frame[y:y + int(0.2 * h), x:x + w]
|
|
def calculate_hrv(peaks):
    """Return heart-rate variability as the standard deviation of the
    successive peak-to-peak (RR) intervals derived from *peaks*.

    *peaks* are peak positions (e.g. frame indices); the result is in the
    same units as those positions.
    """
    rr_gaps = np.diff(peaks)
    return np.std(rr_gaps)
|
|
def estimate_respiratory_rate(signal_buffer, fps=30):
    """Estimate breaths per minute from a 1-D respiration signal.

    Peaks must be at least 2 seconds apart (enforced via find_peaks'
    ``distance``).  Returns the peak count scaled to a per-minute rate,
    or None when fewer than two peaks are detected.
    """
    breath_peaks, _ = find_peaks(signal_buffer, distance=2 * fps)
    if len(breath_peaks) <= 1:
        return None
    duration_sec = len(signal_buffer) / fps
    return len(breath_peaks) * 60.0 / duration_sec
|
|
def estimate_spo2(signal_buffer):
    """Return a crude SpO2 figure from mean red/green channel intensities.

    *signal_buffer* is an (N, 3) array of per-frame BGR means: column 1 is
    green, column 2 is red (OpenCV channel order).
    NOTE(review): the linear 100 - 5*(R/G) mapping is a heuristic, not a
    calibrated pulse-oximetry model.
    """
    mean_green = np.mean(signal_buffer[:, 1])
    mean_red = np.mean(signal_buffer[:, 2])
    return 100 - 5 * (mean_red / mean_green)
|
|
def get_heart_rate_from_video(video_path):
    """Analyze a facial video and derive rPPG-style vital-sign estimates.

    Frames are read sequentially; the mean forehead color is accumulated in
    256-frame buffers, and the de-trended green channel of each full buffer
    yields one BPM estimate.  Derived metrics (blood pressure, temperature,
    respiratory rate, SpO2, fatigue) are heuristic mappings from those
    signals.

    Returns:
        tuple: (results dict of labeled strings, path to the saved signal
        plot or None).  If no face is ever detected, returns an error dict
        and None.

    NOTE(review): a fixed 30 fps capture rate is assumed throughout (the
    /30.0 below and estimate_respiratory_rate's default) — confirm against
    cap.get(cv2.CAP_PROP_FPS) for arbitrary inputs.
    """
    # Stock OpenCV Haar cascade for frontal-face detection.
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    cap = cv2.VideoCapture(video_path)


    buffer_size = 256       # frames accumulated before each processing pass
    bpm_list = []           # one BPM estimate per completed buffer
    color_buffer = []       # per-frame mean BGR color of the forehead ROI
    signal_buffer = []      # zero-mean green-channel segments, one per buffer
    face_detected = False


    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break


        forehead = extract_forehead_region(frame, face_cascade)
        if forehead is not None:
            face_detected = True
            # Spatial mean over the ROI collapses each frame to one BGR triple.
            avg_color = np.mean(forehead, axis=(0, 1))
            color_buffer.append(avg_color)


        if len(color_buffer) >= buffer_size:
            color_buffer = np.array(color_buffer)
            green_channel = color_buffer[:, 1]
            # Remove the DC offset so peak detection sees the pulsatile part.
            green_channel = green_channel - np.mean(green_channel)
            signal_buffer.append(green_channel)
            # distance=15 frames caps detections at ~120 BPM at the assumed 30 fps.
            peaks, _ = find_peaks(green_channel, distance=15)


            if len(peaks) > 1:
                peak_intervals = np.diff(peaks)
                avg_interval = np.mean(peak_intervals)
                # Hard-coded 30 fps — TODO confirm with the video's real frame rate.
                avg_interval_sec = avg_interval / 30.0
                bpm = 60.0 / avg_interval_sec
                bpm_list.append(bpm)
                # NOTE(review): hrv is computed here but never used or returned.
                hrv = calculate_hrv(peaks)


            # Reset to start filling the next 256-frame buffer.
            color_buffer = []


    cap.release()


    if not face_detected:
        return {"Error": "No face detected or unsupported video."}, None


    results = {}


    if bpm_list:
        avg_bpm = np.mean(bpm_list)
        # NOTE(review): -15 is an uncalibrated fudge factor — confirm its origin.
        adjusted_bpm = avg_bpm - 15
        results['Heart Rate'] = f"{adjusted_bpm:.2f} BPM"
    else:
        results['Heart Rate'] = "No heart rate detected."


    # Heuristic linear mapping from heart rate — not a validated BP model.
    if bpm_list:
        systolic_bp = 120 + 0.5 * (avg_bpm - 60)
        diastolic_bp = 80 + 0.3 * (avg_bpm - 60)
        results['Blood Pressure'] = f"{systolic_bp:.2f}/{diastolic_bp:.2f} mmHg"
    else:
        results['Blood Pressure'] = "Unavailable"


    # Heuristic: scale temperature with tachycardia; otherwise a fixed baseline.
    if bpm_list and avg_bpm > 100:
        body_temperature = 37.0 + 0.1 * (avg_bpm - 100)
    else:
        body_temperature = 36.5
    results['Body Temperature'] = f"{body_temperature:.2f} °C"


    if signal_buffer:
        respiration_signal = np.concatenate(signal_buffer)
        respiratory_rate = estimate_respiratory_rate(respiration_signal)
        # NOTE(review): truthiness check also rejects a rate of exactly 0.0.
        if respiratory_rate:
            results['Respiratory Rate'] = f"{respiratory_rate:.2f} breaths/min"
        else:
            results['Respiratory Rate'] = "Unable to estimate"


    # Only leftover frames after the last full 256-frame buffer reach this path;
    # SpO2 is therefore skipped when the video length is an exact buffer multiple.
    if color_buffer:
        try:
            spo2_estimation = estimate_spo2(np.array(color_buffer))
            results['SpO2'] = f"{spo2_estimation:.2f}%"
        except:  # NOTE(review): bare except masks real errors — narrow if possible.
            results['SpO2'] = "Error estimating SpO2"


    if bpm_list:
        if adjusted_bpm < 60:
            results['Fatigue/Alertness'] = "Possible fatigue detected."
        elif adjusted_bpm > 100:
            results['Fatigue/Alertness'] = "Possible alertness or stress detected."
        else:
            results['Fatigue/Alertness'] = "Normal state detected."


    plot_path = None
    if signal_buffer:
        plt.figure(figsize=(12, 6))
        signal_array = np.concatenate(signal_buffer)
        plt.plot(signal_array, label='Green Channel Signal')
        plt.xlabel('Frame')
        plt.ylabel('Signal Intensity')
        plt.title('Green Channel Signal from Forehead')
        plt.legend()
        # Saved in the working directory; the path is returned for Gradio display.
        plot_path = "signal_plot.png"
        plt.savefig(plot_path)
        plt.close()


    return results, plot_path
|
|
def process_video(video_path):
    """Gradio entry point: run the full analysis pipeline on the uploaded video."""
    results, plot = get_heart_rate_from_video(video_path)
    return results, plot
|
|

# Static user-facing text composed into the Gradio interface description.
# String contents are emitted verbatim in the UI — do not reformat.
objective = "Objective: To develop a Gradio-based application that estimates vital signs using facial video.\n\n"
team_members = """
Team Members:
- Irfan Jamshed (ID#2697) - engr.irfan@must.edu.pk
- Adnan Munir (ID#2031) - adnanmunir41@yahoo.com
- Muhammad Hannan Rauf (ID#2421) - hananrauf1@gmail.com
- Waseem Hassan (ID#2807) - engr.waseem77@gmail.com
- Dua Javed (ID#647) - duaajaved321@gmail.com
"""
|
|
# Gradio UI wiring: one video input, JSON results + signal-plot image outputs.
# NOTE(review): gr.Video's `type` argument exists in Gradio 3.x but was removed
# in 4.x (video inputs always yield a filepath) — confirm the pinned version.
iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload Video", type="filepath", format="mp4"),
    outputs=[
        gr.JSON(label="Vital Signs"),
        gr.Image(label="Signal Plot")
    ],
    title="HeartSense: AI-Based Health Monitoring from Facial Video",
    description=objective + team_members
)


# share=True additionally exposes a temporary public Gradio link.
iface.launch(share=True)
|
|