Hussein El-Hadidy commited on
Commit ·
1ca9b28
1
Parent(s): f879444
Real Time Added
Browse files- CPR/CPRAnalyzer.py +380 -181
- CPR/chest_initializer.py +132 -142
- CPR/metrics_calculator.py +445 -14
- CPR/pose_estimation.py +3 -2
- CPR/posture_analyzer.py +137 -51
- CPR/quick_test.ipynb +0 -37
- CPR/role_classifier.py +155 -60
- CPR/shoulders_analyzer.py +31 -0
- CPR/wrists_midpoint_analyzer.py +62 -0
- CPRRealTime/analysis_socket_server.py +65 -0
- CPRRealTime/chest_initializer.py +154 -0
- CPRRealTime/client.py +34 -0
- CPRRealTime/graph_plotter.py +306 -0
- CPRRealTime/keypoints.py +22 -0
- CPRRealTime/logging_config.py +20 -0
- CPRRealTime/main.py +723 -0
- CPRRealTime/metrics_calculator.py +480 -0
- CPRRealTime/pose_estimation.py +49 -0
- CPRRealTime/posture_analyzer.py +132 -0
- CPRRealTime/role_classifier.py +178 -0
- CPRRealTime/shoulders_analyzer.py +30 -0
- CPRRealTime/threaded_camera.py +103 -0
- CPRRealTime/warnings_overlayer.py +147 -0
- CPRRealTime/wrists_midpoint_analyzer.py +63 -0
- backupbackend.zip → CPRRealTime/yolo11n-pose.pt +2 -2
- main.py +380 -120
- test.py +61 -0
CPR/CPRAnalyzer.py
CHANGED
|
@@ -9,159 +9,308 @@ from CPR.role_classifier import RoleClassifier
|
|
| 9 |
from CPR.chest_initializer import ChestInitializer
|
| 10 |
from CPR.metrics_calculator import MetricsCalculator
|
| 11 |
from CPR.posture_analyzer import PostureAnalyzer
|
|
|
|
|
|
|
| 12 |
import os
|
| 13 |
-
import uuid
|
|
|
|
| 14 |
|
| 15 |
class CPRAnalyzer:
|
| 16 |
"""Main CPR analysis pipeline with execution tracing"""
|
| 17 |
|
| 18 |
def __init__(self, video_path):
|
| 19 |
print(f"\n[INIT] Initializing CPR Analyzer for: {video_path}")
|
|
|
|
|
|
|
|
|
|
| 20 |
self.cap = cv2.VideoCapture(video_path)
|
| 21 |
if not self.cap.isOpened():
|
| 22 |
print("[ERROR] Failed to open video file")
|
| 23 |
return
|
|
|
|
| 24 |
|
|
|
|
| 25 |
self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
# Get screen dimensions
|
| 30 |
-
|
| 31 |
-
self.screen_width =
|
| 32 |
-
self.screen_height =
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
print(f"[DISPLAY] Detected screen resolution: {self.screen_width}x{self.screen_height}")
|
| 36 |
|
| 37 |
-
# Initialize components
|
| 38 |
-
self.pose_estimator = PoseEstimator()
|
| 39 |
self.role_classifier = RoleClassifier()
|
| 40 |
self.chest_initializer = ChestInitializer()
|
| 41 |
-
self.metrics_calculator = MetricsCalculator(shoulder_width_cm=45)
|
| 42 |
-
self.posture_analyzer = PostureAnalyzer()
|
| 43 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
self.collected_warnings = {}
|
| 45 |
|
| 46 |
-
self.average_compression_depth = 0
|
| 47 |
-
self.average_compression_rate = 0
|
| 48 |
|
| 49 |
-
#
|
| 50 |
self.window_name = "CPR Analysis"
|
| 51 |
-
|
| 52 |
-
print("[INIT]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
def run_analysis(self):
|
| 55 |
-
|
|
|
|
| 56 |
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
if ret:
|
| 66 |
-
self._handle_chest_point_rotation(frame)
|
| 67 |
-
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) # Reset to beginning
|
| 68 |
-
|
| 69 |
-
print(f"[INIT] Completed in {time.time()-start_time:.2f}s\n")
|
| 70 |
-
print("[PHASE] Starting main processing loop")
|
| 71 |
-
|
| 72 |
-
frame_counter = 0
|
| 73 |
-
while self.cap.isOpened():
|
| 74 |
-
ret, frame = self.cap.read()
|
| 75 |
-
if not ret:
|
| 76 |
-
print("\n[INFO] End of video stream reached")
|
| 77 |
-
break
|
| 78 |
-
|
| 79 |
-
frame = self._handle_frame_rotation(frame)
|
| 80 |
-
print(f"\n[FRAME {int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))}/{self.frame_count}] Processing")
|
| 81 |
-
|
| 82 |
-
frame = self._process_frame(frame)
|
| 83 |
-
|
| 84 |
-
self._display_frame(frame)
|
| 85 |
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
|
| 90 |
-
|
|
|
|
|
|
|
| 91 |
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
#
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
|
| 123 |
def _handle_frame_rotation(self, frame):
|
| 124 |
-
|
| 125 |
if frame.shape[1] > frame.shape[0]: # Width > Height
|
| 126 |
frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
|
| 127 |
return frame
|
| 128 |
|
| 129 |
def _process_frame(self, frame):
|
| 130 |
-
|
| 131 |
-
|
|
|
|
| 132 |
|
| 133 |
-
# Pose
|
| 134 |
pose_results = self.pose_estimator.detect_poses(frame)
|
|
|
|
|
|
|
| 135 |
if not pose_results:
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
frame = self.pose_estimator.draw_keypoints(frame, pose_results)
|
| 141 |
-
frame = self._analyze_rescuer(frame, pose_results)
|
| 142 |
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
return frame
|
| 147 |
-
|
| 148 |
-
def _analyze_rescuer(self, frame, pose_results):
|
| 149 |
-
"""Analyze rescuer with detailed logging"""
|
| 150 |
-
rescuer_id = self.role_classifier.find_rescuer(pose_results, frame.shape[:2])
|
| 151 |
-
if rescuer_id is None:
|
| 152 |
-
print("[RESCUER] No rescuer identified in frame")
|
| 153 |
-
return frame
|
| 154 |
-
|
| 155 |
-
print(f"[RESCUER] Identified at index {rescuer_id}")
|
| 156 |
-
keypoints = self.pose_estimator.get_keypoints(pose_results, rescuer_id)
|
| 157 |
|
| 158 |
-
#
|
| 159 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 164 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 165 |
for warning in warnings:
|
| 166 |
if warning not in self.collected_warnings:
|
| 167 |
self.collected_warnings[warning] = []
|
|
@@ -173,33 +322,86 @@ class CPRAnalyzer:
|
|
| 173 |
cv2.imwrite(file_path, frame)
|
| 174 |
self.collected_warnings[warning].append(file_path)
|
| 175 |
print(f"[CAPTURE] Saved warning screenshot: {file_path}")
|
|
|
|
|
|
|
| 176 |
|
| 177 |
if warnings:
|
| 178 |
-
print(f"[
|
|
|
|
| 179 |
else:
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
return frame
|
| 186 |
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
print(f"[BOX] Rescuer bounding box: {w}x{h} pixels")
|
| 194 |
-
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
|
| 195 |
-
cv2.putText(frame, "RESCUER", (x1, y1-10),
|
| 196 |
-
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
|
| 197 |
-
return frame
|
| 198 |
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 202 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 203 |
# Get original frame dimensions
|
| 204 |
h, w = frame.shape[:2]
|
| 205 |
if w == 0 or h == 0:
|
|
@@ -220,64 +422,61 @@ class CPRAnalyzer:
|
|
| 220 |
# Center window
|
| 221 |
pos_x = (self.screen_width - new_w) // 2
|
| 222 |
pos_y = (self.screen_height - new_h) // 2
|
| 223 |
-
|
| 224 |
|
| 225 |
-
|
| 226 |
-
print(f"[DISPLAY] Resized to {new_w}x{new_h} (scale: {scale:.2f})
|
| 227 |
|
| 228 |
-
def
|
| 229 |
-
"""Return a list of posture warning entries with image URLs and descriptions"""
|
| 230 |
-
result = []
|
| 231 |
-
for description, paths in self.collected_warnings.items():
|
| 232 |
-
for path in paths:
|
| 233 |
-
# You might want to convert local paths to URLs if you're serving them via FastAPI
|
| 234 |
-
result.append({
|
| 235 |
-
"image_url": f"{os.path.basename(path)}", # Adjust if hosted elsewhere
|
| 236 |
-
"description": description
|
| 237 |
-
})
|
| 238 |
-
return result
|
| 239 |
-
|
| 240 |
-
def get_compression_metrics(self):
|
| 241 |
-
"""Return average compression depth and rate"""
|
| 242 |
-
return {
|
| 243 |
-
"average_compression_depth": self.average_compression_depth,
|
| 244 |
-
"average_compression_rate": self.average_compression_rate
|
| 245 |
-
}
|
| 246 |
-
|
| 247 |
-
def _finalize_analysis(self):
|
| 248 |
-
"""Final analysis with detailed reporting"""
|
| 249 |
-
print("\n[PHASE] Starting final analysis")
|
| 250 |
-
start_time = time.time()
|
| 251 |
-
|
| 252 |
try:
|
| 253 |
-
|
| 254 |
-
|
| 255 |
|
| 256 |
-
print("[METRICS] Detecting compression peaks...")
|
| 257 |
self.metrics_calculator.detect_peaks()
|
|
|
|
| 258 |
|
| 259 |
-
print("[METRICS] Calculating depth and rate...")
|
| 260 |
depth, rate = self.metrics_calculator.calculate_metrics(
|
| 261 |
-
self.
|
| 262 |
-
self.cap.get(cv2.CAP_PROP_FPS)
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
print(
|
| 266 |
|
| 267 |
-
|
| 268 |
-
|
| 269 |
|
| 270 |
-
|
| 271 |
-
self.metrics_calculator.plot_motion_curve()
|
| 272 |
|
| 273 |
except Exception as e:
|
| 274 |
print(f"[ERROR] Metric calculation failed: {str(e)}")
|
| 275 |
-
|
| 276 |
-
finally:
|
| 277 |
-
self.cap.release()
|
| 278 |
-
#cv2.destroyAllWindows()
|
| 279 |
-
print(f"\n[ANALYSIS] Completed in {time.time()-start_time:.1f}s")
|
| 280 |
-
print("[CLEANUP] Resources released")
|
| 281 |
-
|
| 282 |
-
|
| 283 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
from CPR.chest_initializer import ChestInitializer
|
| 10 |
from CPR.metrics_calculator import MetricsCalculator
|
| 11 |
from CPR.posture_analyzer import PostureAnalyzer
|
| 12 |
+
from CPR.wrists_midpoint_analyzer import WristsMidpointAnalyzer
|
| 13 |
+
from CPR.shoulders_analyzer import ShouldersAnalyzer
|
| 14 |
import os
|
| 15 |
+
import uuid # For unique filenames
|
| 16 |
+
|
| 17 |
|
| 18 |
class CPRAnalyzer:
|
| 19 |
"""Main CPR analysis pipeline with execution tracing"""
|
| 20 |
|
| 21 |
def __init__(self, video_path):
|
| 22 |
print(f"\n[INIT] Initializing CPR Analyzer for: {video_path}")
|
| 23 |
+
|
| 24 |
+
self.video_path = video_path
|
| 25 |
+
#& Open video file
|
| 26 |
self.cap = cv2.VideoCapture(video_path)
|
| 27 |
if not self.cap.isOpened():
|
| 28 |
print("[ERROR] Failed to open video file")
|
| 29 |
return
|
| 30 |
+
print("[INIT] Video file opened successfully")
|
| 31 |
|
| 32 |
+
#& Get video properties
|
| 33 |
self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 34 |
+
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
|
| 35 |
+
print(f"[INIT] Video has {self.frame_count} frames at {self.fps:.2f} FPS")
|
| 36 |
+
|
| 37 |
+
#& Get screen dimensions
|
| 38 |
+
root = tk.Tk()
|
| 39 |
+
self.screen_width = root.winfo_screenwidth()
|
| 40 |
+
self.screen_height = root.winfo_screenheight()
|
| 41 |
+
root.destroy()
|
| 42 |
+
print(f"[INIT] Detected screen resolution: {self.screen_width}x{self.screen_height}")
|
|
|
|
| 43 |
|
| 44 |
+
#& Initialize system components
|
| 45 |
+
self.pose_estimator = PoseEstimator(min_confidence=0.5)
|
| 46 |
self.role_classifier = RoleClassifier()
|
| 47 |
self.chest_initializer = ChestInitializer()
|
| 48 |
+
self.metrics_calculator = MetricsCalculator(self.frame_count, shoulder_width_cm=45*0.65)
|
|
|
|
| 49 |
|
| 50 |
+
# if avg_right > self.right_arm_angle_threshold: error
|
| 51 |
+
# if avg_left < self.left_arm_angle_threshold: error
|
| 52 |
+
|
| 53 |
+
self.posture_analyzer = PostureAnalyzer(right_arm_angle_threshold=220, left_arm_angle_threshold=160, wrist_distance_threshold=170, history_length_to_average=10)
|
| 54 |
+
self.wrists_midpoint_analyzer = WristsMidpointAnalyzer()
|
| 55 |
+
self.shoulders_analyzer = ShouldersAnalyzer()
|
| 56 |
+
print("[INIT] System components initialized")
|
| 57 |
+
|
| 58 |
self.collected_warnings = {}
|
| 59 |
|
|
|
|
|
|
|
| 60 |
|
| 61 |
+
#& Configure display window
|
| 62 |
self.window_name = "CPR Analysis"
|
| 63 |
+
cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
|
| 64 |
+
print(f"[INIT] Window '{self.window_name}' created")
|
| 65 |
+
|
| 66 |
+
#& Keep track of previous results for continuity
|
| 67 |
+
self.prev_rescuer_processed_results = None
|
| 68 |
+
self.prev_patient_processed_results = None
|
| 69 |
+
self.prev_chest_params = None
|
| 70 |
+
self.prev_midpoint = None
|
| 71 |
+
self.prev_pose_results = None
|
| 72 |
+
print("[INIT] Previous results initialized")
|
| 73 |
+
|
| 74 |
+
#& Workaround for minor glitches
|
| 75 |
+
self.consecutive_frames_with_posture_errors = 0
|
| 76 |
+
self.max_consecutive_frames_with_posture_errors = 10
|
| 77 |
+
|
| 78 |
+
#& Initialize variables for reporting warnings
|
| 79 |
+
self.posture_errors_for_current_error_region = set()
|
| 80 |
+
|
| 81 |
+
#& Frequent depth and rate calculations
|
| 82 |
+
self.reporting_interval_in_seconds = 5
|
| 83 |
+
self.reporting_interval_in_frames = int(self.fps * self.reporting_interval_in_seconds)
|
| 84 |
+
print(f"[INIT] Reporting interval set to {self.reporting_interval_in_seconds} seconds ({self.reporting_interval_in_frames} frames)")
|
| 85 |
|
| 86 |
def run_analysis(self):
|
| 87 |
+
try:
|
| 88 |
+
print("\n[RUN ANALYSIS] Starting analysis")
|
| 89 |
|
| 90 |
+
main_loop_start_time = time.time()
|
| 91 |
+
|
| 92 |
+
#& Initialize Variables
|
| 93 |
+
# Handling chunks
|
| 94 |
+
first_time_to_have_a_proccessed_frame = True
|
| 95 |
+
waiting_to_start_new_chunk = False
|
| 96 |
+
# Hndling mini chunks
|
| 97 |
+
mini_chunk_start_frame_index = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 98 |
|
| 99 |
+
print("[RUN ANALYSIS] Starting main execution loop")
|
| 100 |
+
#& Main execution loop
|
| 101 |
+
while self.cap.isOpened():
|
| 102 |
+
#& Get frame number
|
| 103 |
+
# Retrieve the current position of the video frame being processed in the video capture object (self.cap).
|
| 104 |
+
frame_counter = self.cap.get(cv2.CAP_PROP_POS_FRAMES)
|
| 105 |
+
print(f"\n[FRAME {int(frame_counter)}/{self.frame_count}]")
|
| 106 |
+
|
| 107 |
+
#& Read frame
|
| 108 |
+
ret, frame = self.cap.read()
|
| 109 |
+
if not ret:
|
| 110 |
+
print("[ERROR] Failed to read frame or end of video reached")
|
| 111 |
+
break
|
| 112 |
+
print(f"[RUN ANALYSIS] Read frame")
|
| 113 |
|
| 114 |
+
#& Rotate frame
|
| 115 |
+
frame = self._handle_frame_rotation(frame)
|
| 116 |
+
print(f"[RUN ANALYSIS] Rotated frame")
|
| 117 |
|
| 118 |
+
#& Process frame
|
| 119 |
+
# Processing a frame means updating the values for the current and previous detections both in the CPR Analyzer and the system components it includes.
|
| 120 |
+
# The returned flags are:
|
| 121 |
+
# - is_complete_chunk: True if a "Posture Error" occurs in the frame, False otherwise.
|
| 122 |
+
# - accept_frame: True if the frame is accepted for further processing, False otherwise.
|
| 123 |
+
# Not that a frame containing an error could be accepted if the number of consecutive frames with errors is less than the threshold.
|
| 124 |
+
is_complete_chunk, accept_frame = self._process_frame(frame)
|
| 125 |
+
print(f"[RUN ANALYSIS] Processed frame")
|
| 126 |
+
|
| 127 |
+
#& Compose frame
|
| 128 |
+
# This function is responsible for drawing the data detected during the processing of the frame on it.
|
| 129 |
+
# The frame would not be displayed yet, just composed.
|
| 130 |
+
processed_frame = self._compose_frame(frame, accept_frame)
|
| 131 |
+
print(f"[RUN ANALYSIS] Composed frame")
|
| 132 |
+
|
| 133 |
+
#& Set the chunk start frame index for the first chunk
|
| 134 |
+
# Along the video when a failure in any step of the processing occurs, the variables are populated with the previous results to keep the analysis going.
|
| 135 |
+
# The problem occurs when the first few frames have a failure in the processing, and the variables are not populated yet.
|
| 136 |
+
# This is why the first chunk starts from the first frame that has been processed successfully.
|
| 137 |
+
if (processed_frame is not None) and first_time_to_have_a_proccessed_frame:
|
| 138 |
+
first_time_to_have_a_proccessed_frame = False
|
| 139 |
+
chunk_start_frame_index = frame_counter
|
| 140 |
+
mini_chunk_start_frame_index = frame_counter
|
| 141 |
+
print(f"[RUN ANALYSIS] First processed frame detected")
|
| 142 |
+
|
| 143 |
+
#& Set the chunk start frame index for the all chunks after the first one & append the errors detected in the error region before this chunk if any
|
| 144 |
+
# When a "Posture Error" occurs, a chunk is considered complete, and the program becomes ready to start a new chunk.
|
| 145 |
+
# is_complete_chunk is returned as true for every frame that has a "Posture Error" in it, and false for every other frame.
|
| 146 |
+
# This is why we need to wait for a frame with a false is_complete_chunk to start a new chunk.
|
| 147 |
+
if (waiting_to_start_new_chunk) and (not is_complete_chunk):
|
| 148 |
+
waiting_to_start_new_chunk = False
|
| 149 |
+
chunk_start_frame_index = frame_counter
|
| 150 |
+
mini_chunk_start_frame_index = frame_counter
|
| 151 |
+
print(f"[RUN ANALYSIS] A new chunk is starting")
|
| 152 |
+
|
| 153 |
+
if len(self.posture_errors_for_current_error_region) > 0:
|
| 154 |
+
self.posture_analyzer.posture_errors_for_all_error_region.append(self.posture_errors_for_current_error_region.copy())
|
| 155 |
+
self.posture_errors_for_current_error_region.clear()
|
| 156 |
+
print(f"[RUN ANALYSIS] Reset posture errors for current error region")
|
| 157 |
+
|
| 158 |
+
#& Process the current chunk or mini chunk if the conditions are met
|
| 159 |
+
process_chunk = (is_complete_chunk or frame_counter == self.frame_count - 1) and (not waiting_to_start_new_chunk)
|
| 160 |
+
process_mini_chunk = (frame_counter % self.reporting_interval_in_frames == 0) and (frame_counter != 0) and (mini_chunk_start_frame_index is not None) and (not is_complete_chunk)
|
| 161 |
+
|
| 162 |
+
if process_chunk:
|
| 163 |
+
print(f"[RUN ANALYSIS] Chunk completion detected")
|
| 164 |
+
|
| 165 |
+
# The difference here results from the fact a first middle chunk is terminated by a "Posture Error" which is a frame not included in the chunk.
|
| 166 |
+
# While the last chunk is terminated by the end of the video, which is a frame included in the chunk.
|
| 167 |
+
if is_complete_chunk:
|
| 168 |
+
chunk_end_frame_index = frame_counter - 1
|
| 169 |
+
elif frame_counter == self.frame_count - 1:
|
| 170 |
+
chunk_end_frame_index = frame_counter
|
| 171 |
+
print(f"[RUN ANALYSIS] Determined the last frame of the chunk")
|
| 172 |
+
|
| 173 |
+
depth, rate = self._calculate_rate_and_depth_for_chunk(chunk_start_frame_index, chunk_end_frame_index)
|
| 174 |
+
print(f"[RUN ANALYSIS] Calculated metrics for the chunk")
|
| 175 |
+
|
| 176 |
+
elif process_mini_chunk:
|
| 177 |
+
print(f"[RUN ANALYSIS] Mini chunk completion detected")
|
| 178 |
+
|
| 179 |
+
mini_chunk_end_frame_index = frame_counter
|
| 180 |
+
print(f"[RUN ANALYSIS] Determined the last frame of the mini chunk")
|
| 181 |
+
|
| 182 |
+
depth, rate = self._calculate_rate_and_depth_for_chunk(mini_chunk_start_frame_index, mini_chunk_end_frame_index)
|
| 183 |
+
print(f"[RUN ANALYSIS] Calculated metrics for the mini chunk")
|
| 184 |
+
|
| 185 |
+
if process_chunk or process_mini_chunk:
|
| 186 |
+
waiting_to_start_new_chunk = True
|
| 187 |
+
|
| 188 |
+
self.shoulders_analyzer.reset_shoulder_distances()
|
| 189 |
+
self.wrists_midpoint_analyzer.reset_midpoint_history()
|
| 190 |
+
print(f"[RUN ANALYSIS] Reset shoulder distances and midpoint history")
|
| 191 |
+
|
| 192 |
+
#& Display frame
|
| 193 |
+
if processed_frame is not None:
|
| 194 |
+
self._display_frame(processed_frame)
|
| 195 |
+
else:
|
| 196 |
+
self._display_frame(frame)
|
| 197 |
+
print(f"[RUN ANALYSIS] Displayed frame")
|
| 198 |
+
|
| 199 |
+
#& Check if the user wants to quit
|
| 200 |
+
if cv2.waitKey(1) == ord('q'):
|
| 201 |
+
print("\n[RUN ANALYSIS] Analysis interrupted by user")
|
| 202 |
+
break
|
| 203 |
+
|
| 204 |
+
main_loop_end_time = time.time()
|
| 205 |
+
elapsed_time = main_loop_end_time - main_loop_start_time
|
| 206 |
+
print(f"[TIMING] Main loop elapsed time: {elapsed_time:.2f}s")
|
| 207 |
+
|
| 208 |
+
except Exception as e:
|
| 209 |
+
print(f"[ERROR] An error occurred during main execution loop: {str(e)}")
|
| 210 |
+
|
| 211 |
+
finally:
|
| 212 |
+
report_and_plot_start_time = time.time()
|
| 213 |
+
|
| 214 |
+
#& Cleanup, calculate averages, and plot full motion curve
|
| 215 |
+
self.cap.release()
|
| 216 |
+
cv2.destroyAllWindows()
|
| 217 |
+
print("[RUN ANALYSIS] Released video capture and destroyed all windows")
|
| 218 |
+
|
| 219 |
+
self._calculate_rate_and_depth_for_all_chunks()
|
| 220 |
+
print("[RUN ANALYSIS] Calculated weighted averages of the metrics across all chunks")
|
| 221 |
+
|
| 222 |
+
graphResults = self._plot_full_motion_curve_for_all_chunks()
|
| 223 |
+
print("[RUN ANALYSIS] Plotted full motion curve")
|
| 224 |
+
|
| 225 |
+
report_and_plot_end_time = time.time()
|
| 226 |
+
report_and_plot_elapsed_time = report_and_plot_end_time - report_and_plot_start_time
|
| 227 |
+
print(f"[TIMING] Report and plot elapsed time: {report_and_plot_elapsed_time:.2f}s")
|
| 228 |
+
|
| 229 |
+
return self.metrics_calculator.annotate_video_with_chunks(self.video_path, self.posture_analyzer.posture_errors_for_all_error_region), graphResults, self.get_posture_warning_results(), self.metrics_calculator.get_json_chunk_data()
|
| 230 |
|
| 231 |
def _handle_frame_rotation(self, frame):
|
| 232 |
+
#! Till now, the code has only been testes on portrait videos.
|
| 233 |
if frame.shape[1] > frame.shape[0]: # Width > Height
|
| 234 |
frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
|
| 235 |
return frame
|
| 236 |
|
| 237 |
def _process_frame(self, frame):
|
| 238 |
+
#* Chunk Completion Check
|
| 239 |
+
is_complete_chunk = False
|
| 240 |
+
accept_frame = True
|
| 241 |
|
| 242 |
+
#& Pose Estimation
|
| 243 |
pose_results = self.pose_estimator.detect_poses(frame)
|
| 244 |
+
|
| 245 |
+
#~ Handle Failed Detection or Update Previous Results
|
| 246 |
if not pose_results:
|
| 247 |
+
pose_results = self.prev_pose_results
|
| 248 |
+
print("[POSE ESTIMATION] No pose detected, using previous results (could be None)")
|
| 249 |
+
else:
|
| 250 |
+
self.prev_pose_results = pose_results
|
|
|
|
|
|
|
| 251 |
|
| 252 |
+
if not pose_results:
|
| 253 |
+
print("[POSE ESTIMATION] Insufficient data for processing")
|
| 254 |
+
return is_complete_chunk, accept_frame
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 255 |
|
| 256 |
+
#& Rescuer and Patient Classification
|
| 257 |
+
rescuer_processed_results, patient_processed_results = self.role_classifier.classify_roles(pose_results, self.prev_rescuer_processed_results, self.prev_patient_processed_results)
|
| 258 |
+
|
| 259 |
+
#~ Handle Failed Classifications OR Update Previous Results
|
| 260 |
+
if not rescuer_processed_results:
|
| 261 |
+
rescuer_processed_results = self.prev_rescuer_processed_results
|
| 262 |
+
print("[ROLE CLASSIFICATION] No rescuer detected, using previous results (could be None)")
|
| 263 |
+
else:
|
| 264 |
+
self.prev_rescuer_processed_results = rescuer_processed_results
|
| 265 |
+
|
| 266 |
+
if not patient_processed_results:
|
| 267 |
+
patient_processed_results = self.prev_patient_processed_results
|
| 268 |
+
print("[ROLE CLASSIFICATION] No patient detected, using previous results (could be None)")
|
| 269 |
+
else:
|
| 270 |
+
self.prev_patient_processed_results = patient_processed_results
|
| 271 |
|
| 272 |
+
if not rescuer_processed_results or not patient_processed_results:
|
| 273 |
+
print("[ROLE CLASSIFICATION] Insufficient data for processing")
|
| 274 |
+
return is_complete_chunk, accept_frame
|
| 275 |
+
|
| 276 |
+
#^ Set Params in Role Classifier (to draw later)
|
| 277 |
+
self.role_classifier.rescuer_processed_results = rescuer_processed_results
|
| 278 |
+
self.role_classifier.patient_processed_results = patient_processed_results
|
| 279 |
+
print(f"[ROLE CLASSIFICATION] Updated role classifier with new results")
|
| 280 |
+
|
| 281 |
+
#& Chest Estimation
|
| 282 |
+
chest_params = self.chest_initializer.estimate_chest_region(patient_processed_results["keypoints"], patient_processed_results["bounding_box"], frame_width=frame.shape[1], frame_height=frame.shape[0])
|
| 283 |
+
|
| 284 |
+
#~ Handle Failed Estimation or Update Previous Results
|
| 285 |
+
if not chest_params:
|
| 286 |
+
chest_params = self.prev_chest_params
|
| 287 |
+
print("[CHEST ESTIMATION] No chest region detected, using previous results (could be None)")
|
| 288 |
+
else:
|
| 289 |
+
self.prev_chest_params = chest_params
|
| 290 |
+
|
| 291 |
+
if not chest_params:
|
| 292 |
+
print("[CHEST ESTIMATION] Insufficient data for processing")
|
| 293 |
+
return is_complete_chunk, accept_frame
|
| 294 |
+
|
| 295 |
+
#^ Set Params in Chest Initializer (to draw later)
|
| 296 |
+
self.chest_initializer.chest_params = chest_params
|
| 297 |
+
self.chest_initializer.chest_params_history.append(self.chest_initializer.chest_params)
|
| 298 |
+
|
| 299 |
+
#& Chest Expectation
|
| 300 |
+
# The estimation up to the last frame
|
| 301 |
+
expected_chest_params = self.chest_initializer.estimate_chest_region_weighted_avg(frame_width=frame.shape[1], frame_height=frame.shape[0])
|
| 302 |
+
|
| 303 |
+
#~ First "window_size" detections can't avg
|
| 304 |
+
if not expected_chest_params:
|
| 305 |
+
self.chest_initializer.expected_chest_params = self.chest_initializer.chest_params
|
| 306 |
+
else:
|
| 307 |
+
self.chest_initializer.expected_chest_params = expected_chest_params
|
| 308 |
|
| 309 |
+
#& Posture Analysis
|
| 310 |
+
# The midpoind of the last frame
|
| 311 |
+
warnings = self.posture_analyzer.validate_posture(rescuer_processed_results["keypoints"], self.prev_midpoint, self.chest_initializer.expected_chest_params)
|
| 312 |
+
|
| 313 |
+
##############################################################
|
| 314 |
for warning in warnings:
|
| 315 |
if warning not in self.collected_warnings:
|
| 316 |
self.collected_warnings[warning] = []
|
|
|
|
| 322 |
cv2.imwrite(file_path, frame)
|
| 323 |
self.collected_warnings[warning].append(file_path)
|
| 324 |
print(f"[CAPTURE] Saved warning screenshot: {file_path}")
|
| 325 |
+
|
| 326 |
+
##############################################################
|
| 327 |
|
| 328 |
if warnings:
|
| 329 |
+
print(f"[POSTURE ANALYSIS] Posture issues: {', '.join(warnings)}")
|
| 330 |
+
self.consecutive_frames_with_posture_errors += 1
|
| 331 |
else:
|
| 332 |
+
print("[POSTURE ANALYSIS] No posture issues detected")
|
| 333 |
+
self.consecutive_frames_with_posture_errors = 0
|
| 334 |
+
|
| 335 |
+
accept_frame = self.consecutive_frames_with_posture_errors < self.max_consecutive_frames_with_posture_errors
|
|
|
|
|
|
|
| 336 |
|
| 337 |
+
if accept_frame:
|
| 338 |
+
warnings = [] # Reset warnings if the frame is accepted
|
| 339 |
+
|
| 340 |
+
#^ Set Params in Posture Analyzer (to draw later)
|
| 341 |
+
self.posture_analyzer.warnings = warnings
|
| 342 |
+
print(f"[POSTURE ANALYSIS] Updated posture analyzer with new results")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 343 |
|
| 344 |
+
#& Wrist Midpoint Detection
|
| 345 |
+
midpoint = self.wrists_midpoint_analyzer.detect_wrists_midpoint(rescuer_processed_results["keypoints"])
|
| 346 |
+
|
| 347 |
+
#~ Handle Failed Detection or Update Previous Results
|
| 348 |
+
if not midpoint:
|
| 349 |
+
midpoint = self.prev_midpoint
|
| 350 |
+
print("[WRIST MIDPOINT DETECTION] No midpoint detected, using previous results (could be None)")
|
| 351 |
+
else:
|
| 352 |
+
self.prev_midpoint = midpoint
|
| 353 |
|
| 354 |
+
if not midpoint:
|
| 355 |
+
print("[WRIST MIDPOINT DETECTION] Insufficient data for processing")
|
| 356 |
+
return is_complete_chunk, accept_frame
|
| 357 |
+
|
| 358 |
+
if accept_frame:
|
| 359 |
+
#^ Set Params in Role Classifier (to draw later)
|
| 360 |
+
self.wrists_midpoint_analyzer.midpoint = midpoint
|
| 361 |
+
self.wrists_midpoint_analyzer.midpoint_history.append(midpoint)
|
| 362 |
+
print(f"[WRIST MIDPOINT DETECTION] Updated wrist midpoint analyzer with new results")
|
| 363 |
+
|
| 364 |
+
#& Shoulder Distance Calculation
|
| 365 |
+
shoulder_distance = self.shoulders_analyzer.calculate_shoulder_distance(rescuer_processed_results["keypoints"])
|
| 366 |
+
if shoulder_distance is not None:
|
| 367 |
+
self.shoulders_analyzer.shoulder_distance = shoulder_distance
|
| 368 |
+
self.shoulders_analyzer.shoulder_distance_history.append(shoulder_distance)
|
| 369 |
+
print(f"[SHOULDER DISTANCE] Updated shoulder distance analyzer with new results")
|
| 370 |
+
else:
|
| 371 |
+
#* Chunk Completion Check
|
| 372 |
+
is_complete_chunk = True
|
| 373 |
+
num_warnings_before = len(self.posture_errors_for_current_error_region)
|
| 374 |
+
|
| 375 |
+
for warning in warnings:
|
| 376 |
+
self.posture_errors_for_current_error_region.add(warning)
|
| 377 |
+
|
| 378 |
+
num_warnings_after = len(self.posture_errors_for_current_error_region)
|
| 379 |
+
|
| 380 |
+
if num_warnings_after > num_warnings_before:
|
| 381 |
+
print(f"[POSTURE ANALYSIS] Added warning to current error region: {warning}")
|
| 382 |
+
|
| 383 |
+
return is_complete_chunk, accept_frame
|
| 384 |
+
|
| 385 |
+
def _compose_frame(self, frame, accept_frame):
|
| 386 |
+
# Chest Region
|
| 387 |
+
if frame is not None:
|
| 388 |
+
frame = self.chest_initializer.draw_expected_chest_region(frame)
|
| 389 |
+
print(f"[VISUALIZATION] Drawn chest region")
|
| 390 |
+
|
| 391 |
+
# Warning Messages
|
| 392 |
+
if frame is not None:
|
| 393 |
+
frame = self.posture_analyzer.display_warnings(frame)
|
| 394 |
+
print(f"[VISUALIZATION] Drawn warnings")
|
| 395 |
+
|
| 396 |
+
if frame is not None:
|
| 397 |
+
if accept_frame:
|
| 398 |
+
# Midpoint
|
| 399 |
+
frame = self.wrists_midpoint_analyzer.draw_midpoint(frame)
|
| 400 |
+
print(f"[VISUALIZATION] Drawn midpoint")
|
| 401 |
+
|
| 402 |
+
return frame
|
| 403 |
+
|
| 404 |
+
def _display_frame(self, frame):
|
| 405 |
# Get original frame dimensions
|
| 406 |
h, w = frame.shape[:2]
|
| 407 |
if w == 0 or h == 0:
|
|
|
|
| 422 |
# Center window
|
| 423 |
pos_x = (self.screen_width - new_w) // 2
|
| 424 |
pos_y = (self.screen_height - new_h) // 2
|
| 425 |
+
cv2.moveWindow(self.window_name, pos_x, pos_y)
|
| 426 |
|
| 427 |
+
cv2.imshow(self.window_name, resized)
|
| 428 |
+
print(f"[DISPLAY FRAME] Resized to {new_w}x{new_h} (scale: {scale:.2f})")
|
| 429 |
|
| 430 |
+
def _calculate_rate_and_depth_for_chunk(self, chunk_start_frame_index, chunk_end_frame_index):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 431 |
try:
|
| 432 |
+
self.metrics_calculator.smooth_midpoints(self.wrists_midpoint_analyzer.midpoint_history)
|
| 433 |
+
print("[METRICS] Smoothed midpoints")
|
| 434 |
|
|
|
|
| 435 |
self.metrics_calculator.detect_peaks()
|
| 436 |
+
print("[METRICS] Detected peaks")
|
| 437 |
|
|
|
|
| 438 |
depth, rate = self.metrics_calculator.calculate_metrics(
|
| 439 |
+
self.shoulders_analyzer.shoulder_distance_history,
|
| 440 |
+
self.cap.get(cv2.CAP_PROP_FPS),
|
| 441 |
+
chunk_start_frame_index,
|
| 442 |
+
chunk_end_frame_index)
|
| 443 |
+
print("[METRICS] Calculated metrics")
|
| 444 |
|
| 445 |
+
if depth is None or rate is None:
|
| 446 |
+
print("[ERROR] Depth or rate calculation failed, likely due to insufficient data points (<2 peaks)")
|
| 447 |
|
| 448 |
+
return depth, rate
|
|
|
|
| 449 |
|
| 450 |
except Exception as e:
|
| 451 |
print(f"[ERROR] Metric calculation failed: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 452 |
|
| 453 |
+
def _calculate_rate_and_depth_for_all_chunks(self):
|
| 454 |
+
try:
|
| 455 |
+
self.metrics_calculator.calculate_weighted_averages()
|
| 456 |
+
print(f"[METRICS] Weighted averages calculated")
|
| 457 |
+
except Exception as e:
|
| 458 |
+
print(f"[ERROR] Failed to calculate weighted averages: {str(e)}")
|
| 459 |
+
|
| 460 |
+
def _plot_full_motion_curve_for_all_chunks(self):
|
| 461 |
+
try:
|
| 462 |
+
print("[PLOT] Full motion curve plotted")
|
| 463 |
+
return self.metrics_calculator.plot_motion_curve_for_all_chunks(self.posture_analyzer.posture_errors_for_all_error_region)
|
| 464 |
+
except Exception as e:
|
| 465 |
+
print(f"[ERROR] Failed to plot full motion curve: {str(e)}")
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def get_posture_warning_results(self):
|
| 469 |
+
"""Return a list of posture warning entries with image URLs and descriptions"""
|
| 470 |
+
result = []
|
| 471 |
+
|
| 472 |
+
for description, paths in self.collected_warnings.items():
|
| 473 |
+
for path in paths:
|
| 474 |
+
# You might want to convert local paths to URLs if you're serving them via FastAPI
|
| 475 |
+
if (len(self.posture_analyzer.posture_errors_for_all_error_region) > 0):
|
| 476 |
+
# If the error region is not empty, append the image URL and description
|
| 477 |
+
result.append({
|
| 478 |
+
"image_url": f"{os.path.basename(path)}", # Adjust if hosted elsewhere
|
| 479 |
+
"description": description
|
| 480 |
+
})
|
| 481 |
+
|
| 482 |
+
return result
|
CPR/chest_initializer.py
CHANGED
|
@@ -1,153 +1,143 @@
|
|
| 1 |
-
# chest_initializer.py
|
| 2 |
import cv2
|
| 3 |
import numpy as np
|
| 4 |
from CPR.keypoints import CocoKeypoints
|
| 5 |
|
| 6 |
class ChestInitializer:
|
| 7 |
-
"""Handles chest point detection with
|
| 8 |
|
| 9 |
-
def __init__(self
|
| 10 |
-
self.
|
| 11 |
-
self.
|
| 12 |
-
self.
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |
-
#
|
| 72 |
-
|
| 73 |
-
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
|
| 74 |
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
if (confs[i][CocoKeypoints.LEFT_SHOULDER.value] < self.min_confidence or
|
| 91 |
-
confs[i][CocoKeypoints.RIGHT_SHOULDER.value] < self.min_confidence):
|
| 92 |
-
continue
|
| 93 |
-
|
| 94 |
-
return (kp[CocoKeypoints.LEFT_SHOULDER.value],
|
| 95 |
-
kp[CocoKeypoints.RIGHT_SHOULDER.value],
|
| 96 |
-
kp[CocoKeypoints.LEFT_HIP.value],
|
| 97 |
-
kp[CocoKeypoints.RIGHT_HIP.value])
|
| 98 |
-
|
| 99 |
-
return None
|
| 100 |
-
|
| 101 |
-
def _calculate_chest_point(self, cap):
|
| 102 |
-
"""Calculate final chest point from valid samples"""
|
| 103 |
-
if not self.shoulder_samples:
|
| 104 |
-
return
|
| 105 |
-
|
| 106 |
-
avg_left = np.median([s[0] for s in self.shoulder_samples], axis=0)
|
| 107 |
-
avg_right = np.median([s[1] for s in self.shoulder_samples], axis=0)
|
| 108 |
-
avg_left_hip = np.median([h[0] for h in self.hip_samples], axis=0)
|
| 109 |
-
avg_right_hip = np.median([h[1] for h in self.hip_samples], axis=0)
|
| 110 |
-
|
| 111 |
-
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
| 112 |
-
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
| 113 |
|
| 114 |
-
|
| 115 |
-
avg_right_px = avg_right * np.array([frame_width, frame_height])
|
| 116 |
-
avg_left_hip_px = avg_left_hip * np.array([frame_width, frame_height])
|
| 117 |
-
avg_right_hip_px = avg_right_hip * np.array([frame_width, frame_height])
|
| 118 |
-
|
| 119 |
-
#midpoint = (avg_left_px + avg_right_px) / 2
|
| 120 |
-
#shoulder_dist = np.linalg.norm(avg_left_px - avg_right_px)
|
| 121 |
-
#downward_offset = 0.4 * shoulder_dist
|
| 122 |
-
#self.chest_point = (int(midpoint[0]), int(midpoint[1] + downward_offset))
|
| 123 |
-
|
| 124 |
-
if avg_left_px[1] < avg_right_px[1]:
|
| 125 |
-
shoulder = np.array(avg_left_px)
|
| 126 |
-
hip = np.array(avg_left_hip_px)
|
| 127 |
-
else:
|
| 128 |
-
shoulder = np.array(avg_right_px)
|
| 129 |
-
hip = np.array(avg_right_hip_px)
|
| 130 |
-
|
| 131 |
-
alpha = 0.412 # Relative chest position between shoulder and hip
|
| 132 |
-
offset = 10 # move 10 pixels upward into the body
|
| 133 |
-
self.chest_point = (
|
| 134 |
-
int(shoulder[0] + alpha * (hip[0] - shoulder[0])),
|
| 135 |
-
int(shoulder[1] + alpha * (hip[1] - shoulder[1])) - offset
|
| 136 |
-
)
|
| 137 |
-
|
| 138 |
-
# Visualize the chest point in the debug window for 2 seconds
|
| 139 |
-
cap.set(cv2.CAP_PROP_POS_FRAMES, 0) # Reset to the first frame
|
| 140 |
-
ret, frame = cap.read()
|
| 141 |
-
if ret:
|
| 142 |
-
frame_with_marker = self.draw_chest_marker(frame)
|
| 143 |
-
#cv2.imshow(self.debug_window, frame_with_marker)
|
| 144 |
-
cv2.waitKey(2000) # Wait for 2 seconds
|
| 145 |
-
|
| 146 |
-
def draw_chest_marker(self, frame):
|
| 147 |
-
"""Draw chest point with visualization"""
|
| 148 |
-
print(f"Chest point: {self.chest_point}")
|
| 149 |
-
if self.chest_point:
|
| 150 |
-
cv2.circle(frame, self.chest_point, 8, (0, 55, 120), -1)
|
| 151 |
-
cv2.putText(frame, "Chest", (self.chest_point[0] + 5, self.chest_point[1] - 10),
|
| 152 |
-
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
|
| 153 |
-
return frame
|
|
|
|
|
|
|
| 1 |
import cv2
|
| 2 |
import numpy as np
|
| 3 |
from CPR.keypoints import CocoKeypoints
|
| 4 |
|
| 5 |
class ChestInitializer:
|
| 6 |
+
"""Handles chest point detection with validations in estimation."""
|
| 7 |
|
| 8 |
+
def __init__(self):
|
| 9 |
+
self.chest_params = None
|
| 10 |
+
self.chest_params_history = []
|
| 11 |
+
self.expected_chest_params = None
|
| 12 |
+
|
| 13 |
+
def estimate_chest_region(self, keypoints, bounding_box, frame_width, frame_height):
|
| 14 |
+
"""Estimate and validate chest region. Returns (cx, cy, cw, ch) or None."""
|
| 15 |
+
try:
|
| 16 |
+
# Unpack bounding box and calculate shoulder dimensions
|
| 17 |
+
bbox_x1, bbox_y1, bbox_x2, bbox_y2 = bounding_box
|
| 18 |
+
bbox_delta_y = abs(bbox_y2 - bbox_y1)
|
| 19 |
+
|
| 20 |
+
# Keypoints for shoulders
|
| 21 |
+
left_shoulder = keypoints[CocoKeypoints.LEFT_SHOULDER.value]
|
| 22 |
+
right_shoulder = keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
|
| 23 |
+
|
| 24 |
+
# Midpoints calculation
|
| 25 |
+
shoulder_center = np.array([(left_shoulder[0] + right_shoulder[0]) / 2,
|
| 26 |
+
(left_shoulder[1] + right_shoulder[1]) / 2])
|
| 27 |
+
|
| 28 |
+
# Calculate chest center by applying directional adjustment separately for x and y
|
| 29 |
+
chest_center_from_shoulder_x = shoulder_center[0] - 0.3 * bbox_delta_y
|
| 30 |
+
chest_center_from_shoulder_y = shoulder_center[1] - 0.1 * bbox_delta_y
|
| 31 |
+
chest_center_from_shoulder = np.array([chest_center_from_shoulder_x, chest_center_from_shoulder_y])
|
| 32 |
+
|
| 33 |
+
# Chest dimensions (85% of shoulder width, 40% height)
|
| 34 |
+
chest_dx = bbox_delta_y * 0.8
|
| 35 |
+
chest_dy = bbox_delta_y * 1.75
|
| 36 |
+
|
| 37 |
+
# Calculate region coordinates
|
| 38 |
+
x1 = chest_center_from_shoulder[0] - chest_dx / 2
|
| 39 |
+
y1 = chest_center_from_shoulder[1] - chest_dy / 2
|
| 40 |
+
x2 = chest_center_from_shoulder[0] + chest_dx / 2
|
| 41 |
+
y2 = chest_center_from_shoulder[1] + chest_dy / 2
|
| 42 |
+
|
| 43 |
+
# Clamp to frame boundaries
|
| 44 |
+
x1 = max(0, min(x1, frame_width - 1))
|
| 45 |
+
y1 = max(0, min(y1, frame_height - 1))
|
| 46 |
+
x2 = max(0, min(x2, frame_width - 1))
|
| 47 |
+
y2 = max(0, min(y2, frame_height - 1))
|
| 48 |
+
|
| 49 |
+
# Check validity
|
| 50 |
+
if x2 <= x1 or y2 <= y1:
|
| 51 |
+
return None
|
| 52 |
+
|
| 53 |
+
# Adjusted parameters
|
| 54 |
+
cx = (x1 + x2) / 2
|
| 55 |
+
cy = (y1 + y2) / 2
|
| 56 |
+
cw = x2 - x1
|
| 57 |
+
ch = y2 - y1
|
| 58 |
+
|
| 59 |
+
return (cx, cy, cw, ch)
|
| 60 |
+
|
| 61 |
+
except (IndexError, TypeError, ValueError) as e:
|
| 62 |
+
print(f"Chest estimation error: {e}")
|
| 63 |
+
return None
|
| 64 |
+
|
| 65 |
+
def estimate_chest_region_weighted_avg(self, frame_width, frame_height, window_size=60, min_samples=3):
|
| 66 |
+
"""
|
| 67 |
+
Calculate stabilized chest parameters using weighted averaging with boundary checks.
|
| 68 |
|
| 69 |
+
Args:
|
| 70 |
+
self.chest_params_history: List of recent chest parameters [(cx, cy, cw, ch), ...]
|
| 71 |
+
frame_width: Width of the video frame
|
| 72 |
+
frame_height: Height of the video frame
|
| 73 |
+
window_size: Number of recent frames to consider (default: 5)
|
| 74 |
+
min_samples: Minimum valid samples required (default: 3)
|
| 75 |
+
|
| 76 |
+
Returns:
|
| 77 |
+
Tuple of (cx, cy, cw, ch) as integers within frame boundaries,
|
| 78 |
+
or None if insufficient data or invalid rectangle
|
| 79 |
+
"""
|
| 80 |
+
if not self.chest_params_history:
|
| 81 |
+
return None
|
| 82 |
|
| 83 |
+
# Filter out None values and get recent frames
|
| 84 |
+
valid_history = [h for h in self.chest_params_history[-window_size:] if h is not None]
|
|
|
|
| 85 |
|
| 86 |
+
if len(valid_history) < min_samples:
|
| 87 |
+
return None
|
| 88 |
+
|
| 89 |
+
# Convert to numpy array (preserve floating-point precision)
|
| 90 |
+
history_array = np.array(valid_history, dtype=np.float32)
|
| 91 |
+
|
| 92 |
+
# Exponential weights (stronger emphasis on recent frames)
|
| 93 |
+
weights = np.exp(np.linspace(1, 3, len(history_array)))
|
| 94 |
+
weights /= weights.sum()
|
| 95 |
+
|
| 96 |
+
try:
|
| 97 |
+
# Calculate weighted average in float space
|
| 98 |
+
cx, cy, cw, ch = np.average(history_array, axis=0, weights=weights)
|
| 99 |
+
|
| 100 |
+
# Convert to rectangle coordinates (still floating point)
|
| 101 |
+
x1 = max(0.0, cx - cw/2)
|
| 102 |
+
y1 = max(0.0, cy - ch/2)
|
| 103 |
+
x2 = min(float(frame_width - 1), cx + cw/2)
|
| 104 |
+
y2 = min(float(frame_height - 1), cy + ch/2)
|
| 105 |
+
|
| 106 |
+
# Only round to integers after all calculations
|
| 107 |
+
x1, y1, x2, y2 = map(round, [x1, y1, x2, y2])
|
| 108 |
+
|
| 109 |
+
# Validate rectangle
|
| 110 |
+
if x2 <= x1 or y2 <= y1:
|
| 111 |
+
return None
|
| 112 |
+
|
| 113 |
+
return (
|
| 114 |
+
(x1 + x2) // 2, # cx
|
| 115 |
+
(y1 + y2) // 2, # cy
|
| 116 |
+
x2 - x1, # cw
|
| 117 |
+
y2 - y1 # ch
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
except Exception as e:
|
| 121 |
+
print(f"Chest region estimation error: {e}")
|
| 122 |
+
return None
|
| 123 |
+
|
| 124 |
+
def draw_expected_chest_region(self, frame):
|
| 125 |
+
"""Draws the chest region without validation."""
|
| 126 |
+
if self.expected_chest_params is None:
|
| 127 |
+
return frame
|
| 128 |
|
| 129 |
+
cx, cy, cw, ch = self.expected_chest_params
|
| 130 |
+
x1 = int(cx - cw / 2)
|
| 131 |
+
y1 = int(cy - ch / 2)
|
| 132 |
+
x2 = int(cx + cw / 2)
|
| 133 |
+
y2 = int(cy + ch / 2)
|
| 134 |
+
|
| 135 |
+
# Draw rectangle and center
|
| 136 |
+
cv2.rectangle(frame, (x1, y1), (x2, y2), (128, 128, 0), 5)
|
| 137 |
+
|
| 138 |
+
cv2.circle(frame, (int(cx), int(cy)), 8, (128, 128, 0), -1)
|
| 139 |
+
|
| 140 |
+
cv2.putText(frame, "EXPECTED CHEST", (x1, max(10, y1 - 5)),
|
| 141 |
+
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (128, 128, 0), 2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
|
| 143 |
+
return frame
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
CPR/metrics_calculator.py
CHANGED
|
@@ -2,11 +2,18 @@
|
|
| 2 |
import numpy as np
|
| 3 |
from scipy.signal import savgol_filter, find_peaks
|
| 4 |
import matplotlib.pyplot as plt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
class MetricsCalculator:
|
| 7 |
"""Rate and depth calculation from motion data with improved peak detection"""
|
| 8 |
|
| 9 |
-
def __init__(self, shoulder_width_cm
|
| 10 |
self.shoulder_width_cm = shoulder_width_cm
|
| 11 |
self.peaks = np.array([])
|
| 12 |
self.peaks_max = np.array([])
|
|
@@ -16,6 +23,28 @@ class MetricsCalculator:
|
|
| 16 |
self.midpoints_list = np.array([])
|
| 17 |
self.shoulder_distances = []
|
| 18 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
def smooth_midpoints(self, midpoints):
|
| 20 |
"""Apply Savitzky-Golay filter to smooth motion data"""
|
| 21 |
self.midpoints_list = np.array(midpoints)
|
|
@@ -38,24 +67,71 @@ class MetricsCalculator:
|
|
| 38 |
return False
|
| 39 |
|
| 40 |
def detect_peaks(self):
|
| 41 |
-
"""Improved peak detection with
|
| 42 |
if self.y_smoothed.size == 0:
|
| 43 |
print("No smoothed values found for peak detection")
|
| 44 |
return False
|
| 45 |
|
| 46 |
try:
|
| 47 |
-
|
| 48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
self.peaks = np.sort(np.concatenate((self.peaks_max, self.peaks_min)))
|
|
|
|
| 50 |
return len(self.peaks) > 0
|
| 51 |
except Exception as e:
|
| 52 |
print(f"Peak detection error: {e}")
|
| 53 |
return False
|
| 54 |
|
| 55 |
-
def
|
| 56 |
-
"""
|
| 57 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
try:
|
| 60 |
# Calculate pixel to cm ratio
|
| 61 |
if len(self.shoulder_distances) > 0:
|
|
@@ -75,35 +151,59 @@ class MetricsCalculator:
|
|
| 75 |
if len(self.peaks_max) > 1:
|
| 76 |
rate = 1 / (np.mean(np.diff(self.peaks_max)) / fps) * 60 # Convert to CPM
|
| 77 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
return depth, rate
|
| 79 |
|
| 80 |
except Exception as e:
|
| 81 |
print(f"Metric calculation error: {e}")
|
| 82 |
return None, None
|
| 83 |
|
| 84 |
-
def plot_motion_curve(self):
|
| 85 |
"""Enhanced visualization with original and smoothed data"""
|
| 86 |
if self.midpoints_list.size == 0:
|
| 87 |
print("No midpoint data to plot")
|
| 88 |
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
|
| 90 |
plt.figure(figsize=(12, 6))
|
| 91 |
|
| 92 |
-
# Plot original and smoothed data
|
| 93 |
-
plt.plot(self.midpoints_list[:, 1],
|
| 94 |
label="Original Motion",
|
| 95 |
color="red",
|
| 96 |
linestyle="dashed",
|
| 97 |
alpha=0.6)
|
| 98 |
-
|
| 99 |
-
plt.plot(self.y_smoothed,
|
| 100 |
label="Smoothed Motion",
|
| 101 |
color="blue",
|
| 102 |
linewidth=2)
|
| 103 |
|
| 104 |
# Plot peaks if detected
|
| 105 |
if self.peaks.size > 0:
|
| 106 |
-
plt.plot(self.peaks,
|
| 107 |
self.y_smoothed[self.peaks],
|
| 108 |
"x",
|
| 109 |
color="green",
|
|
@@ -117,4 +217,335 @@ class MetricsCalculator:
|
|
| 117 |
plt.title("Compression Motion Analysis")
|
| 118 |
plt.grid(True)
|
| 119 |
plt.legend()
|
| 120 |
-
plt.show()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
import numpy as np
|
| 3 |
from scipy.signal import savgol_filter, find_peaks
|
| 4 |
import matplotlib.pyplot as plt
|
| 5 |
+
import sys
|
| 6 |
+
import cv2
|
| 7 |
+
import cloudinary as cld
|
| 8 |
+
import cloudinary.uploader
|
| 9 |
+
import tempfile
|
| 10 |
+
import json
|
| 11 |
+
import os
|
| 12 |
|
| 13 |
class MetricsCalculator:
|
| 14 |
"""Rate and depth calculation from motion data with improved peak detection"""
|
| 15 |
|
| 16 |
+
def __init__(self, frame_count, shoulder_width_cm):
|
| 17 |
self.shoulder_width_cm = shoulder_width_cm
|
| 18 |
self.peaks = np.array([])
|
| 19 |
self.peaks_max = np.array([])
|
|
|
|
| 23 |
self.midpoints_list = np.array([])
|
| 24 |
self.shoulder_distances = []
|
| 25 |
|
| 26 |
+
# Parameters for the final report
|
| 27 |
+
self.chunks_depth = []
|
| 28 |
+
self.chunks_rate = []
|
| 29 |
+
self.chunks_start_and_end_indices = []
|
| 30 |
+
|
| 31 |
+
self.chunks_midpoints = []
|
| 32 |
+
self.chunks_smoothed = []
|
| 33 |
+
self.chunks_peaks = []
|
| 34 |
+
|
| 35 |
+
self.frame_count = frame_count
|
| 36 |
+
|
| 37 |
+
# Validation thresholds
|
| 38 |
+
self.depth = None
|
| 39 |
+
self.rate = None
|
| 40 |
+
|
| 41 |
+
self.min_depth_threshold = 3.0 # cm
|
| 42 |
+
self.max_depth_threshold = 6.0 # cm
|
| 43 |
+
|
| 44 |
+
self.min_rate_threshold = 100.0 # cpm
|
| 45 |
+
self.max_rate_threshold = 120.0 # cpm
|
| 46 |
+
|
| 47 |
+
|
| 48 |
def smooth_midpoints(self, midpoints):
|
| 49 |
"""Apply Savitzky-Golay filter to smooth motion data"""
|
| 50 |
self.midpoints_list = np.array(midpoints)
|
|
|
|
| 67 |
return False
|
| 68 |
|
| 69 |
def detect_peaks(self):
|
| 70 |
+
"""Improved peak detection with adjusted prominence for min peaks"""
|
| 71 |
if self.y_smoothed.size == 0:
|
| 72 |
print("No smoothed values found for peak detection")
|
| 73 |
return False
|
| 74 |
|
| 75 |
try:
|
| 76 |
+
distance = min(10, len(self.y_smoothed)) # Dynamic distance based on data length
|
| 77 |
+
|
| 78 |
+
# Detect max peaks with default prominence
|
| 79 |
+
self.peaks_max, _ = find_peaks(self.y_smoothed, distance=distance)
|
| 80 |
+
|
| 81 |
+
# Detect min peaks with reduced or no prominence requirement
|
| 82 |
+
self.peaks_min, _ = find_peaks(
|
| 83 |
+
-self.y_smoothed,
|
| 84 |
+
distance=distance,
|
| 85 |
+
prominence=(0.3, None) # Adjust based on your data's characteristics
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
self.peaks = np.sort(np.concatenate((self.peaks_max, self.peaks_min)))
|
| 89 |
+
|
| 90 |
return len(self.peaks) > 0
|
| 91 |
except Exception as e:
|
| 92 |
print(f"Peak detection error: {e}")
|
| 93 |
return False
|
| 94 |
|
| 95 |
+
def _validate_chunk(self, chunk_start_frame_index, chunk_end_frame_index):
|
| 96 |
+
"""Validate that the data length matches the expected frame range.
|
| 97 |
+
Terminates the program with error code 1 if validation fails.
|
| 98 |
+
|
| 99 |
+
Args:
|
| 100 |
+
chunk_start_frame_index: Start frame index of the chunk
|
| 101 |
+
chunk_end_frame_index: End frame index of the chunk
|
| 102 |
+
|
| 103 |
+
Exits:
|
| 104 |
+
If validation fails, prints error message and exits with code 1
|
| 105 |
+
"""
|
| 106 |
+
try:
|
| 107 |
+
# Calculate expected number of frames
|
| 108 |
+
num_frames = chunk_end_frame_index - chunk_start_frame_index + 1
|
| 109 |
+
|
| 110 |
+
# Validate midpoints data
|
| 111 |
+
if len(self.midpoints_list[:, 1]) != num_frames:
|
| 112 |
+
print(f"\nERROR: Data length mismatch in midpoints_list")
|
| 113 |
+
print(f"Expected: {num_frames} frames ({chunk_start_frame_index}-{chunk_end_frame_index})")
|
| 114 |
+
print(f"Actual: {len(self.midpoints_list[:, 1])} frames")
|
| 115 |
+
sys.exit(1)
|
| 116 |
+
|
| 117 |
+
# Validate smoothed data
|
| 118 |
+
if len(self.y_smoothed) != num_frames:
|
| 119 |
+
print(f"\nERROR: Data length mismatch in y_smoothed")
|
| 120 |
+
print(f"Expected: {num_frames} frames ({chunk_start_frame_index}-{chunk_end_frame_index})")
|
| 121 |
+
print(f"Actual: {len(self.y_smoothed)} frames")
|
| 122 |
+
sys.exit(1)
|
| 123 |
+
|
| 124 |
+
except Exception as e:
|
| 125 |
+
print(f"\nCRITICAL VALIDATION ERROR: {str(e)}")
|
| 126 |
+
sys.exit(1)
|
| 127 |
+
|
| 128 |
+
def calculate_metrics(self, shoulder_distances, fps, chunk_start_frame_index, chunk_end_frame_index):
|
| 129 |
+
"""Calculate compression metrics with improved calculations"""
|
| 130 |
|
| 131 |
+
self._validate_chunk(chunk_start_frame_index, chunk_end_frame_index)
|
| 132 |
+
|
| 133 |
+
self.shoulder_distances = shoulder_distances
|
| 134 |
+
|
| 135 |
try:
|
| 136 |
# Calculate pixel to cm ratio
|
| 137 |
if len(self.shoulder_distances) > 0:
|
|
|
|
| 151 |
if len(self.peaks_max) > 1:
|
| 152 |
rate = 1 / (np.mean(np.diff(self.peaks_max)) / fps) * 60 # Convert to CPM
|
| 153 |
|
| 154 |
+
if depth is None or rate is None:
|
| 155 |
+
depth = 0
|
| 156 |
+
rate = 0
|
| 157 |
+
self.peaks = np.array([]) # Reset peaks if no valid data
|
| 158 |
+
|
| 159 |
+
# Store the results of this chunk for the final report if they are not None
|
| 160 |
+
self.depth = depth
|
| 161 |
+
self.rate = rate
|
| 162 |
+
|
| 163 |
+
self.chunks_depth.append(depth)
|
| 164 |
+
self.chunks_rate.append(rate)
|
| 165 |
+
self.chunks_start_and_end_indices.append((chunk_start_frame_index, chunk_end_frame_index))
|
| 166 |
+
|
| 167 |
+
self.chunks_midpoints.append(self.midpoints_list.copy())
|
| 168 |
+
self.chunks_smoothed.append(self.y_smoothed.copy())
|
| 169 |
+
self.chunks_peaks.append(self.peaks.copy())
|
| 170 |
+
|
| 171 |
+
|
| 172 |
return depth, rate
|
| 173 |
|
| 174 |
except Exception as e:
|
| 175 |
print(f"Metric calculation error: {e}")
|
| 176 |
return None, None
|
| 177 |
|
| 178 |
+
def plot_motion_curve(self, chunk_start_frame_index, chunk_end_frame_index):
|
| 179 |
"""Enhanced visualization with original and smoothed data"""
|
| 180 |
if self.midpoints_list.size == 0:
|
| 181 |
print("No midpoint data to plot")
|
| 182 |
return
|
| 183 |
+
|
| 184 |
+
self._validate_chunk(chunk_start_frame_index, chunk_end_frame_index)
|
| 185 |
+
|
| 186 |
+
# Create frame index array for x-axis
|
| 187 |
+
frame_indices = np.arange(chunk_start_frame_index, chunk_end_frame_index + 1)
|
| 188 |
+
|
| 189 |
|
| 190 |
plt.figure(figsize=(12, 6))
|
| 191 |
|
| 192 |
+
# Plot original and smoothed data with correct frame indices
|
| 193 |
+
plt.plot(frame_indices, self.midpoints_list[:, 1],
|
| 194 |
label="Original Motion",
|
| 195 |
color="red",
|
| 196 |
linestyle="dashed",
|
| 197 |
alpha=0.6)
|
| 198 |
+
|
| 199 |
+
plt.plot(frame_indices, self.y_smoothed,
|
| 200 |
label="Smoothed Motion",
|
| 201 |
color="blue",
|
| 202 |
linewidth=2)
|
| 203 |
|
| 204 |
# Plot peaks if detected
|
| 205 |
if self.peaks.size > 0:
|
| 206 |
+
plt.plot(frame_indices[self.peaks],
|
| 207 |
self.y_smoothed[self.peaks],
|
| 208 |
"x",
|
| 209 |
color="green",
|
|
|
|
| 217 |
plt.title("Compression Motion Analysis")
|
| 218 |
plt.grid(True)
|
| 219 |
plt.legend()
|
| 220 |
+
plt.show()
|
| 221 |
+
|
| 222 |
+
def calculate_weighted_averages(self):
|
| 223 |
+
"""Calculate weighted averages based on chunk durations
|
| 224 |
+
"""
|
| 225 |
+
if not self.chunks_depth or not self.chunks_rate or not self.chunks_start_and_end_indices:
|
| 226 |
+
print("[WARNING] No chunk data available for averaging")
|
| 227 |
+
return None
|
| 228 |
+
|
| 229 |
+
if not (len(self.chunks_depth) == len(self.chunks_rate) == len(self.chunks_start_and_end_indices)):
|
| 230 |
+
print("[ERROR] Mismatched chunk data lists")
|
| 231 |
+
return None
|
| 232 |
+
|
| 233 |
+
total_weight = 0
|
| 234 |
+
weighted_depth_sum = 0
|
| 235 |
+
weighted_rate_sum = 0
|
| 236 |
+
|
| 237 |
+
for depth, rate, (start, end) in zip(self.chunks_depth,
|
| 238 |
+
self.chunks_rate,
|
| 239 |
+
self.chunks_start_and_end_indices):
|
| 240 |
+
|
| 241 |
+
# Calculate chunk duration (+1 because inclusive)
|
| 242 |
+
chunk_duration = end - start + 1
|
| 243 |
+
|
| 244 |
+
weighted_depth_sum += depth * chunk_duration
|
| 245 |
+
weighted_rate_sum += rate * chunk_duration
|
| 246 |
+
total_weight += chunk_duration
|
| 247 |
+
|
| 248 |
+
if total_weight == 0:
|
| 249 |
+
print("[ERROR] Total chunk durations is zero")
|
| 250 |
+
return None
|
| 251 |
+
|
| 252 |
+
weighted_depth = weighted_depth_sum / total_weight
|
| 253 |
+
weighted_rate = weighted_rate_sum / total_weight
|
| 254 |
+
|
| 255 |
+
print(f"[RESULTS] Weighted average depth: {weighted_depth:.1f} cm")
|
| 256 |
+
print(f"[RESULTS] Weighted average rate: {weighted_rate:.1f} cpm")
|
| 257 |
+
|
| 258 |
+
return weighted_depth, weighted_rate
|
| 259 |
+
|
| 260 |
+
def plot_motion_curve_for_all_chunks(self, posture_errors_for_all_error_region):
|
| 261 |
+
"""Plot combined analysis with metrics annotations and posture error labels"""
|
| 262 |
+
if not self.chunks_start_and_end_indices:
|
| 263 |
+
print("No chunk data available for plotting")
|
| 264 |
+
return
|
| 265 |
+
|
| 266 |
+
# Print chunk information before plotting
|
| 267 |
+
print("\n=== Chunk Ranges ===")
|
| 268 |
+
for i, (start_end, depth, rate) in enumerate(zip(self.chunks_start_and_end_indices,
|
| 269 |
+
self.chunks_depth,
|
| 270 |
+
self.chunks_rate)):
|
| 271 |
+
print(f"Chunk {i+1}: Frames {start_end[0]}-{start_end[1]} | "
|
| 272 |
+
f"Depth: {depth:.1f}cm | Rate: {rate:.1f}cpm")
|
| 273 |
+
|
| 274 |
+
plt.figure(figsize=(16, 8))
|
| 275 |
+
ax = plt.gca()
|
| 276 |
+
|
| 277 |
+
# Sort chunks chronologically
|
| 278 |
+
sorted_chunks = sorted(zip(self.chunks_start_and_end_indices,
|
| 279 |
+
self.chunks_depth,
|
| 280 |
+
self.chunks_rate),
|
| 281 |
+
key=lambda x: x[0][0])
|
| 282 |
+
|
| 283 |
+
# 1. Plot all valid chunks with metrics
|
| 284 |
+
prev_chunk_end = None # Track previous chunk's end position
|
| 285 |
+
|
| 286 |
+
for idx, ((start, end), depth, rate) in enumerate(sorted_chunks):
|
| 287 |
+
chunk_frames = np.arange(start, end + 1)
|
| 288 |
+
midpoints = self.chunks_midpoints[idx]
|
| 289 |
+
smoothed = self.chunks_smoothed[idx]
|
| 290 |
+
peaks = self.chunks_peaks[idx]
|
| 291 |
+
|
| 292 |
+
# Add separator line between chunks
|
| 293 |
+
if prev_chunk_end is not None:
|
| 294 |
+
separator_x = prev_chunk_end + 0.5 # Midpoint between chunks
|
| 295 |
+
ax.axvline(x=separator_x, color='orange', linestyle=':', linewidth=1.5)
|
| 296 |
+
|
| 297 |
+
# Plot data
|
| 298 |
+
ax.plot(chunk_frames, midpoints[:, 1],
|
| 299 |
+
color="red", linestyle="dashed", alpha=0.6,
|
| 300 |
+
label="Original Motion" if idx == 0 else "")
|
| 301 |
+
ax.plot(chunk_frames, smoothed,
|
| 302 |
+
color="blue", linewidth=2,
|
| 303 |
+
label="Smoothed Motion" if idx == 0 else "")
|
| 304 |
+
|
| 305 |
+
# Plot peaks
|
| 306 |
+
if peaks.size > 0:
|
| 307 |
+
ax.plot(chunk_frames[peaks], smoothed[peaks],
|
| 308 |
+
"x", color="green", markersize=8,
|
| 309 |
+
label="Peaks" if idx == 0 else "")
|
| 310 |
+
|
| 311 |
+
# Annotate chunk metrics
|
| 312 |
+
mid_frame = (start + end) // 2
|
| 313 |
+
ax.annotate(f"Depth: {depth:.1f}cm\nRate: {rate:.1f}cpm",
|
| 314 |
+
xy=(mid_frame, np.max(smoothed)),
|
| 315 |
+
xytext=(0, 10), textcoords='offset points',
|
| 316 |
+
ha='center', va='bottom', fontsize=9,
|
| 317 |
+
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5))
|
| 318 |
+
|
| 319 |
+
# Update previous chunk end tracker
|
| 320 |
+
prev_chunk_end = end
|
| 321 |
+
# 2. Identify and label posture error regions
|
| 322 |
+
error_regions = []
|
| 323 |
+
|
| 324 |
+
# Before first chunk
|
| 325 |
+
if sorted_chunks[0][0][0] > 0:
|
| 326 |
+
error_regions.append((0, sorted_chunks[0][0][0]-1))
|
| 327 |
+
|
| 328 |
+
# Between chunks
|
| 329 |
+
for i in range(1, len(sorted_chunks)):
|
| 330 |
+
prev_end = sorted_chunks[i-1][0][1]
|
| 331 |
+
curr_start = sorted_chunks[i][0][0]
|
| 332 |
+
if curr_start - prev_end > 1:
|
| 333 |
+
error_regions.append((prev_end + 1, curr_start - 1))
|
| 334 |
+
|
| 335 |
+
# After last chunk
|
| 336 |
+
last_end = sorted_chunks[-1][0][1]
|
| 337 |
+
if last_end < self.frame_count - 1:
|
| 338 |
+
error_regions.append((last_end + 1, self.frame_count - 1))
|
| 339 |
+
|
| 340 |
+
# Print error regions information
|
| 341 |
+
print("\n=== Error Regions ===")
|
| 342 |
+
for i, (start, end) in enumerate(error_regions):
|
| 343 |
+
# Get errors for this region if available
|
| 344 |
+
try:
|
| 345 |
+
errors = posture_errors_for_all_error_region[i]
|
| 346 |
+
error_str = ", ".join(errors) if errors else "No errors detected"
|
| 347 |
+
except IndexError:
|
| 348 |
+
error_str = "No error data"
|
| 349 |
+
|
| 350 |
+
print(f"Error Region {i+1}: Frames {start}-{end} | Errors: {error_str}")
|
| 351 |
+
|
| 352 |
+
# Shade and label error regions
|
| 353 |
+
for error_region_index, region in enumerate (error_regions):
|
| 354 |
+
ax.axvspan(region[0], region[1],
|
| 355 |
+
color='gray', alpha=0.2,
|
| 356 |
+
label='Posture Errors' if region == error_regions[0] else "")
|
| 357 |
+
|
| 358 |
+
# Add vertical dotted lines at boundaries
|
| 359 |
+
ax.axvline(x=region[0], color='black', linestyle=':', alpha=0.5)
|
| 360 |
+
ax.axvline(x=region[1], color='black', linestyle=':', alpha=0.5)
|
| 361 |
+
|
| 362 |
+
# Add frame number labels - properly aligned
|
| 363 |
+
y_pos = ax.get_ylim()[0] + 0.02 * (ax.get_ylim()[1] - ax.get_ylim()[0])
|
| 364 |
+
|
| 365 |
+
# Start frame label - right aligned before the line
|
| 366 |
+
ax.text(region[0] - 1, y_pos,
|
| 367 |
+
f"Frame {region[0]}",
|
| 368 |
+
rotation=90, va='bottom', ha='right',
|
| 369 |
+
fontsize=8, alpha=0.7)
|
| 370 |
+
|
| 371 |
+
# End frame label - left aligned after the line
|
| 372 |
+
ax.text(region[1] + 1, y_pos,
|
| 373 |
+
f"Frame {region[1]}",
|
| 374 |
+
rotation=90, va='bottom', ha='left',
|
| 375 |
+
fontsize=8, alpha=0.7)
|
| 376 |
+
|
| 377 |
+
# Add error labels if available
|
| 378 |
+
if posture_errors_for_all_error_region:
|
| 379 |
+
try:
|
| 380 |
+
# Get errors for this specific error region
|
| 381 |
+
region_errors = posture_errors_for_all_error_region[error_region_index]
|
| 382 |
+
|
| 383 |
+
# Format errors text
|
| 384 |
+
error_text = "Errors:\n" + "\n".join(region_errors) if region_errors else ""
|
| 385 |
+
|
| 386 |
+
# Position text in middle of the error region
|
| 387 |
+
mid_frame = (region[0] + region[1]) // 2
|
| 388 |
+
ax.text(mid_frame, np.mean(ax.get_ylim()),
|
| 389 |
+
error_text,
|
| 390 |
+
ha='center', va='center',
|
| 391 |
+
fontsize=9, color='red', alpha=0.8,
|
| 392 |
+
bbox=dict(boxstyle='round,pad=0.3',
|
| 393 |
+
fc='white', ec='red', alpha=0.7))
|
| 394 |
+
except IndexError:
|
| 395 |
+
print(f"No error data for region {error_region_index}")
|
| 396 |
+
|
| 397 |
+
# 3. Add weighted averages
|
| 398 |
+
if hasattr(self, 'weighted_depth') and hasattr(self, 'weighted_rate'):
|
| 399 |
+
ax.annotate(f"Weighted Averages:\nDepth: {self.weighted_depth:.1f}cm\nRate: {self.weighted_rate:.1f}cpm",
|
| 400 |
+
xy=(0.98, 0.98), xycoords='axes fraction',
|
| 401 |
+
ha='right', va='top', fontsize=10,
|
| 402 |
+
bbox=dict(boxstyle='round,pad=0.5', fc='white', ec='black'))
|
| 403 |
+
|
| 404 |
+
# 4. Configure legend and layout
|
| 405 |
+
handles, labels = ax.get_legend_handles_labels()
|
| 406 |
+
unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]
|
| 407 |
+
ax.legend(*zip(*unique), loc='upper right')
|
| 408 |
+
|
| 409 |
+
plt.xlabel("Frame Number")
|
| 410 |
+
plt.ylabel("Vertical Position (px)")
|
| 411 |
+
plt.title("Complete CPR Analysis with Metrics")
|
| 412 |
+
plt.grid(True)
|
| 413 |
+
plt.tight_layout()
|
| 414 |
+
|
| 415 |
+
os.makedirs("plots", exist_ok=True)
|
| 416 |
+
plot_path = "plots/cpr_analysis.png"
|
| 417 |
+
plt.savefig("plots/cpr_analysis.png", dpi=300)
|
| 418 |
+
response = cloudinary.uploader.upload(plot_path, resource_type="image")
|
| 419 |
+
plt.close()
|
| 420 |
+
plt.show()
|
| 421 |
+
return response['secure_url']
|
| 422 |
+
|
| 423 |
+
def validate_calculate_metrics(self):
    """Check whether the computed compression depth and rate lie in range.

    Returns:
        True only when both metrics have been calculated and each falls
        within its configured [min, max] threshold; otherwise False.
        A warning line is printed for every metric that is out of range.
    """
    # Both metrics must exist before a range check is meaningful.
    if self.depth is None or self.rate is None:
        print("[ERROR] Depth and rate must be calculated before validation")
        return False

    in_depth_range = self.min_depth_threshold <= self.depth <= self.max_depth_threshold
    in_rate_range = self.min_rate_threshold <= self.rate <= self.max_rate_threshold

    if not in_depth_range:
        print(f"[WARNING] Depth {self.depth:.1f}cm is out of range ({self.min_depth_threshold}-{self.max_depth_threshold})")
    if not in_rate_range:
        print(f"[WARNING] Rate {self.rate:.1f}cpm is out of range ({self.min_rate_threshold}-{self.max_rate_threshold})")

    return in_depth_range and in_rate_range
|
| 438 |
+
|
| 439 |
+
def get_json_chunk_data(self, fps=30, min_chunk_frames=60):
    """Return per-chunk CPR metrics as JSON-serializable dicts.

    Args:
        fps: frame rate used to convert frame indices to seconds
            (previously hard-coded to 30).
        min_chunk_frames: chunks shorter than this many frames are dropped
            (previously hard-coded to 60, i.e. 2 s at 30 fps).

    Returns:
        List of {"start", "end", "depth", "rate"} dicts with start/end in
        seconds (rounded to 2 decimals), or None when no chunks exist.
    """
    if not self.chunks_start_and_end_indices:
        print("No chunk data available")
        return None

    return [
        {
            "start": round(start / fps, 2),
            "end": round(end / fps, 2),
            "depth": depth,
            "rate": rate,
        }
        for (start, end), depth, rate in zip(
            self.chunks_start_and_end_indices,
            self.chunks_depth,
            self.chunks_rate,
        )
        # Discard chunks too short to yield reliable metrics.
        if (end - start) >= min_chunk_frames
    ]
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def annotate_video_with_chunks(self, input_video_path, posture_errors_for_all_error_region):
    """Overlay per-chunk rate/depth and per-gap posture errors onto the video.

    Args:
        input_video_path: path of the source video to annotate.
        posture_errors_for_all_error_region: list of error-string lists,
            one entry per gap ("error region") between valid chunks.

    Returns:
        Secure Cloudinary URL of the uploaded annotated video.

    Raises:
        ValueError: when the video cannot be opened, has invalid
            properties, or no compression chunks are available.
    """
    # BUGFIX: guard against an empty chunk list, which previously crashed
    # on the [0] / [-1] indexing below.
    if not self.chunks_start_and_end_indices:
        raise ValueError("No compression chunks available for annotation.")

    cap = cv2.VideoCapture(input_video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    if not cap.isOpened() or fps == 0 or width == 0 or height == 0:
        raise ValueError("Failed to open input video or invalid video properties.")

    # Named temp file for the Cloudinary upload; it must be closed while
    # OpenCV writes to it (Windows cannot share an open handle).
    temp_video = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
    temp_video_path = temp_video.name
    temp_video.close()

    out = cv2.VideoWriter(temp_video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    # Build "error regions": frame spans not covered by any chunk.
    error_regions = []
    if self.chunks_start_and_end_indices[0][0] > 0:
        error_regions.append((0, self.chunks_start_and_end_indices[0][0] - 1))
    for i in range(1, len(self.chunks_start_and_end_indices)):
        prev_end = self.chunks_start_and_end_indices[i - 1][1]
        curr_start = self.chunks_start_and_end_indices[i][0]
        if curr_start - prev_end > 1:
            error_regions.append((prev_end + 1, curr_start - 1))
    last_end = self.chunks_start_and_end_indices[-1][1]
    if last_end < self.frame_count - 1:
        error_regions.append((last_end + 1, self.frame_count - 1))

    frame_idx = 0
    current_chunk = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Chunk overlay: show rate/depth while inside a valid chunk.
            if current_chunk < len(self.chunks_start_and_end_indices):
                start_idx, end_idx = self.chunks_start_and_end_indices[current_chunk]
                if start_idx <= frame_idx <= end_idx:
                    rate = self.chunks_rate[current_chunk]
                    depth = self.chunks_depth[current_chunk]
                    cv2.putText(frame, f"Rate: {rate:.1f}cpm", (20, 40),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
                    cv2.putText(frame, f"Depth: {depth:.1f}cm", (20, 70),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
                if frame_idx > end_idx:
                    current_chunk += 1

            # Error overlay: list posture errors while inside an error region.
            for i, (start, end) in enumerate(error_regions):
                if start <= frame_idx <= end:
                    # BUGFIX: guard against fewer error lists than regions
                    # (previously an unhandled IndexError).
                    region_errors = (posture_errors_for_all_error_region[i]
                                     if i < len(posture_errors_for_all_error_region)
                                     else [])
                    # BUGFIX: the label was used as the join *separator*
                    # ("A" + "Errors: " + "B"); prefix it instead, matching
                    # the plotting code's "Errors:" label semantics.
                    if region_errors:
                        error_text = "Errors: " + ", ".join(region_errors)
                        cv2.putText(frame, error_text, (20, 40),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)

            out.write(frame)
            frame_idx += 1
    finally:
        # Release handles even if annotation fails mid-way.
        cap.release()
        out.release()

    # Upload the finished video, then remove the local temp file.
    response = cloudinary.uploader.upload(temp_video_path, resource_type="video")
    os.remove(temp_video_path)

    return response['secure_url']
|
CPR/pose_estimation.py
CHANGED
|
@@ -3,18 +3,19 @@ import cv2
|
|
| 3 |
import numpy as np
|
| 4 |
from ultralytics import YOLO
|
| 5 |
from CPR.keypoints import CocoKeypoints
|
|
|
|
| 6 |
|
| 7 |
class PoseEstimator:
|
| 8 |
"""Human pose estimation using YOLO"""
|
| 9 |
|
| 10 |
def __init__(self, model_path="yolo11n-pose.pt", min_confidence=0.2):
|
| 11 |
-
self.model = YOLO(model_path).to(
|
| 12 |
self.min_confidence = min_confidence
|
| 13 |
|
| 14 |
def detect_poses(self, frame):
|
| 15 |
"""Detect human poses in a frame"""
|
| 16 |
try:
|
| 17 |
-
results = self.model(frame, verbose=False)
|
| 18 |
if not results or len(results[0].keypoints.xy) == 0:
|
| 19 |
return None
|
| 20 |
return results[0]
|
|
|
|
| 3 |
import numpy as np
|
| 4 |
from ultralytics import YOLO
|
| 5 |
from CPR.keypoints import CocoKeypoints
|
| 6 |
+
import torch
|
| 7 |
|
| 8 |
class PoseEstimator:
|
| 9 |
"""Human pose estimation using YOLO"""
|
| 10 |
|
| 11 |
def __init__(self, model_path="yolo11n-pose.pt", min_confidence=0.2):
|
| 12 |
+
self.model = YOLO(model_path).to("cuda:0" if torch.cuda.is_available() else "cpu")
|
| 13 |
self.min_confidence = min_confidence
|
| 14 |
|
| 15 |
def detect_poses(self, frame):
|
| 16 |
"""Detect human poses in a frame"""
|
| 17 |
try:
|
| 18 |
+
results = self.model(frame, verbose=False, conf=self.min_confidence, show=False)
|
| 19 |
if not results or len(results[0].keypoints.xy) == 0:
|
| 20 |
return None
|
| 21 |
return results[0]
|
CPR/posture_analyzer.py
CHANGED
|
@@ -6,20 +6,31 @@ from CPR.keypoints import CocoKeypoints
|
|
| 6 |
|
| 7 |
class PostureAnalyzer:
|
| 8 |
"""Posture analysis and visualization with comprehensive validation"""
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
def __init__(self, right_arm_angle_threshold
|
|
|
|
|
|
|
| 11 |
self.right_arm_angles = []
|
| 12 |
self.left_arm_angles = []
|
| 13 |
self.wrist_distances = []
|
|
|
|
| 14 |
self.right_arm_angle_threshold = right_arm_angle_threshold
|
| 15 |
self.left_arm_angle_threshold = left_arm_angle_threshold
|
| 16 |
self.wrist_distance_threshold = wrist_distance_threshold
|
|
|
|
| 17 |
self.warning_positions = {
|
| 18 |
-
'
|
| 19 |
-
'
|
|
|
|
|
|
|
| 20 |
}
|
| 21 |
|
| 22 |
-
|
|
|
|
|
|
|
| 23 |
"""Calculate angle between three points"""
|
| 24 |
try:
|
| 25 |
ang = math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) -
|
|
@@ -28,97 +39,172 @@ class PostureAnalyzer:
|
|
| 28 |
except Exception as e:
|
| 29 |
print(f"Angle calculation error: {e}")
|
| 30 |
return 0
|
| 31 |
-
|
| 32 |
-
def
|
| 33 |
-
"""Check for
|
| 34 |
warnings = []
|
| 35 |
try:
|
| 36 |
-
# Right arm analysis
|
| 37 |
shoulder = keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
|
| 38 |
elbow = keypoints[CocoKeypoints.RIGHT_ELBOW.value]
|
| 39 |
wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]
|
| 40 |
-
right_angle = self.calculate_angle(wrist, elbow, shoulder)
|
| 41 |
-
self.right_arm_angles.append(right_angle)
|
| 42 |
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
shoulder = keypoints[CocoKeypoints.LEFT_SHOULDER.value]
|
| 45 |
elbow = keypoints[CocoKeypoints.LEFT_ELBOW.value]
|
| 46 |
wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
|
| 47 |
-
left_angle = self.calculate_angle(wrist, elbow, shoulder)
|
| 48 |
-
self.left_arm_angles.append(left_angle)
|
| 49 |
|
| 50 |
-
|
| 51 |
-
avg_right = np.mean(self.right_arm_angles[-10:] if self.right_arm_angles else 0)
|
| 52 |
-
avg_left = np.mean(self.left_arm_angles[-10:] if self.left_arm_angles else 0)
|
| 53 |
|
| 54 |
-
|
| 55 |
-
|
|
|
|
|
|
|
| 56 |
if avg_left < self.left_arm_angle_threshold:
|
| 57 |
warnings.append("Left arm bent")
|
|
|
|
|
|
|
| 58 |
|
| 59 |
except Exception as e:
|
| 60 |
-
print(f"
|
| 61 |
|
| 62 |
return warnings
|
| 63 |
|
| 64 |
-
def
|
| 65 |
"""Check for one-handed CPR pattern (returns warning)"""
|
|
|
|
| 66 |
try:
|
|
|
|
| 67 |
left_wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
|
| 68 |
right_wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]
|
| 69 |
|
| 70 |
wrist_distance = np.linalg.norm(left_wrist - right_wrist)
|
| 71 |
self.wrist_distances.append(wrist_distance)
|
| 72 |
|
| 73 |
-
|
|
|
|
| 74 |
|
| 75 |
if avg_distance > self.wrist_distance_threshold:
|
| 76 |
-
|
|
|
|
| 77 |
except Exception as e:
|
| 78 |
-
print(f"One-handed check error: {e}")
|
| 79 |
|
| 80 |
-
return
|
| 81 |
|
| 82 |
-
def
|
| 83 |
-
"""Check if hands are
|
|
|
|
| 84 |
try:
|
| 85 |
-
|
| 86 |
-
|
|
|
|
| 87 |
|
| 88 |
-
#
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
print(f"Left wrist distance: {left_distance}, Right wrist distance: {right_distance}")
|
| 93 |
|
| 94 |
-
if
|
| 95 |
-
|
|
|
|
| 96 |
except Exception as e:
|
| 97 |
-
print(f"
|
| 98 |
|
| 99 |
-
return
|
| 100 |
|
| 101 |
-
def validate_posture(self, keypoints,
|
| 102 |
"""Run all posture validations (returns aggregated warnings)"""
|
| 103 |
warnings = []
|
| 104 |
-
warnings += self.
|
| 105 |
-
warnings += self.
|
| 106 |
-
warnings += self.
|
|
|
|
| 107 |
return warnings
|
| 108 |
|
| 109 |
-
def display_warnings(self, frame
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
|
| 111 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
try:
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
|
|
|
|
|
|
| 121 |
except Exception as e:
|
| 122 |
print(f"Warning display error: {e}")
|
| 123 |
-
|
| 124 |
return frame
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
class PostureAnalyzer:
|
| 8 |
"""Posture analysis and visualization with comprehensive validation"""
|
| 9 |
+
|
| 10 |
+
#! The warnings depend on the average readings from the last 10 frames
|
| 11 |
+
#! This "10" should be adjusted according to the sampling rate of the video
|
| 12 |
|
| 13 |
+
def __init__(self, right_arm_angle_threshold, left_arm_angle_threshold, wrist_distance_threshold, history_length_to_average):
|
| 14 |
+
self.history_length_to_average = history_length_to_average
|
| 15 |
+
|
| 16 |
self.right_arm_angles = []
|
| 17 |
self.left_arm_angles = []
|
| 18 |
self.wrist_distances = []
|
| 19 |
+
|
| 20 |
self.right_arm_angle_threshold = right_arm_angle_threshold
|
| 21 |
self.left_arm_angle_threshold = left_arm_angle_threshold
|
| 22 |
self.wrist_distance_threshold = wrist_distance_threshold
|
| 23 |
+
|
| 24 |
self.warning_positions = {
|
| 25 |
+
'right_arm_angle': (50, 50),
|
| 26 |
+
'left_arm_angle': (50, 100),
|
| 27 |
+
'one_handed': (50, 150),
|
| 28 |
+
'hands_not_on_chest': (50, 200)
|
| 29 |
}
|
| 30 |
|
| 31 |
+
self.posture_errors_for_all_error_region = []
|
| 32 |
+
|
| 33 |
+
def _calculate_angle(self, a, b, c):
|
| 34 |
"""Calculate angle between three points"""
|
| 35 |
try:
|
| 36 |
ang = math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) -
|
|
|
|
| 39 |
except Exception as e:
|
| 40 |
print(f"Angle calculation error: {e}")
|
| 41 |
return 0
|
| 42 |
+
|
| 43 |
+
def _check_bended_right_arm(self, keypoints):
    """Flag a bent right arm using a moving average of the elbow angle.

    Appends the current wrist-elbow-shoulder angle to the history, then
    warns when the average over the last few frames exceeds the threshold.
    """
    warnings = []
    try:
        r_shoulder = keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
        r_elbow = keypoints[CocoKeypoints.RIGHT_ELBOW.value]
        r_wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]

        # Record this frame's angle; the history is non-empty afterwards.
        self.right_arm_angles.append(
            self._calculate_angle(r_wrist, r_elbow, r_shoulder))

        # Smooth over the most recent frames to suppress jitter.
        recent = self.right_arm_angles[-self.history_length_to_average:]
        if np.mean(recent) > self.right_arm_angle_threshold:
            warnings.append("Right arm bent")

        return warnings

    except Exception as e:
        print(f"Right arm check error: {e}")

    return warnings
|
| 66 |
+
|
| 67 |
+
def _check_bended_left_arm(self, keypoints):
    """Flag a bent left arm using a moving average of the elbow angle.

    Appends the current wrist-elbow-shoulder angle to the history, then
    warns when the average over the last few frames drops below the
    threshold.
    """
    warnings = []
    try:
        l_shoulder = keypoints[CocoKeypoints.LEFT_SHOULDER.value]
        l_elbow = keypoints[CocoKeypoints.LEFT_ELBOW.value]
        l_wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]

        # Record this frame's angle; the history is non-empty afterwards.
        self.left_arm_angles.append(
            self._calculate_angle(l_wrist, l_elbow, l_shoulder))

        # Smooth over the most recent frames to suppress jitter.
        recent = self.left_arm_angles[-self.history_length_to_average:]
        # NOTE(review): this uses "<" while the right-arm check uses ">".
        # The asymmetry may be intentional (signed/mirrored angles from
        # _calculate_angle) — confirm before changing.
        if np.mean(recent) < self.left_arm_angle_threshold:
            warnings.append("Left arm bent")

        return warnings

    except Exception as e:
        print(f"Left arm check error: {e}")

    return warnings
|
| 90 |
|
| 91 |
+
def _check_one_handed_cpr(self, keypoints):
    """Warn when the wrists drift far enough apart to suggest one-handed CPR."""
    warnings = []
    try:
        lw = keypoints[CocoKeypoints.LEFT_WRIST.value]
        rw = keypoints[CocoKeypoints.RIGHT_WRIST.value]

        # Track the inter-wrist distance for this frame.
        self.wrist_distances.append(np.linalg.norm(lw - rw))

        # Smooth over the most recent frames before deciding.
        recent = self.wrist_distances[-self.history_length_to_average:]
        if np.mean(recent) > self.wrist_distance_threshold:
            warnings.append("One-handed CPR detected!")

    except Exception as e:
        print(f"One-handed CPR check error: {e}")

    return warnings
|
| 112 |
|
| 113 |
+
def _check_hands_on_chest(self, wrists_midpoint, chest_params): # (cx, cy, cw, ch)
|
| 114 |
+
"""Check if hands are on the chest (returns warning)"""
|
| 115 |
+
warnings = []
|
| 116 |
try:
|
| 117 |
+
# Check if hands are on the chest
|
| 118 |
+
if wrists_midpoint is None or chest_params is None:
|
| 119 |
+
return ["Hands not on chest"]
|
| 120 |
|
| 121 |
+
# Unpack parameters
|
| 122 |
+
wrist_x, wrist_y = wrists_midpoint
|
| 123 |
+
cx, cy, cw, ch = chest_params
|
|
|
|
|
|
|
| 124 |
|
| 125 |
+
if not ((cx - cw/2 < wrist_x < cx + cw/2) and (cy - ch/2 < wrist_y < cy + ch/2)):
|
| 126 |
+
warnings.append("Hands not on chest")
|
| 127 |
+
|
| 128 |
except Exception as e:
|
| 129 |
+
print(f"Hands on chest check error: {e}")
|
| 130 |
|
| 131 |
+
return warnings
|
| 132 |
|
| 133 |
+
def validate_posture(self, keypoints, wrists_midpoint, chest_params):
    """Run every posture check and return the combined warning list."""
    collected = []
    # Each check is independent; order only affects warning ordering.
    collected.extend(self._check_bended_right_arm(keypoints))
    collected.extend(self._check_bended_left_arm(keypoints))
    collected.extend(self._check_one_handed_cpr(keypoints))
    collected.extend(self._check_hands_on_chest(wrists_midpoint, chest_params))
    return collected
|
| 141 |
|
| 142 |
+
def display_warnings(self, frame):
    """Overlay a colored banner for each active posture warning.

    Args:
        frame: BGR image to draw on (modified in place).

    Returns:
        The same frame, with a banner for every warning currently set.
    """
    # NOTE(review): self.warnings is not assigned anywhere in this class as
    # shown — presumably populated by the caller; confirm upstream.
    if not self.warnings:
        return frame

    # (substring to match, banner color BGR, position key, banner text)
    banner_specs = [
        ("Right arm bent", (0, 0, 255), 'right_arm_angle', "Right arm bent!"),
        ("Left arm bent", (0, 255, 255), 'left_arm_angle', "Left arm bent!"),
        ("One-handed", (0, 255, 0), 'one_handed', "One-handed CPR detected!"),
        ("Hands not on chest", (255, 0, 0), 'hands_not_on_chest', "Hands not on chest!"),
    ]

    try:
        for needle, color, pos_key, banner_text in banner_specs:
            # A banner is shown when any active warning contains the needle.
            if any(needle in w for w in self.warnings):
                self._draw_warning_banner(
                    frame=frame,
                    text=banner_text,
                    color=color,
                    position=self.warning_positions[pos_key],
                )
    except Exception as e:
        print(f"Warning display error: {e}")

    return frame
|
| 191 |
+
|
| 192 |
+
def _draw_warning_banner(self, frame, text, color, position):
    """Draw one warning as white text over a filled colored rectangle."""
    (text_w, text_h), _ = cv2.getTextSize(
        text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)

    x, y = position
    # Background rectangle padded 10 px around the text baseline box.
    top_left = (x - 10, y - text_h - 10)
    bottom_right = (x + text_w + 10, y + 10)
    cv2.rectangle(frame, top_left, bottom_right, color, -1)

    # Warning text in white, anti-aliased, on top of the banner.
    cv2.putText(frame, text, (x, y),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2,
                cv2.LINE_AA)
|
CPR/quick_test.ipynb
DELETED
|
@@ -1,37 +0,0 @@
|
|
| 1 |
-
{
|
| 2 |
-
"cells": [
|
| 3 |
-
{
|
| 4 |
-
"cell_type": "code",
|
| 5 |
-
"execution_count": null,
|
| 6 |
-
"id": "c819609e",
|
| 7 |
-
"metadata": {},
|
| 8 |
-
"outputs": [],
|
| 9 |
-
"source": [
|
| 10 |
-
"# Example notebook usage\n",
|
| 11 |
-
"from pose_estimation import PoseEstimator\n",
|
| 12 |
-
"from role_classifier import RoleClassifier\n",
|
| 13 |
-
"from metrics_calculator import MetricsCalculator\n",
|
| 14 |
-
"\n",
|
| 15 |
-
"# Test pose estimation\n",
|
| 16 |
-
"estimator = PoseEstimator()\n",
|
| 17 |
-
"frame = cv2.imread(\"test_frame.jpg\")\n",
|
| 18 |
-
"results = estimator.detect_poses(frame)\n",
|
| 19 |
-
"annotated = estimator.draw_keypoints(frame, results)\n",
|
| 20 |
-
"cv2.imshow(\"Pose Test\", annotated)\n",
|
| 21 |
-
"\n",
|
| 22 |
-
"# Test metrics calculation\n",
|
| 23 |
-
"calculator = MetricsCalculator()\n",
|
| 24 |
-
"calculator.smooth_midpoints(midpoints_data)\n",
|
| 25 |
-
"calculator.detect_peaks()\n",
|
| 26 |
-
"calculator.plot_motion_curve()"
|
| 27 |
-
]
|
| 28 |
-
}
|
| 29 |
-
],
|
| 30 |
-
"metadata": {
|
| 31 |
-
"language_info": {
|
| 32 |
-
"name": "python"
|
| 33 |
-
}
|
| 34 |
-
},
|
| 35 |
-
"nbformat": 4,
|
| 36 |
-
"nbformat_minor": 5
|
| 37 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
CPR/role_classifier.py
CHANGED
|
@@ -1,81 +1,176 @@
|
|
| 1 |
# role_classifier.py
|
| 2 |
import cv2
|
| 3 |
import numpy as np
|
|
|
|
| 4 |
from CPR.keypoints import CocoKeypoints
|
| 5 |
|
|
|
|
| 6 |
class RoleClassifier:
|
| 7 |
"""Role classification and tracking using image processing"""
|
| 8 |
|
| 9 |
def __init__(self, proximity_thresh=0.3):
|
| 10 |
self.proximity_thresh = proximity_thresh
|
| 11 |
self.rescuer_id = None
|
| 12 |
-
self.
|
| 13 |
-
self.
|
| 14 |
-
self.rescuer_keypoints = None
|
| 15 |
|
| 16 |
-
def
|
| 17 |
-
"""
|
| 18 |
try:
|
| 19 |
-
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
-
|
| 31 |
-
for (i, px, py) in people:
|
| 32 |
-
for (hx, hy, hw, hh) in horizontal_objects:
|
| 33 |
-
distance = np.sqrt(((px-hx)/frame_shape[1])**2 + ((py-hy)/frame_shape[0])**2)
|
| 34 |
-
if distance < min_distance and distance < self.proximity_thresh:
|
| 35 |
-
min_distance = distance
|
| 36 |
-
self.rescuer_id = i
|
| 37 |
-
return self.rescuer_id
|
| 38 |
-
except Exception as e:
|
| 39 |
-
print(f"Rescuer finding error: {e}")
|
| 40 |
-
return None
|
| 41 |
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
right_shoulder = self.rescuer_keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
|
| 76 |
-
|
| 77 |
-
shoulder_dist = np.linalg.norm(left_shoulder - right_shoulder)
|
| 78 |
-
self.shoulder_distances.append(shoulder_dist)
|
| 79 |
-
print(f"Shoulder distance: {shoulder_dist:.2f} pixels")
|
| 80 |
-
except Exception as e:
|
| 81 |
-
print(f"Shoulder distance calculation error: {e}")
|
|
|
|
| 1 |
# role_classifier.py
|
| 2 |
import cv2
|
| 3 |
import numpy as np
|
| 4 |
+
from ultralytics.utils.plotting import Annotator # Import YOLO's annotator
|
| 5 |
from CPR.keypoints import CocoKeypoints
|
| 6 |
|
| 7 |
+
|
| 8 |
class RoleClassifier:
|
| 9 |
"""Role classification and tracking using image processing"""
|
| 10 |
|
| 11 |
def __init__(self, proximity_thresh=0.3):
|
| 12 |
self.proximity_thresh = proximity_thresh
|
| 13 |
self.rescuer_id = None
|
| 14 |
+
self.rescuer_processed_results = None
|
| 15 |
+
self.patient_processed_results = None
|
|
|
|
| 16 |
|
| 17 |
+
def _calculate_verticality_score(self, bounding_box):
|
| 18 |
+
"""Calculate posture verticality score (0=horizontal, 1=vertical) using bounding box aspect ratio."""
|
| 19 |
try:
|
| 20 |
+
x1, y1, x2, y2 = bounding_box
|
| 21 |
+
width = abs(x2 - x1)
|
| 22 |
+
height = abs(y2 - y1)
|
| 23 |
+
|
| 24 |
+
# Handle edge cases with invalid dimensions
|
| 25 |
+
if width == 0 or height == 0:
|
| 26 |
+
return -1
|
| 27 |
+
|
| 28 |
+
return 1 if height > width else 0 # 1 for vertical, 0 for horizontal
|
| 29 |
|
| 30 |
+
except (TypeError, ValueError) as e:
|
| 31 |
+
print(f"Verticality score calculation error: {e}")
|
| 32 |
+
return -1
|
| 33 |
+
|
| 34 |
+
def _calculate_bounding_box_center(self, bounding_box):
|
| 35 |
+
"""Calculate the center coordinates of a bounding box.
|
| 36 |
+
"""
|
| 37 |
+
x1, y1, x2, y2 = bounding_box
|
| 38 |
+
return (x1 + x2) / 2, (y1 + y2) / 2
|
| 39 |
|
| 40 |
+
def _calculate_distance(self, point1, point2):
|
| 41 |
+
"""Calculate Euclidean distance between two points"""
|
| 42 |
+
return ((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2)**0.5
|
| 43 |
+
|
| 44 |
+
def _calculate_bbox_areas(self, rescuer_bbox, patient_bbox):
|
| 45 |
+
"""
|
| 46 |
+
Calculate bounding box areas for rescuer and patient.
|
| 47 |
+
|
| 48 |
+
Args:
|
| 49 |
+
rescuer_bbox: [x1, y1, x2, y2] coordinates of rescuer's bounding box
|
| 50 |
+
patient_bbox: [x1, y1, x2, y2] coordinates of patient's bounding box
|
| 51 |
+
|
| 52 |
+
Returns:
|
| 53 |
+
Tuple: (rescuer_area, patient_area) in pixels
|
| 54 |
+
"""
|
| 55 |
+
def compute_area(bbox):
|
| 56 |
+
if bbox is None:
|
| 57 |
+
return 0
|
| 58 |
+
width = bbox[2] - bbox[0] # x2 - x1
|
| 59 |
+
height = bbox[3] - bbox[1] # y2 - y1
|
| 60 |
+
return abs(width * height) # Absolute value to handle negative coordinates
|
| 61 |
+
|
| 62 |
+
return compute_area(rescuer_bbox), compute_area(patient_bbox)
|
| 63 |
+
|
| 64 |
+
def classify_roles(self, results, prev_rescuer_processed_results=None, prev_patient_processed_results=None):
    """Classify detected people into rescuer and patient.

    The patient is the horizontal (lying) detection; the rescuer is the
    vertical detection closest to the patient. Detections much larger than
    the previous frame's combined rescuer+patient footprint are dropped as
    spurious.

    Args:
        results: YOLO pose result exposing .boxes.xywh and .keypoints.xy.
        prev_rescuer_processed_results: previous rescuer dict (optional).
        prev_patient_processed_results: previous patient dict (optional).

    Returns:
        (rescuer, patient) — each a processed-result dict or None.
    """
    processed_results = []

    # Area threshold from the previous frame's combined footprint, if known.
    # Consistency: reuse the shared helper instead of duplicating the math.
    threshold = None
    if prev_rescuer_processed_results and prev_patient_processed_results:
        rescuer_area, patient_area = self._calculate_bbox_areas(
            prev_rescuer_processed_results["bounding_box"],
            prev_patient_processed_results["bounding_box"])
        threshold = rescuer_area + patient_area

    for i, (box, keypoints) in enumerate(zip(results.boxes.xywh.cpu().numpy(),
                                             results.keypoints.xy.cpu().numpy())):
        try:
            # Convert (center, size) box to [x1, y1, x2, y2].
            x_center, y_center, width, height = box
            bounding_box = [
                x_center - width / 2,   # x1
                y_center - height / 2,  # y1
                x_center + width / 2,   # x2
                y_center + height / 2,  # y2
            ]

            # Skip implausibly large boxes. BUGFIX: compare against None so
            # a legitimate zero threshold no longer disables filtering.
            if threshold is not None:
                box_area = width * height
                if box_area > threshold * 1.2:  # 20% tolerance
                    print(f"Filtered oversized box {i} (area: {box_area:.1f} > threshold: {threshold:.1f})")
                    continue

            processed_results.append({
                'original_index': i,
                'bounding_box': bounding_box,
                'bounding_box_center': self._calculate_bounding_box_center(bounding_box),
                'verticality_score': self._calculate_verticality_score(bounding_box),
                'keypoints': keypoints,
            })

        except Exception as e:
            print(f"Error processing detection {i}: {e}")
            continue

    # Patient: a horizontal detection. With several candidates, keep the one
    # lowest in the image (largest y), i.e. most likely lying on the ground.
    # BUGFIX: the previous ascending sort selected the *topmost* person,
    # contradicting the stated intent.
    patient_candidates = [res for res in processed_results
                          if res['verticality_score'] == 0]
    if len(patient_candidates) > 1:
        patient_candidates = sorted(patient_candidates,
                                    key=lambda x: x['bounding_box_center'][1],
                                    reverse=True)[:1]
    patient = patient_candidates[0] if patient_candidates else None

    # Rescuer: the vertical detection nearest to the patient.
    rescuer = None
    if patient:
        potential_rescuers = [
            res for res in processed_results
            if res['verticality_score'] == 1
            and res['original_index'] != patient['original_index']
        ]

        if potential_rescuers:
            rescuer = min(potential_rescuers,
                          key=lambda x: self._calculate_distance(
                              x['bounding_box_center'],
                              patient['bounding_box_center']))

    return rescuer, patient
|
| 146 |
+
|
| 147 |
+
def draw_rescuer_and_patient(self, frame):
    """Annotate the frame with labeled boxes and keypoints for both roles.

    The rescuer is drawn in green, the patient in red; either may be
    absent. Returns the annotated image.
    """
    annotator = Annotator(frame)

    # (processed results, box label, BGR color, name used in error messages)
    roles = (
        (self.rescuer_processed_results, "Rescuer A", (0, 255, 0), "rescuer"),
        (self.patient_processed_results, "Patient B", (0, 0, 255), "patient"),
    )

    for person, label, color, err_name in roles:
        if not person:
            continue
        try:
            x1, y1, x2, y2 = map(int, person["bounding_box"])
            annotator.box_label((x1, y1, x2, y2), label, color=color)

            if "keypoints" in person:
                annotator.kpts(person["keypoints"], shape=frame.shape[:2])
        except Exception as e:
            print(f"Error drawing {err_name}: {str(e)}")

    return annotator.result()
|
| 176 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
CPR/shoulders_analyzer.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# analyzers.py
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from CPR.keypoints import CocoKeypoints
|
| 5 |
+
|
| 6 |
+
class ShouldersAnalyzer:
    """Analyzes shoulder distances and posture"""

    def __init__(self):
        # Most recent shoulder distance (px); None until first measurement.
        self.shoulder_distance = None
        # Accumulated measurements; cleared by reset_shoulder_distances().
        self.shoulder_distance_history = []

    def calculate_shoulder_distance(self, rescuer_keypoints):
        """Return the pixel distance between the rescuer's shoulders.

        Returns None when keypoints are missing or the lookup fails.
        """
        if rescuer_keypoints is None:
            return None

        try:
            l_shoulder = rescuer_keypoints[CocoKeypoints.LEFT_SHOULDER.value]
            r_shoulder = rescuer_keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
            return np.linalg.norm(np.array(l_shoulder) - np.array(r_shoulder))
        except Exception as e:
            print(f"Shoulder distance error: {e}")
            return None

    def reset_shoulder_distances(self):
        """Clear the accumulated distance history."""
        self.shoulder_distance_history = []
|
CPR/wrists_midpoint_analyzer.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from CPR.keypoints import CocoKeypoints
|
| 4 |
+
|
| 5 |
+
class WristsMidpointAnalyzer:
    """Analyzes and tracks the midpoint between the rescuer's wrists."""

    def __init__(self, allowed_distance_between_wrists=170):
        # Maximum pixel gap between wrists to still treat them as joined.
        self.allowed_distance_between_wrists = allowed_distance_between_wrists
        # Latest valid midpoint as (x, y) ints, or None when unknown.
        self.midpoint = None
        # Record of past midpoints; cleared via reset_midpoint_history().
        self.midpoint_history = []

    def detect_wrists_midpoint(self, rescuer_keypoints):
        """Return the (x, y) pixel midpoint between the wrists, or None.

        None is returned when keypoints are missing, when the wrists are
        farther apart than `allowed_distance_between_wrists`, or when the
        keypoint lookup fails.
        """
        if rescuer_keypoints is None:
            return None
        try:
            l_wrist = rescuer_keypoints[CocoKeypoints.LEFT_WRIST.value]
            r_wrist = rescuer_keypoints[CocoKeypoints.RIGHT_WRIST.value]

            # Wrists far apart means no compression grip -> no midpoint.
            wrist_gap = np.linalg.norm(np.array(l_wrist) - np.array(r_wrist))
            if wrist_gap > self.allowed_distance_between_wrists:
                return None

            return (int((l_wrist[0] + r_wrist[0]) / 2),
                    int((l_wrist[1] + r_wrist[1]) / 2))
        except Exception as e:
            print(f"Midpoint tracking error: {e}")
            return None

    def draw_midpoint(self, frame):
        """Visualize the current midpoint on `frame` (no-op when unset)."""
        if self.midpoint is None:
            return frame
        try:
            mx, my = self.midpoint
            cv2.circle(frame, self.midpoint, 8, (0, 255, 0), -1)
            cv2.putText(frame, "MIDPOINT", (mx + 5, my - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
        except Exception as e:
            print(f"Midpoint drawing error: {e}")
        return frame

    def reset_midpoint_history(self):
        """Drop all stored midpoints."""
        self.midpoint_history = []
|
CPRRealTime/analysis_socket_server.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import socket
|
| 2 |
+
import json
|
| 3 |
+
from threading import Thread
|
| 4 |
+
from queue import Queue
|
| 5 |
+
import threading
|
| 6 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 7 |
+
import queue
|
| 8 |
+
|
| 9 |
+
class AnalysisSocketServer:
    """TCP server that streams JSON-serialized CPR warnings to clients.

    Producers put warning dicts on `warning_queue`; a per-client handler
    thread drains the queue and sends each entry as one newline-terminated
    JSON line.
    """

    def __init__(self, host='localhost', port=5000):
        # Address the listening socket will bind to.
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Most recently accepted client connection (overwritten per accept).
        self.conn = None
        # Run flag checked by all background loops; cleared by stop_server().
        self.running = False
        # Producer side puts warning dicts here; _handle_client drains it.
        self.warning_queue = Queue()
        # Set whenever a client connects; consumed by wait_for_connection().
        self.connection_event = threading.Event()
        cpr_logger.info(f"[SOCKET] Server initialized on {host}:{port}")

    def start_server(self):
        """Bind, listen, and run the accept loop in a daemon thread."""
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))
        self.sock.listen()
        self.running = True
        Thread(target=self._accept_connections, daemon=True).start()

    def _accept_connections(self):
        """Accept clients until stopped; each gets its own handler thread."""
        while self.running:
            try:
                self.conn, addr = self.sock.accept()
                cpr_logger.info(f"[SOCKET] Connected by {addr}")
                self.connection_event.set()  # Signal that connection was made
                Thread(target=self._handle_client, args=(self.conn,), daemon=True).start()
            except Exception as e:
                #! Not an error: stop_server() closes the socket, which makes
                # the blocking accept() raise and brings us here on shutdown.
                cpr_logger.error(f"[SOCKET] Connection error: {str(e)}")

    def wait_for_connection(self, timeout=None):
        """Block until a client connects (or timeout); returns the event state."""
        #^ Set as an error for cleaner logging purposes
        cpr_logger.error("[SOCKET] Waiting for client connection...")
        # NOTE(review): clearing here can race with an accept() that set the
        # event just before this call — confirm the intended ordering.
        self.connection_event.clear()  # Reset the event
        return self.connection_event.wait(timeout)

    def _handle_client(self, conn):
        """Drain the warning queue, sending each entry as one JSON line."""
        while self.running:
            try:
                # Block until a warning is available (reduces CPU usage)
                warnings = self.warning_queue.get(block=True, timeout=0.1)
                serialized = json.dumps(warnings) + "\n"
                conn.sendall(serialized.encode('utf-8'))
            except queue.Empty:
                continue  # Timeout allows checking self.running periodically
            except (BrokenPipeError, ConnectionResetError):
                cpr_logger.error("[SOCKET] Client disconnected")
                break
            except Exception as e:
                cpr_logger.error(f"[SOCKET] Error: {str(e)}")
                break
        conn.close()

    def stop_server(self):
        """Stop all loops and close the listening socket."""
        self.running = False
        self.sock.close()
        cpr_logger.info("[SOCKET] Server stopped")
|
CPRRealTime/chest_initializer.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from CPRRealTime.keypoints import CocoKeypoints
|
| 4 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 5 |
+
|
| 6 |
+
class ChestInitializer:
    """Handles chest point detection with validations in estimation."""

    def __init__(self):
        # Latest raw chest parameters (cx, cy, cw, ch), or None.
        self.chest_params = None
        # Per-frame history of chest parameters (may contain None entries).
        self.chest_params_history = []
        # Smoothed/expected chest parameters used for drawing.
        self.expected_chest_params = None

    def estimate_chest_region(self, keypoints, bounding_box, frame_width, frame_height):
        """Estimate and validate the chest region for one frame.

        Args:
            keypoints: COCO-ordered keypoints of the patient.
            bounding_box: (x1, y1, x2, y2) of the patient's bbox.
            frame_width, frame_height: frame size used for clamping.

        Returns:
            (cx, cy, cw, ch) of the chest region, or None when the clamped
            region degenerates or keypoint lookup fails.
        """
        try:
            box_x1, box_y1, box_x2, box_y2 = bounding_box
            box_height = abs(box_y2 - box_y1)

            l_shoulder = keypoints[CocoKeypoints.LEFT_SHOULDER.value]
            r_shoulder = keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
            shoulder_mid = np.array([(l_shoulder[0] + r_shoulder[0]) / 2,
                                     (l_shoulder[1] + r_shoulder[1]) / 2])

            # Orientation heuristic: the bbox corner the shoulder midpoint is
            # horizontally closest to decides which way the torso extends and
            # therefore which direction to offset the chest centre.
            if abs(shoulder_mid[0] - box_x2) < abs(shoulder_mid[0] - box_x1):
                # Orientation "right": chest offset to the left of shoulders.
                center = np.array([shoulder_mid[0] - 0.3 * box_height,
                                   shoulder_mid[1] - 0.1 * box_height])
            else:
                # Orientation "left": chest offset to the right of shoulders.
                center = np.array([shoulder_mid[0] + 1.0 * box_height,
                                   shoulder_mid[1] - 0.1 * box_height])

            # Chest extent scaled from bbox height (empirical factors).
            half_w = box_height * 0.8 / 2
            half_h = box_height * 1.75 / 2

            # Corner coordinates clamped to frame boundaries.
            left = max(0, min(center[0] - half_w, frame_width - 1))
            top = max(0, min(center[1] - half_h, frame_height - 1))
            right = max(0, min(center[0] + half_w, frame_width - 1))
            bottom = max(0, min(center[1] + half_h, frame_height - 1))

            # Degenerate after clamping -> reject.
            if right <= left or bottom <= top:
                return None

            return ((left + right) / 2, (top + bottom) / 2,
                    right - left, bottom - top)

        except (IndexError, TypeError, ValueError) as e:
            cpr_logger.error(f"Chest estimation error: {e}")
            return None

    def estimate_chest_region_weighted_avg(self, frame_width, frame_height, window_size=60, min_samples=3):
        """Stabilize chest parameters by weighted-averaging recent history.

        Recent entries of `self.chest_params_history` are averaged with
        exponentially increasing weights (newest weighted most), clamped to
        the frame, and rounded.

        Args:
            frame_width, frame_height: frame size used for clamping.
            window_size: number of most recent history entries considered.
            min_samples: minimum non-None entries required.

        Returns:
            (cx, cy, cw, ch) as integers within frame boundaries, or None
            when there is insufficient data or the rectangle degenerates.
        """
        if not self.chest_params_history:
            return None

        # Keep only the non-None entries within the sliding window.
        recent = [entry for entry in self.chest_params_history[-window_size:]
                  if entry is not None]
        if len(recent) < min_samples:
            return None

        # Float32 array preserves sub-pixel precision during averaging.
        samples = np.array(recent, dtype=np.float32)

        # Exponentially increasing weights favour the newest samples.
        w = np.exp(np.linspace(1, 3, len(samples)))
        w = w / w.sum()

        try:
            avg_cx, avg_cy, avg_cw, avg_ch = np.average(samples, axis=0, weights=w)

            # Rectangle corners in float space, clamped to the frame.
            left = max(0.0, avg_cx - avg_cw / 2)
            top = max(0.0, avg_cy - avg_ch / 2)
            right = min(float(frame_width - 1), avg_cx + avg_cw / 2)
            bottom = min(float(frame_height - 1), avg_cy + avg_ch / 2)

            # Round to integers only after all arithmetic.
            left, top, right, bottom = map(round, (left, top, right, bottom))

            if right <= left or bottom <= top:
                return None

            return ((left + right) // 2,   # cx
                    (top + bottom) // 2,   # cy
                    right - left,          # cw
                    bottom - top)          # ch

        except Exception as e:
            cpr_logger.error(f"Chest region estimation error: {e}")
            return None

    def draw_expected_chest_region(self, frame):
        """Draw the expected chest region onto `frame` (no validation)."""
        if self.expected_chest_params is None:
            return frame

        cx, cy, cw, ch = self.expected_chest_params
        top_left = (int(cx - cw / 2), int(cy - ch / 2))
        bottom_right = (int(cx + cw / 2), int(cy + ch / 2))

        teal = (128, 128, 0)
        cv2.rectangle(frame, top_left, bottom_right, teal, 5)
        cv2.circle(frame, (int(cx), int(cy)), 8, teal, -1)
        cv2.putText(frame, "EXPECTED CHEST",
                    (top_left[0], max(10, top_left[1] - 5)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, teal, 2)

        return frame
|
CPRRealTime/client.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Minimal TCP client that prints warning messages from the analysis server.

Connects to the AnalysisSocketServer and logs each newline-delimited JSON
warning payload it receives. Runs immediately on import/execution.
"""
import socket
import json

from CPRRealTime.logging_config import cpr_logger

HOST = 'localhost'  # The server's hostname or IP address
PORT = 5000  # The port used by the server

#! Not an error

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    #^ Set as an error for cleaner logging purposes
    cpr_logger.error(f"Connected to {HOST}:{PORT}")

    try:
        while True:
            data = s.recv(1024)
            # Empty read means the server closed the connection.
            if not data:
                break

            # Split messages (in case multiple JSONs in buffer).
            # NOTE(review): a JSON message split across two recv() chunks is
            # not reassembled and will be logged as invalid — confirm payloads
            # always fit one chunk.
            for line in data.decode('utf-8').split('\n'):
                if line.strip():
                    try:
                        warnings = json.loads(line)
                        cpr_logger.error("\nReceived warnings:")
                        cpr_logger.error(f"Status: {warnings['status']}")
                        cpr_logger.error(f"Posture Warnings: {warnings['posture_warnings']}")
                        cpr_logger.error(f"Rate/Depth Warnings: {warnings['rate_and_depth_warnings']}")
                    except json.JSONDecodeError:
                        cpr_logger.error("Received invalid JSON")
    except KeyboardInterrupt:
        cpr_logger.error("Disconnecting...")
|
CPRRealTime/graph_plotter.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import matplotlib.pyplot as plt
|
| 3 |
+
import sys
|
| 4 |
+
import cv2
|
| 5 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 6 |
+
from matplotlib.ticker import MultipleLocator
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
class GraphPlotter:
    """Renders the post-session CPR analysis plot: smoothed motion curves per
    compression chunk, detected peaks, depth/rate annotations, and shaded
    posture-error regions with per-error marker symbols."""

    def __init__(self):
        # Per-chunk smoothed vertical-motion signals.
        self.chunks_y_preprocessed = []
        # Per-chunk peak indices (into the chunk's signal array).
        self.chunks_peaks = []
        # Per-chunk compression depth (cm) and rate (cpm).
        self.chunks_depth = []
        self.chunks_rate = []
        # Per-chunk (start_frame, end_frame) pairs.
        self.chunks_start_and_end_indices = []
        # Dicts with 'start_frame', 'end_frame', 'posture_warnings'.
        self.posture_warnings_regions = []
        self.sampling_interval_in_frames = 0
        self.fps = None

        # Marker shape + color per posture warning (keys must match the
        # warning strings produced by the posture analyzer exactly).
        self.error_symbols = {
            "Right arm bent!": ('o', '#A61D1D'),  # circle
            "Left arm bent!": ('s', '#A61D1D'),  # square
            "Left hand not on chest!": ('P', '#A61D1D'),  # plus
            "Right hand not on chest!": ('*', '#A61D1D'),  # star
            "Both hands not on chest!": ('D', '#A61D1D')  # diamond
        }

        self.annotation_y_level = None  # Will store our target y-position

    def _assign_graph_data(self, chunks_y_preprocessed, chunks_peaks, chunks_depth, chunks_rate, chunks_start_and_end_indices, posture_warnings_regions, sampling_interval_in_frames, fps):
        """Assign data members for the class"""
        self.chunks_y_preprocessed = chunks_y_preprocessed
        self.chunks_peaks = chunks_peaks
        self.chunks_depth = chunks_depth
        self.chunks_rate = chunks_rate
        self.chunks_start_and_end_indices = chunks_start_and_end_indices
        self.posture_warnings_regions = posture_warnings_regions
        self.sampling_interval_in_frames = sampling_interval_in_frames
        self.fps = fps  # Store FPS

        cpr_logger.info(f"[Graph Plotter] Data members assigned with {len(self.chunks_start_and_end_indices)} chunks and {len(self.posture_warnings_regions)} error regions for a sampling interval of {self.sampling_interval_in_frames} frames and FPS {self.fps}")

    def _plot_single_chunk(self, ax, chunk, idx, prev_last_point, prev_chunk_end):
        """Plot one chunk's motion curve, peaks, and metric annotation.

        Returns (last-point dict, end_frame) so the caller can connect
        contiguous chunks with a bridging line segment.
        """
        (start_frame, end_frame), depth, rate = chunk
        # Convert frames to time
        chunk_frames = np.arange(start_frame, end_frame + 1, self.sampling_interval_in_frames)
        chunk_times = chunk_frames / self.fps  # Convert to seconds
        y_preprocessed = self.chunks_y_preprocessed[idx]
        peaks = self.chunks_peaks[idx]

        # Check if chunks are contiguous and need connection (frame-based logic)
        if (prev_chunk_end is not None and
            start_frame == prev_chunk_end + self.sampling_interval_in_frames and
            prev_last_point is not None):

            # Convert connection points to seconds
            connect_start = prev_chunk_end / self.fps
            connect_end = start_frame / self.fps
            connect_times = [connect_start, connect_end]

            cpr_logger.info(f"[Graph Plotter] Connecting chunk {idx+1} to previous chunk (time {connect_start:.2f}-{connect_end:.2f}s)")
            ax.plot(connect_times, [prev_last_point['y_preprocessed'], y_preprocessed[0]],
                    color="#2F5597", linewidth=2.5)

        # Plot current chunk data
        cpr_logger.info(f"[Graph Plotter] Plotting chunk {idx+1} (time {chunk_times[0]:.2f}-{chunk_times[-1]:.2f}s)")
        # Legend labels only on the first chunk to avoid duplicate entries.
        smooth_label = "Motion" if idx == 0 else ""
        peaks_label = "Peaks" if idx == 0 else ""

        # Motion curve
        ax.plot(chunk_times, y_preprocessed,
                color="#2F5597", linewidth=2.5,
                marker='o', markersize=4,
                markerfacecolor='#2F5597', markeredgecolor='#2F5597',
                label=smooth_label)

        # Peaks
        if peaks.size > 0:
            ax.plot(chunk_times[peaks], y_preprocessed[peaks],
                    "x", color="#ED7D31", markersize=8,
                    label=peaks_label)

        # Annotate chunk metrics (time-based); skip chunks without real values.
        if (depth is not None and rate is not None) and (depth > 0 and rate > 0):
            mid_time = (start_frame + end_frame) / (2 * self.fps)
            cpr_logger.info(f"[Graph Plotter] Chunk {idx+1} metrics: {depth:.1f}cm depth, {rate:.1f}cpm rate")

            # Calculate (once, from the first annotated chunk) or reuse the
            # shared annotation y-level so all metric boxes line up.
            if self.annotation_y_level is None:
                y_range = np.max(y_preprocessed) - np.min(y_preprocessed)
                self.annotation_y_level = np.min(y_preprocessed) + y_range * 0.5  # halfway up the signal range
                cpr_logger.info(f"[Graph Plotter] Setting annotation y-level to {self.annotation_y_level:.2f}")

            # Annotation box at the consistent y-level
            ax.annotate(f"Depth: {depth:.1f}cm\nRate: {rate:.1f}cpm",
                        xy=(mid_time, self.annotation_y_level),
                        xytext=(0, 10), textcoords='offset points',
                        ha='center', va='bottom', fontsize=9,
                        bbox=dict(boxstyle='round,pad=0.5',
                                  fc='#F2F2F2', ec='#595959', alpha=0.8))

        return {'y_preprocessed': y_preprocessed[-1]}, end_frame

    def _plot_error_regions(self, ax, computed_error_regions):
        """Visualize error regions with adaptive symbol sizing.

        `computed_error_regions` is a list of (start_sec, end_sec) pairs
        aligned index-for-index with `self.posture_warnings_regions`.
        Returns the legend handles created for the error symbols.
        """
        cpr_logger.info("[Graph Plotter] Rendering error regions:")

        # Size parameters
        target_width_ratio = 0.7  # Symbols occupy at most 70% of region width
        legend_size = 80  # Fixed legend symbol size (points²)

        legend_handles = []
        y_mid = np.mean(ax.get_ylim())

        # Get figure dimensions for converting data units <-> points.
        fig = ax.figure
        fig_width_points = fig.get_figwidth() * fig.dpi
        x_min, x_max = ax.get_xlim()
        data_range = x_max - x_min
        points_per_second = fig_width_points / data_range

        for idx, (start_sec, end_sec) in enumerate(computed_error_regions):
            region_width = end_sec - start_sec
            region_data = self.posture_warnings_regions[idx]
            warnings = region_data.get('posture_warnings', [])

            # Calculate max allowed width in data units (seconds)
            max_data_width = region_width * target_width_ratio

            # Convert legend size to data units
            legend_data_width = (np.sqrt(legend_size) / points_per_second)

            # Determine final symbol width (data units): no wider than the
            # region allows, no larger than the legend symbol.
            symbol_data_width = min(legend_data_width, max_data_width)

            # Convert back to points² for matplotlib's scatter `s` argument.
            symbol_point_width = symbol_data_width * points_per_second
            symbol_size = symbol_point_width ** 2

            for error in warnings:
                if error in self.error_symbols:
                    marker, color = self.error_symbols[error]

                    ax.scatter(
                        x=(start_sec + end_sec)/2,
                        y=y_mid,
                        s=symbol_size,
                        marker=marker,
                        color=color,
                        alpha=0.7,
                        edgecolors='black',
                        linewidths=0.5,
                        zorder=5
                    )

                    # Create legend entry once per distinct error label.
                    if not any(error == h.get_label() for h in legend_handles):
                        legend_handles.append(
                            ax.scatter([], [],
                                       s=legend_size,
                                       marker=marker,
                                       color=color,
                                       edgecolors='black',
                                       linewidths=0.5,
                                       alpha=0.7,
                                       label=error)
                        )

            # Shade the full error region behind everything else.
            ax.axvspan(start_sec, end_sec,
                       color='#FCE4D6', alpha=0.3, zorder=1)

        # Only set axis labels if the caller hasn't already.
        if not ax.get_xlabel():
            ax.set_xlabel("Time (seconds)", fontsize=10)
        if not ax.get_ylabel():
            ax.set_ylabel("Signal Value", fontsize=10)

        return legend_handles

    def plot_motion_curve_for_all_chunks(self, chunks_y_preprocessed, chunks_peaks, chunks_depth, chunks_rate, chunks_start_and_end_indices, posture_warnings_regions, sampling_interval_in_frames, fps, plot_output_path):
        """Plot combined analysis with connected chunks and proper error regions.

        Stores the provided data on the instance, renders all chunks and
        error regions onto one figure, optionally saves to
        `plot_output_path`, then shows the figure (blocking).
        """

        self._assign_graph_data(chunks_y_preprocessed, chunks_peaks, chunks_depth, chunks_rate, chunks_start_and_end_indices, posture_warnings_regions, sampling_interval_in_frames, fps)
        cpr_logger.info("[Graph Plotter] Starting to plot motion curve for all chunks")

        # Create figure even if there's only error regions to plot
        plt.figure(figsize=(16, 8))
        ax = plt.gca()
        ax.xaxis.set_major_locator(MultipleLocator(5))

        # Plot CPR chunks if they exist
        if self.chunks_start_and_end_indices:
            # Order chunks by start frame so the bridging logic sees them
            # in chronological order.
            sorted_chunks = sorted(zip(self.chunks_start_and_end_indices,
                                       self.chunks_depth,
                                       self.chunks_rate),
                                   key=lambda x: x[0][0])
            cpr_logger.info(f"[Graph Plotter] Processing {len(sorted_chunks)} CPR chunks")

            prev_last_point = None
            prev_chunk_end = None

            for idx, chunk in enumerate(sorted_chunks):
                cpr_logger.info(f"[Graph Plotter] Rendering chunk {idx+1}/{len(sorted_chunks)}")
                prev_last_point, prev_chunk_end = self._plot_single_chunk(ax, chunk, idx, prev_last_point, prev_chunk_end)

            self._print_analysis_details(sorted_chunks)
        else:
            cpr_logger.info("[Graph Plotter] No chunk data available for plotting")
            # Set reasonable default axis if only plotting errors
            ax.set_ylim(0, 100)  # Example default Y-axis range for position

        # Always plot error regions if they exist (frame -> second conversion).
        computed_error_regions = [(er['start_frame']/self.fps, er['end_frame']/self.fps)
                                  for er in self.posture_warnings_regions]

        # Collect chunk legend entries created so far...
        handles, labels = ax.get_legend_handles_labels()

        # ...then the error-symbol handles returned by _plot_error_regions.
        error_legend_handles = self._plot_error_regions(ax, computed_error_regions)

        # Merge both sets of handles/labels
        if error_legend_handles:
            handles += error_legend_handles
            labels += [h.get_label() for h in error_legend_handles]

        # Remove duplicate labels (keep first occurrence of each).
        unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]

        # Create single horizontal legend at bottom
        if unique:
            ax.legend(
                *zip(*unique),
                loc='upper center',
                bbox_to_anchor=(0.5, -0.08),
                ncol=len(unique),
                fontsize=8,
                handletextpad=0.3,
                columnspacing=1.5,
                framealpha=0.9,
                borderpad=0.7
            )
            plt.tight_layout(rect=[0, 0.025, 1, 1])

        plt.xlabel("Time (seconds)")
        plt.ylabel("Vertical Position (px)")
        plt.title("Complete CPR Analysis with Metrics", pad=20)  # pad leaves room above the axes

        plt.grid(True)
        cpr_logger.info(f"\n[Graph Plotter] Finalizing plot layout")

        # Final layout pass; top reduced to 0.95 so the padded title fits.
        plt.tight_layout(rect=[0, 0.025, 1, 0.95])

        if plot_output_path:
            # Ensure directory exists
            os.makedirs(os.path.dirname(plot_output_path), exist_ok=True)
            plt.savefig(plot_output_path, dpi=300, bbox_inches='tight')
            cpr_logger.info(f"[Graph Plotter] Plot saved to {plot_output_path}")

        plt.show()
        cpr_logger.info("[Graph Plotter] Plot display complete")

    def _print_analysis_details(self, sorted_chunks):
        """Combined helper for printing chunks and error regions in seconds"""
        cpr_logger.info(f"\n\n=== CPR Chunk Analysis ===")
        display_idx = 0  # Separate counter for displayed indices

        # Convert frame numbers to seconds using video FPS
        fps = self.fps  # Get FPS from class instance

        for ((start_frame, end_frame), depth, rate) in sorted_chunks:
            # Skip chunks with both values at 0
            if depth == 0 and rate == 0:
                continue

            # Convert frames to seconds
            start_sec = start_frame / fps
            end_sec = end_frame / fps
            duration_sec = (end_frame - start_frame + 1) / fps  # +1 to include both endpoints

            cpr_logger.info(f"[Graph Plotter] Chunk {display_idx+1}: "
                            f"Time {start_sec:.2f}s - {end_sec:.2f}s ({duration_sec:.2f}s), "
                            f"Depth: {depth:.1f}cm, Rate: {rate:.1f}cpm")

            display_idx += 1

        cpr_logger.info(f"\n\n=== Error Region Analysis ===")

        for i, region in enumerate(self.posture_warnings_regions):
            start_frame = region['start_frame']
            end_frame = region['end_frame']
            errors = region['posture_warnings']

            # Convert to seconds
            start_sec = start_frame / fps
            end_sec = end_frame / fps
            error_str = ", ".join(errors) if errors else "No errors detected"

            cpr_logger.info(f"[Graph Plotter] Region {i+1}: "
                            f"Time {start_sec:.2f}s - {end_sec:.2f}s - {error_str}")

        cpr_logger.info(f"\n\n")
|
CPRRealTime/keypoints.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# keypoints.py
"""COCO keypoint index definitions shared by the pose-analysis modules."""
from enum import Enum, unique


@unique
class CocoKeypoints(Enum):
    """Enum for the 17 COCO keypoints.

    Each member's value is the index of that joint in a COCO-format
    keypoint array, i.e. ``kps[CocoKeypoints.LEFT_SHOULDER.value]``.
    ``@unique`` guards against two joints accidentally sharing an index
    (duplicates would silently alias under plain Enum).
    """
    NOSE = 0
    LEFT_EYE = 1
    RIGHT_EYE = 2
    LEFT_EAR = 3
    RIGHT_EAR = 4
    LEFT_SHOULDER = 5
    RIGHT_SHOULDER = 6
    LEFT_ELBOW = 7
    RIGHT_ELBOW = 8
    LEFT_WRIST = 9
    RIGHT_WRIST = 10
    LEFT_HIP = 11
    RIGHT_HIP = 12
    LEFT_KNEE = 13
    RIGHT_KNEE = 14
    LEFT_ANKLE = 15
    RIGHT_ANKLE = 16
|
CPRRealTime/logging_config.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# logging_config.py
import logging

# 1. Set default log level here (change this value as needed)
DEFAULT_LOG_LEVEL = logging.INFO  # Switch to logging.ERROR for errors-only by default

# 2. Configure logger with default level
cpr_logger = logging.getLogger("CPR-Analyzer")
cpr_logger.setLevel(DEFAULT_LOG_LEVEL)

# 3. Create console handler with formatter
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)

# 4. Add handler to logger.
#    Guarded so that importing this module under more than one name
#    (e.g. both "CPRRealTime.logging_config" and "logging_config") does not
#    register a second handler and emit every record twice.
if not cpr_logger.handlers:
    cpr_logger.addHandler(console_handler)

# 5. Prevent propagation to root logger
cpr_logger.propagate = False
CPRRealTime/main.py
ADDED
|
@@ -0,0 +1,723 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# main.py
|
| 2 |
+
import cv2
|
| 3 |
+
import time
|
| 4 |
+
import math
|
| 5 |
+
import numpy as np
|
| 6 |
+
import os # Added for path handling
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
from CPRRealTime.pose_estimation import PoseEstimator
|
| 10 |
+
from CPRRealTime.role_classifier import RoleClassifier
|
| 11 |
+
from CPRRealTime.chest_initializer import ChestInitializer
|
| 12 |
+
from CPRRealTime.metrics_calculator import MetricsCalculator
|
| 13 |
+
from CPRRealTime.posture_analyzer import PostureAnalyzer
|
| 14 |
+
from CPRRealTime.wrists_midpoint_analyzer import WristsMidpointAnalyzer
|
| 15 |
+
from CPRRealTime.shoulders_analyzer import ShouldersAnalyzer
|
| 16 |
+
from CPRRealTime.graph_plotter import GraphPlotter
|
| 17 |
+
from CPRRealTime.warnings_overlayer import WarningsOverlayer
|
| 18 |
+
from CPRRealTime.threaded_camera import ThreadedCamera
|
| 19 |
+
from CPRRealTime.analysis_socket_server import AnalysisSocketServer
|
| 20 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 21 |
+
|
| 22 |
+
class CPRAnalyzer:
|
| 23 |
+
"""Main CPR analysis pipeline with execution tracing"""
|
| 24 |
+
|
| 25 |
+
    def __init__(self, input_video, video_output_path, plot_output_path, requested_fps):
        """Build the full real-time CPR analysis pipeline.

        Side effects performed here: starts the analysis socket server, opens the
        threaded camera on `input_video`, constructs every analyzer component, and
        runs one dummy inference to warm up the pose model.

        Args:
            input_video: camera/video source handed to ThreadedCamera
                (presumably an index or path — confirm against ThreadedCamera).
            video_output_path: path where the annotated video is written.
            plot_output_path: path used later by the graph plotter.
            requested_fps: frame rate requested from the camera; the actual
                rate is read back from the camera object.
        """
        cpr_logger.info(f"[INIT] Initializing CPR Analyzer")

        #& Frame counter (starts at -1 so the first frame read becomes index 0)
        self.frame_counter = -1
        cpr_logger.info(f"[INIT] Frame counter initialized")

        # Counts only frames that were actually analyzed (not skipped by sampling).
        self.processed_frame_counter = 0
        cpr_logger.info(f"[INIT] Processed frame counter initialized")

        #!$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        #& Add socket server (pushes formatted warnings to connected clients)
        self.socket_server = AnalysisSocketServer()
        self.socket_server.start_server()
        cpr_logger.info(f"[INIT] Socket server started")

        #& Open the camera source and get the FPS
        self.cap = ThreadedCamera(input_video, requested_fps)
        self.fps = self.cap.fps
        cpr_logger.info(f"[INIT] Camera FPS: {self.fps}")

        #& Output video state; the writer itself is created lazily on the first
        #& composed frame (see _initialize_video_writer).
        self.video_output_path = video_output_path
        self.video_writer = None
        self._writer_initialized = False
        cpr_logger.info(f"[INIT] Output path: {self.video_output_path}")

        #& For the graph plotter
        self.plot_output_path = plot_output_path

        #& Initialize system components
        self.pose_estimator = PoseEstimator(min_confidence=0.2)
        self.role_classifier = RoleClassifier()
        self.chest_initializer = ChestInitializer()
        # 45*0.65: assumed real-world shoulder width in cm used for pixel-to-cm
        # scaling — TODO confirm calibration against MetricsCalculator.
        self.metrics_calculator = MetricsCalculator(shoulder_width_cm=45*0.65)

        # Remember the threshold conditions if you need to adjust them:
        # if avg_right > self.right_arm_angle_threshold: error
        # if avg_left < self.left_arm_angle_threshold: error

        self.posture_analyzer = PostureAnalyzer(right_arm_angle_threshold=220, left_arm_angle_threshold=160, wrist_distance_threshold=170, history_length_to_average=10)
        self.wrists_midpoint_analyzer = WristsMidpointAnalyzer()
        self.shoulders_analyzer = ShouldersAnalyzer()
        self.graph_plotter = GraphPlotter()
        self.warnings_overlayer = WarningsOverlayer()
        cpr_logger.info("[INIT] System components initialized")

        #& Warm up pose estimator with dummy data so the first real frame
        #& does not pay the model-loading latency.
        dummy_frame = np.zeros((480, 640, 3), dtype=np.uint8)
        self.pose_estimator.detect_poses(dummy_frame)  # Force model loading
        cpr_logger.info("[INIT] Pose estimator warmed up with dummy data")

        #& Keep track of previous results for continuity when a detection fails
        self.prev_rescuer_processed_results = None
        self.prev_patient_processed_results = None
        self.prev_chest_params = None
        self.prev_midpoint = None
        self.prev_pose_results = None
        cpr_logger.info("[INIT] Previous results initialized")

        #& Fundamental timing parameters (in seconds)
        self.MIN_ERROR_DURATION = 1.0  # Require sustained errors for 1 second
        self.REPORTING_INTERVAL = 5.0  # Generate reports every 5 seconds
        self.SAMPLING_INTERVAL = 0.2  # Analyze every 0.2 seconds
        self.KEEP_RATE_AND_DEPTH_WARNINGS_INTERVAL = 3.0  # How long to keep repeating rate/depth warnings
        self.MIN_CHUNK_LENGTH_TO_REPORT = 3.0  # Chunks shorter than this are not reported

        # Derived frame counts (seconds -> sampled-frame units)
        self.sampling_interval_frames = int(round(self.fps * self.SAMPLING_INTERVAL))
        self.error_threshold_frames = int(self.MIN_ERROR_DURATION / self.SAMPLING_INTERVAL)
        self.reporting_interval_frames = int(self.REPORTING_INTERVAL / self.SAMPLING_INTERVAL)
        self.return_rate_and_depth_warnings_interval_frames = int(self.KEEP_RATE_AND_DEPTH_WARNINGS_INTERVAL / self.SAMPLING_INTERVAL)
        self.min_chunk_length_to_report_frames = int(self.MIN_CHUNK_LENGTH_TO_REPORT / self.SAMPLING_INTERVAL)

        # For cleaner feedback, the reporting interval must be an exact multiple of the sampling interval.
        ratio = self.REPORTING_INTERVAL / self.SAMPLING_INTERVAL
        assert math.isclose(ratio, round(ratio)), \
            f"Reporting interval ({self.REPORTING_INTERVAL}) must be an exact multiple of "\
            f"sampling interval ({self.SAMPLING_INTERVAL}). Actual ratio: {ratio:.2f}"

        # To be able to detect an error, the error detection window must be greater than or equal to the sampling interval.
        assert self.MIN_ERROR_DURATION >= self.SAMPLING_INTERVAL, \
            f"Error detection window ({self.MIN_ERROR_DURATION}s) must be ≥ sampling interval ({self.SAMPLING_INTERVAL}s)"

        cpr_logger.info(f"[INIT] Temporal alignment:")
        cpr_logger.info(f"  - {self.SAMPLING_INTERVAL}s sampling → {self.sampling_interval_frames} frames")
        cpr_logger.info(f"  - {self.MIN_ERROR_DURATION}s error detection → {self.error_threshold_frames} samples")
        cpr_logger.info(f"  - {self.REPORTING_INTERVAL}s reporting → {self.reporting_interval_frames} samples")

        #& Workaround for minor glitches:
        # A frame is accepted as long as this counter does not exceed the error_threshold_frames set above.
        #! These (and those in the warnings_overlayer) should exactly match the ones appended in the PostureAnalyzer.
        self.possible_warnings = [
            "Right arm bent!",
            "Left arm bent!",
            "Left hand not on chest!",
            "Right hand not on chest!",
            "Both hands not on chest!",
        ]
        self.consecutive_frames_with_posture_errors_counters = {warning: 0 for warning in self.possible_warnings}

        #& Initialize variables for reporting warnings
        self.rate_and_depth_warnings_from_the_last_report = []
        cpr_logger.info("[INIT] Rate and depth warnings from the last report initialized")

        #& Chunk and mini chunk management (Indexes and Flags)
        self.has_not_processed_a_frame_successfully_before = True
        self.waiting_to_start_new_chunk = False

        self.chunk_start_frame_index = None
        self.chunk_end_frame_index = None

        #& Posture warnings region management
        self.prev_is_part_of_a_posture_warnings_region = False
        self.posture_warnings_region_start_frame_index = None
        self.posture_warnings_region_end_frame_index = None

        # Per-region records: dicts of start/end frame plus the warnings observed.
        self.posture_warnings = []
        self.rate_and_depth_warnings = []

        #& For Formatted Warnings (cached so skipped frames can still report)
        self.cached_posture_warnings = []
        self.cached_rate_and_depth_warnings = []
        self.return_rate_and_depth_warnings_interval_frames_counter = self.return_rate_and_depth_warnings_interval_frames
        cpr_logger.info("[INIT] Formatted warnings initialized")
+
def _initialize_video_writer(self, frame):
|
| 154 |
+
"""Initialize writer with safe fallback options"""
|
| 155 |
+
height, width = frame.shape[:2]
|
| 156 |
+
effective_fps = self.fps / max(1, self.sampling_interval_frames)
|
| 157 |
+
|
| 158 |
+
# Try different codec/container combinations
|
| 159 |
+
for codec, ext, fmt in [('avc1', 'mp4', 'mp4v'), # H.264
|
| 160 |
+
('MJPG', 'avi', 'avi'),
|
| 161 |
+
('XVID', 'avi', 'avi')]:
|
| 162 |
+
fourcc = cv2.VideoWriter_fourcc(*codec)
|
| 163 |
+
writer = cv2.VideoWriter(self.video_output_path, fourcc, effective_fps, (width, height))
|
| 164 |
+
|
| 165 |
+
if writer.isOpened():
|
| 166 |
+
self.video_writer = writer
|
| 167 |
+
self._writer_initialized = True
|
| 168 |
+
cpr_logger.info(f"[VIDEO WRITER] Initialized with {codec} codec")
|
| 169 |
+
return
|
| 170 |
+
else:
|
| 171 |
+
writer.release()
|
| 172 |
+
|
| 173 |
+
cpr_logger.info("[ERROR] Failed to initialize any video writer!")
|
| 174 |
+
self._writer_initialized = False
|
| 175 |
+
|
| 176 |
+
def _handle_chunk_end(self):
|
| 177 |
+
"""Helper to handle chunk termination logic"""
|
| 178 |
+
self._calculate_rate_and_depth_for_chunk()
|
| 179 |
+
cpr_logger.info(f"[RUN ANALYSIS] Calculated rate and depth for the chunk")
|
| 180 |
+
|
| 181 |
+
rate_and_depth_warnings = self._get_rate_and_depth_warnings()
|
| 182 |
+
|
| 183 |
+
# If the chunk is too short, we don't want to report any warnings it might contain.
|
| 184 |
+
if (self.chunk_end_frame_index - self.chunk_start_frame_index) < self.min_chunk_length_to_report_frames:
|
| 185 |
+
rate_and_depth_warnings = []
|
| 186 |
+
|
| 187 |
+
self.cached_rate_and_depth_warnings = rate_and_depth_warnings
|
| 188 |
+
self.return_rate_and_depth_warnings_interval_frames_counter = self.return_rate_and_depth_warnings_interval_frames
|
| 189 |
+
cpr_logger.info(f"[RUN ANALYSIS] Retrieved rate and depth warnings for the chunk")
|
| 190 |
+
|
| 191 |
+
self.rate_and_depth_warnings.append({
|
| 192 |
+
'start_frame': self.chunk_start_frame_index,
|
| 193 |
+
'end_frame': self.chunk_end_frame_index,
|
| 194 |
+
'rate_and_depth_warnings': rate_and_depth_warnings,
|
| 195 |
+
})
|
| 196 |
+
cpr_logger.info(f"[RUN ANALYSIS] Assigned rate and depth warnings region data")
|
| 197 |
+
cpr_logger.info(f"[RUN ANALYSIS] Start frame: {self.chunk_start_frame_index}, End frame: {self.chunk_end_frame_index}, Rate and depth warnings: {rate_and_depth_warnings}")
|
| 198 |
+
sys.exit(1)
|
| 199 |
+
|
| 200 |
+
self.shoulders_analyzer.reset_shoulder_distances()
|
| 201 |
+
self.wrists_midpoint_analyzer.reset_midpoint_history()
|
| 202 |
+
cpr_logger.info(f"[RUN ANALYSIS] Reset shoulder distances and midpoint history for the chunk")
|
| 203 |
+
|
| 204 |
+
def _handle_posture_warnings_region_end(self):
|
| 205 |
+
"""Helper to handle posture warnings region termination"""
|
| 206 |
+
self.posture_warnings.append({
|
| 207 |
+
'start_frame': self.posture_warnings_region_start_frame_index,
|
| 208 |
+
'end_frame': self.posture_warnings_region_end_frame_index,
|
| 209 |
+
'posture_warnings': self.cached_posture_warnings.copy(),
|
| 210 |
+
})
|
| 211 |
+
cpr_logger.info(f"[RUN ANALYSIS] Assigned posture warnings region data")
|
| 212 |
+
|
| 213 |
+
def _start_new_chunk(self, chunk_type="chunk"):
|
| 214 |
+
"""Helper to initialize new chunk"""
|
| 215 |
+
self.chunk_start_frame_index = self.frame_counter
|
| 216 |
+
self.waiting_to_start_new_chunk = False
|
| 217 |
+
cpr_logger.info(f"[CHUNK] New {chunk_type} started at {self.frame_counter}")
|
| 218 |
+
|
| 219 |
+
def _start_new_posture_warnings_region(self):
|
| 220 |
+
"""Helper to initialize new posture warnings region"""
|
| 221 |
+
self.posture_warnings_region_start_frame_index = self.frame_counter
|
| 222 |
+
cpr_logger.info(f"[POSTURE WARNINGS] New region started at {self.frame_counter}")
|
| 223 |
+
|
| 224 |
+
    def run_analysis(self):
        """Run the real-time analysis loop until the stream ends or 'q' is pressed.

        Per sampled frame: estimate poses, classify roles, detect posture errors,
        manage chunk / posture-warning-region state transitions, compose and write
        an annotated frame, and push formatted warnings to the socket server.
        On exit (normal or exceptional) the `finally` block releases resources,
        computes aggregate metrics, plots the motion curve, overlays warnings on
        the processed video, then deletes the intermediate video file.
        """
        try:
            cpr_logger.info("[RUN ANALYSIS] Starting analysis")

            #& Video Capture
            # Start camera capture AFTER client connects
            self.cap.start_capture()
            cpr_logger.info("[RUN ANALYSIS] Camera capture started")

            #& Main execution loop
            main_loop_start_time = time.time()
            cpr_logger.info("[RUN ANALYSIS] Main loop started")
            while True:
                #& Read Frame
                # Get frame from camera queue
                frame = self.cap.read()

                # Check for termination sentinel (None marks end of stream)
                if frame is None:
                    cpr_logger.info("Camera stream ended")

                    # Handle any open regions before breaking
                    if self.prev_is_part_of_a_posture_warnings_region:
                        # End the posture warnings region
                        self.posture_warnings_region_end_frame_index = self.frame_counter
                        cpr_logger.info(f"[RUN ANALYSIS] End of posture warnings region detected")
                        cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region start frame: {self.posture_warnings_region_start_frame_index}")
                        cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region end frame: {self.posture_warnings_region_end_frame_index}")
                        self._handle_posture_warnings_region_end()

                    elif self.chunk_start_frame_index is not None:
                        # End the current chunk
                        self.chunk_end_frame_index = self.frame_counter
                        cpr_logger.info(f"[RUN ANALYSIS] End of chunk detected")
                        cpr_logger.info(f"[RUN ANALYSIS] Chunk start frame: {self.chunk_start_frame_index}")
                        cpr_logger.info(f"[RUN ANALYSIS] Chunk end frame: {self.chunk_end_frame_index}")
                        self._handle_chunk_end()
                    break

                #& Increment frame counter
                self.frame_counter += 1

                cpr_logger.info(f"\n[FRAME {int(self.frame_counter)}]")

                #& Check if you want to skip the frame (temporal subsampling)
                if self.frame_counter % self.sampling_interval_frames != 0:
                    #^ Formatted Warnings
                    # Return the cached warnings so clients still get feedback on skipped frames
                    formatted_warnings = self._format_warnings()
                    cpr_logger.info(f"[RUN ANALYSIS] Formatted warnings: {formatted_warnings}")

                    #!$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                    self.socket_server.warning_queue.put(formatted_warnings)
                    cpr_logger.info(f"[RUN ANALYSIS] Sent warnings to socket server")

                    cpr_logger.info(f"[SKIP FRAME] Skipping frame")
                    continue

                #& Rotate frame to portrait orientation
                frame = self._handle_frame_rotation(frame)
                cpr_logger.info(f"[RUN ANALYSIS] Rotated frame")

                #& Process frame
                # If there are (sustained) posture warnings, then we did not even attempt to detect the midpoint.
                # If there were no (sustained) posture warnings, we attempt to detect the midpoint which might either succeed or fail.
                # This is why we need two variables to indicate what happened inside the process_frame function.
                posture_warnings, has_appended_midpoint = self._process_frame(frame)
                cpr_logger.info(f"[RUN ANALYSIS] Processed frame")

                #& Posture Warnings Region Setting Flags
                # When a frame is accepted, its warnings -if any- are reset.
                # So if the function did return any errors this means that the frame is not accepted and is part of a posture warnings region.
                is_part_of_a_posture_warnings_region = len(posture_warnings) > 0

                # Then we need to decide if the frame marks a transition between a chunk region and a posture warnings region.
                is_start_of_posture_warnings_region = (not self.prev_is_part_of_a_posture_warnings_region) and is_part_of_a_posture_warnings_region
                is_end_of_posture_warnings_region = self.prev_is_part_of_a_posture_warnings_region and not is_part_of_a_posture_warnings_region

                # Update the cached value for the next iteration
                self.prev_is_part_of_a_posture_warnings_region = is_part_of_a_posture_warnings_region

                cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region flags updated")

                #& Chunks and Posture Warnings Regions Management
                #~ Case 1: posture warnings region after a chunk
                if is_start_of_posture_warnings_region:
                    cpr_logger.info(f"[RUN ANALYSIS] Case 1: posture warnings region after a chunk")

                    # Start a new posture warnings region
                    self._start_new_posture_warnings_region()

                    # End the previous chunk if it exists
                    if self.chunk_start_frame_index is not None:
                        self.chunk_end_frame_index = self.frame_counter - 1
                        cpr_logger.info(f"[RUN ANALYSIS] End of chunk detected")
                        cpr_logger.info(f"[RUN ANALYSIS] Chunk start frame: {self.chunk_start_frame_index}")
                        cpr_logger.info(f"[RUN ANALYSIS] Chunk end frame: {self.chunk_end_frame_index}")
                        self._handle_chunk_end()

                #~ Case 2: posture warnings region after a posture warnings region
                # (the set of active warnings changed mid-region, so close and reopen)
                if (self.cached_posture_warnings != posture_warnings) and (is_part_of_a_posture_warnings_region) and (not is_start_of_posture_warnings_region) and (not is_end_of_posture_warnings_region):
                    cpr_logger.info(f"[RUN ANALYSIS] Case 2: posture warnings region after a posture warnings region")

                    # End the previous posture warnings region
                    self.posture_warnings_region_end_frame_index = self.frame_counter - 1
                    cpr_logger.info(f"[RUN ANALYSIS] End of posture warnings region detected")
                    cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region start frame: {self.posture_warnings_region_start_frame_index}")
                    cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region end frame: {self.posture_warnings_region_end_frame_index}")
                    self._handle_posture_warnings_region_end()

                    # Start a new posture warnings region
                    self._start_new_posture_warnings_region()

                #~ Case 3: chunk after a posture warnings region
                if is_end_of_posture_warnings_region:
                    cpr_logger.info(f"[RUN ANALYSIS] Case 3: chunk after a posture warnings region")

                    # Start a new chunk (deferred until a midpoint is appended — see follow-up below)
                    self.waiting_to_start_new_chunk = True
                    cpr_logger.info(f"[RUN ANALYSIS] Waiting to start a new chunk")
                    new_chunk_type = "chunk"

                    # End the previous posture warnings region
                    self.posture_warnings_region_end_frame_index = self.frame_counter - 1
                    cpr_logger.info(f"[RUN ANALYSIS] End of posture warnings region detected")
                    cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region start frame: {self.posture_warnings_region_start_frame_index}")
                    cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region end frame: {self.posture_warnings_region_end_frame_index}")
                    self._handle_posture_warnings_region_end()

                #~ Case 4: chunk after a chunk (periodic reporting boundary)
                if (not is_part_of_a_posture_warnings_region) and (not is_end_of_posture_warnings_region) and (self.processed_frame_counter % self.reporting_interval_frames == 0):
                    cpr_logger.info(f"[RUN ANALYSIS] Case 4: chunk after a chunk")

                    # End the previous chunk if it exists
                    if self.chunk_start_frame_index is not None and self.chunk_start_frame_index != self.frame_counter:
                        self.chunk_end_frame_index = self.frame_counter
                        cpr_logger.info(f"[RUN ANALYSIS] End of chunk detected")
                        cpr_logger.info(f"[RUN ANALYSIS] Chunk start frame: {self.chunk_start_frame_index}")
                        cpr_logger.info(f"[RUN ANALYSIS] Chunk end frame: {self.chunk_end_frame_index}")
                        self._handle_chunk_end()

                    # Start a new chunk
                    self.waiting_to_start_new_chunk = True
                    cpr_logger.info(f"[RUN ANALYSIS] Waiting to start a new chunk")

                    new_chunk_type = "mini chunk"

                #~ Follow up on cases 3 and 4: actually open the chunk once a midpoint exists
                if (self.waiting_to_start_new_chunk) and (has_appended_midpoint):
                    cpr_logger.info(f"[RUN ANALYSIS] Follow up on cases 3 and 4")

                    if (new_chunk_type == "chunk") or (new_chunk_type == "mini chunk" and self.frame_counter != self.chunk_end_frame_index):
                        self._start_new_chunk()

                #& Compose frame
                # This function is responsible for drawing the chest region and the midpoint.
                # The frame would not be displayed yet, just composed.
                composed_frame = self._compose_frame(frame, is_part_of_a_posture_warnings_region)

                if composed_frame is not None:
                    frame = composed_frame
                    cpr_logger.info(f"[RUN ANALYSIS] Frame composed successfully")
                else:
                    cpr_logger.info(f"[RUN ANALYSIS] Frame composition failed")

                #& Initialize video writer if not done yet (lazy: needs first frame's size)
                if frame is not None and not self._writer_initialized:
                    self._initialize_video_writer(frame)
                    cpr_logger.info(f"[VIDEO WRITER] Initialized video writer")

                #& Write frame if writer is functional
                if self._writer_initialized:
                    # Convert frame to BGR if needed
                    if frame.dtype != np.uint8:
                        frame = frame.astype(np.uint8)
                    if len(frame.shape) == 2:  # Grayscale
                        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)

                    try:
                        self.video_writer.write(frame)
                    except Exception as e:
                        cpr_logger.error(f"[WRITE ERROR] {str(e)}")
                        self._writer_initialized = False

                #& Update the cached posture warnings
                # Don't update it before handling the four cases because the old cached warnings might be needed.
                self.cached_posture_warnings = posture_warnings

                #^ Formatted Warnings
                formatted_warnings = self._format_warnings()
                cpr_logger.info(f"[RUN ANALYSIS] Formatted warnings: {formatted_warnings}")

                #!$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                self.socket_server.warning_queue.put(formatted_warnings)
                cpr_logger.info(f"[RUN ANALYSIS] Sent warnings to socket server")

                self.processed_frame_counter += 1
                cpr_logger.info(f"[RUN ANALYSIS] Processed frame counter: {self.processed_frame_counter}")

                #& Check if the user wants to quit
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    cpr_logger.info("[RUN ANALYSIS] 'q' pressed, exiting loop.")
                    break

            main_loop_end_time = time.time()
            elapsed_time = main_loop_end_time - main_loop_start_time
            cpr_logger.info(f"[TIMING] Main loop elapsed time: {elapsed_time:.2f}s")

        except Exception as e:
            cpr_logger.error(f"[ERROR] An error occurred during main execution loop: {str(e)}")

        finally:
            report_and_plot_start_time = time.time()

            self.cap.release()
            self.cap = None

            if self.video_writer is not None:
                self.video_writer.release()
                cpr_logger.info(f"[VIDEO WRITER] Released writer. File should be at: {os.path.abspath(self.video_output_path)}")
            cv2.destroyAllWindows()
            cpr_logger.info("[RUN ANALYSIS] Released video capture and destroyed all windows")

            self._calculate_rate_and_depth_for_all_chunks()
            cpr_logger.info("[RUN ANALYSIS] Calculated weighted averages of the metrics across all chunks")

            self._plot_full_motion_curve_for_all_chunks()
            cpr_logger.info("[RUN ANALYSIS] Plotted full motion curve")

            self.warnings_overlayer.add_warnings_to_processed_video(self.video_output_path, self.sampling_interval_frames, self.rate_and_depth_warnings, self.posture_warnings)
            cpr_logger.info("[RUN ANALYSIS] Added warnings to processed video")

            # Delete the intermediate (un-overlaid) video file; the overlayer
            # presumably writes its own output elsewhere — confirm against
            # WarningsOverlayer.add_warnings_to_processed_video.
            try:
                if os.path.exists(self.video_output_path):
                    os.remove(self.video_output_path)
                    cpr_logger.info(f"[CLEANUP] Successfully deleted video file: {self.video_output_path}")
                else:
                    cpr_logger.warning(f"[CLEANUP] Video file not found at: {self.video_output_path}")
            except Exception as e:
                cpr_logger.error(f"[ERROR] Failed to delete video file: {str(e)}")

            report_and_plot_end_time = time.time()
            report_and_plot_elapsed_time = report_and_plot_end_time - report_and_plot_start_time
            cpr_logger.info(f"[TIMING] Report and plot elapsed time: {report_and_plot_elapsed_time:.2f}s")
| 469 |
+
def _format_warnings(self):
|
| 470 |
+
"""Combine warnings into a simple structured response"""
|
| 471 |
+
|
| 472 |
+
if self.cached_posture_warnings:
|
| 473 |
+
return {
|
| 474 |
+
"status": "warning",
|
| 475 |
+
"posture_warnings": self.cached_posture_warnings,
|
| 476 |
+
"rate_and_depth_warnings": [],
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
if (self.cached_rate_and_depth_warnings) and (self.return_rate_and_depth_warnings_interval_frames_counter > 0):
|
| 480 |
+
self.return_rate_and_depth_warnings_interval_frames_counter -= 1
|
| 481 |
+
|
| 482 |
+
return {
|
| 483 |
+
"status": "warning",
|
| 484 |
+
"posture_warnings": [],
|
| 485 |
+
"rate_and_depth_warnings": self.cached_rate_and_depth_warnings,
|
| 486 |
+
}
|
| 487 |
+
|
| 488 |
+
return {
|
| 489 |
+
"status": "ok",
|
| 490 |
+
"posture_warnings": [],
|
| 491 |
+
"rate_and_depth_warnings": [],
|
| 492 |
+
}
|
| 493 |
+
|
| 494 |
+
def _handle_frame_rotation(self, frame):
|
| 495 |
+
if frame.shape[1] > frame.shape[0]: # Width > Height
|
| 496 |
+
frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
|
| 497 |
+
return frame
|
| 498 |
+
|
| 499 |
+
def _process_frame(self, frame):
    """Run the full per-frame CPR analysis pipeline.

    Stages (each falls back to the previous frame's result on failure, and
    bails out early when no data is available at all):
      1. Pose estimation
      2. Rescuer/patient role classification
      3. Chest region estimation + weighted-average expectation
      4. Posture analysis (warnings debounced over consecutive frames)
      5. Wrist-midpoint detection (appended only when posture is clean)
      6. Shoulder-distance measurement (for the cm/px scale)

    Args:
        frame: BGR image array for the current frame.

    Returns:
        tuple[list, bool]: (debounced posture warnings,
        whether a wrist midpoint was appended to the chunk history).
    """
    #* Warnings for real time feedback
    warnings = []

    #* Chunk Completion Check
    has_appended_midpoint = False

    #& Pose Estimation
    pose_results = self.pose_estimator.detect_poses(frame)

    #~ Handle Failed Detection or Update Previous Results
    if not pose_results:
        pose_results = self.prev_pose_results
        cpr_logger.info("[POSE ESTIMATION] No pose detected, using previous results (could be None)")
    else:
        self.prev_pose_results = pose_results

    if not pose_results:
        cpr_logger.info("[POSE ESTIMATION] Insufficient data for processing")
        return warnings, has_appended_midpoint

    #& Rescuer and Patient Classification
    rescuer_processed_results, patient_processed_results = self.role_classifier.classify_roles(pose_results, self.prev_rescuer_processed_results, self.prev_patient_processed_results)

    #~ Handle Failed Classifications OR Update Previous Results
    if not rescuer_processed_results:
        rescuer_processed_results = self.prev_rescuer_processed_results
        cpr_logger.info("[ROLE CLASSIFICATION] No rescuer detected, using previous results (could be None)")
    else:
        self.prev_rescuer_processed_results = rescuer_processed_results

    if not patient_processed_results:
        patient_processed_results = self.prev_patient_processed_results
        cpr_logger.info("[ROLE CLASSIFICATION] No patient detected, using previous results (could be None)")
    else:
        self.prev_patient_processed_results = patient_processed_results

    if not rescuer_processed_results or not patient_processed_results:
        cpr_logger.info("[ROLE CLASSIFICATION] Insufficient data for processing")
        return warnings, has_appended_midpoint

    #^ Set Params in Role Classifier (to draw later)
    self.role_classifier.rescuer_processed_results = rescuer_processed_results
    self.role_classifier.patient_processed_results = patient_processed_results
    cpr_logger.info(f"[ROLE CLASSIFICATION] Updated role classifier with new results")

    #& Chest Estimation
    chest_params = self.chest_initializer.estimate_chest_region(patient_processed_results["keypoints"], patient_processed_results["bounding_box"], frame_width=frame.shape[1], frame_height=frame.shape[0])

    #~ Handle Failed Estimation or Update Previous Results
    if not chest_params:
        chest_params = self.prev_chest_params
        cpr_logger.info("[CHEST ESTIMATION] No chest region detected, using previous results (could be None)")
    else:
        self.prev_chest_params = chest_params

    if not chest_params:
        cpr_logger.info("[CHEST ESTIMATION] Insufficient data for processing")
        return warnings, has_appended_midpoint

    #^ Set Params in Chest Initializer (to draw later)
    self.chest_initializer.chest_params = chest_params
    self.chest_initializer.chest_params_history.append(self.chest_initializer.chest_params)

    #& Chest Expectation
    # The estimation up to the last frame
    expected_chest_params = self.chest_initializer.estimate_chest_region_weighted_avg(frame_width=frame.shape[1], frame_height=frame.shape[0])

    #~ First "window_size" detections can't avg
    if not expected_chest_params:
        self.chest_initializer.expected_chest_params = self.chest_initializer.chest_params
    else:
        self.chest_initializer.expected_chest_params = expected_chest_params

    #& Posture Analysis
    cpr_logger.info(f"[POSTURE ANALYSIS] Analyzing posture")
    current_warnings = self.posture_analyzer.validate_posture(rescuer_processed_results["keypoints"], self.chest_initializer.expected_chest_params)
    cpr_logger.info(f"[POSTURE ANALYSIS] Posture analysis completed")

    # Update individual warning counters
    # Debounce: a warning only surfaces after it has persisted for
    # error_threshold_frames consecutive frames; any clean frame resets it.
    for warning in self.possible_warnings:
        if warning in current_warnings:
            self.consecutive_frames_with_posture_errors_counters[warning] += 1
        else:
            self.consecutive_frames_with_posture_errors_counters[warning] = 0

    # Filter warnings that meet/exceed threshold
    warnings = [
        warning for warning in self.possible_warnings
        if self.consecutive_frames_with_posture_errors_counters[warning] >= self.error_threshold_frames
    ]

    #^ Set Params in Posture Analyzer (to draw later)
    self.posture_analyzer.warnings = warnings
    cpr_logger.info(f"[POSTURE ANALYSIS] Updated posture analyzer with new results")

    #& Wrist Midpoint Detection
    midpoint = self.wrists_midpoint_analyzer.detect_wrists_midpoint(rescuer_processed_results["keypoints"])

    #~ Handle Failed Detection or Update Previous Results
    if not midpoint:
        midpoint = self.prev_midpoint
        cpr_logger.info("[WRIST MIDPOINT DETECTION] No midpoint detected, using previous results (could be None)")
    else:
        self.prev_midpoint = midpoint

    if not midpoint:
        cpr_logger.info("[WRIST MIDPOINT DETECTION] Insufficient data for processing")
        return warnings, has_appended_midpoint
    # NOTE(review): logged at ERROR level although this is the success path —
    # presumably leftover debug output; confirm the intended level.
    cpr_logger.error(f"[WRIST MIDPOINT DETECTION] Midpoint detected: {midpoint}")
    if len(warnings) == 0:
        #^ Set Params in Role Classifier (to draw later)
        # Midpoints are only accumulated on posture-clean frames so the
        # motion curve is not polluted by bad-form compressions.
        has_appended_midpoint = True
        self.wrists_midpoint_analyzer.midpoint = midpoint
        self.wrists_midpoint_analyzer.midpoint_history.append(midpoint)
        cpr_logger.info(f"[WRIST MIDPOINT DETECTION] Updated wrist midpoint analyzer with new results")

    #& Shoulder Distance Calculation
    shoulder_distance = self.shoulders_analyzer.calculate_shoulder_distance(rescuer_processed_results["keypoints"])
    if shoulder_distance is not None:
        self.shoulders_analyzer.shoulder_distance = shoulder_distance
        self.shoulders_analyzer.shoulder_distance_history.append(shoulder_distance)
        cpr_logger.info(f"[SHOULDER DISTANCE] Updated shoulder distance analyzer with new results")

    return warnings, has_appended_midpoint
def _compose_frame(self, frame, is_part_of_a_posture_warnings_region):
|
| 626 |
+
# Chest Region
|
| 627 |
+
if frame is not None:
|
| 628 |
+
frame = self.chest_initializer.draw_expected_chest_region(frame)
|
| 629 |
+
cpr_logger.info(f"[VISUALIZATION] Drawn chest region")
|
| 630 |
+
|
| 631 |
+
if frame is not None:
|
| 632 |
+
if not is_part_of_a_posture_warnings_region:
|
| 633 |
+
frame = self.wrists_midpoint_analyzer.draw_midpoint(frame)
|
| 634 |
+
cpr_logger.info(f"[VISUALIZATION] Drawn midpoint")
|
| 635 |
+
|
| 636 |
+
return frame
|
| 637 |
+
|
| 638 |
+
def _calculate_rate_and_depth_for_chunk(self):
    """Feed the current chunk's midpoint and shoulder-distance histories to
    the metrics calculator. Failures are logged, never raised."""
    try:
        midpoints = np.array(self.wrists_midpoint_analyzer.midpoint_history)
        shoulder_distances = np.array(self.shoulders_analyzer.shoulder_distance_history)

        result = self.metrics_calculator.handle_chunk(
            midpoints,
            self.chunk_start_frame_index,
            self.chunk_end_frame_index,
            self.fps,
            shoulder_distances,
            self.sampling_interval_frames,
        )

        # handle_chunk signals failure with an explicit False.
        if result == False:
            cpr_logger.info("[ERROR] Failed to calculate metrics for the chunk")
            return

    except Exception as e:
        cpr_logger.error(f"[ERROR] Metric calculation failed: {str(e)}")
def _calculate_rate_and_depth_for_all_chunks(self):
    """Ask the metrics calculator for duration-weighted averages over all
    chunks; any error is logged instead of propagating."""
    try:
        self.metrics_calculator.calculate_rate_and_depth_for_all_chunk()
        cpr_logger.info("[METRICS] Weighted averages calculated")
    except Exception as e:
        cpr_logger.error(f"[ERROR] Failed to calculate weighted averages: {str(e)}")
def _plot_full_motion_curve_for_all_chunks(self):
    """Render the motion curve spanning every processed chunk to
    self.plot_output_path; failures are logged, not raised."""
    try:
        metrics = self.metrics_calculator
        self.graph_plotter.plot_motion_curve_for_all_chunks(
            metrics.chunks_y_preprocessed,
            metrics.chunks_peaks,
            metrics.chunks_depth,
            metrics.chunks_rate,
            metrics.chunks_start_and_end_indices,
            self.posture_warnings,
            self.sampling_interval_frames,
            self.fps,
            self.plot_output_path,
        )
        cpr_logger.info("[PLOT] Full motion curve plotted")
    except Exception as e:
        cpr_logger.error(f"[ERROR] Failed to plot full motion curve: {str(e)}")
def _get_rate_and_depth_warnings(self):
    """Fetch the current rate/depth warnings from the metrics calculator,
    logging them for traceability before returning."""
    warnings = self.metrics_calculator.get_rate_and_depth_warnings()
    cpr_logger.info(f"[VISUALIZATION] Rate and depth warnings data: {warnings}")
    return warnings
if __name__ == "__main__":
    # Entry point: run the CPR analyzer end-to-end on a single local video,
    # writing an annotated video and a motion-curve plot next to it.
    cpr_logger.info(f"[MAIN] CPR Analysis Started")

    # Configuration
    requested_fps = 30
    # TODO(review): hard-coded absolute Windows path — move to a CLI argument
    # or environment variable before deploying.
    base_dir = r"C:\Users\Fatema Kotb\Documents\CUFE 25\Year 04\GP\Spring\El7a2ny-Graduation-Project"

    # Define input path
    input_video = os.path.join(base_dir, "CPR", "Dataset", "Batch 2", "14.mp4")

    # Validate input file exists before doing any work
    if not os.path.exists(input_video):
        cpr_logger.error(f"[ERROR] Input video not found at: {input_video}")
        sys.exit(1)

    # Extract original filename without extension (used to name outputs)
    original_name = os.path.splitext(os.path.basename(input_video))[0]
    cpr_logger.info(f"[CONFIG] Original video name: {original_name}")

    # Create output directory if it doesn't exist
    output_dir = os.path.join(base_dir, "CPR", "End to End", "Code Refactor", "Output")
    os.makedirs(output_dir, exist_ok=True)

    # Set output paths using original name
    video_output_path = os.path.join(output_dir, f"{original_name}_output.mp4")
    plot_output_path = os.path.join(output_dir, f"{original_name}_output.png")

    # Log paths for verification
    cpr_logger.info(f"[CONFIG] Input video: {input_video}")
    cpr_logger.info(f"[CONFIG] Video output: {video_output_path}")
    cpr_logger.info(f"[CONFIG] Plot output: {plot_output_path}")

    # Initialize and run analyzer (timed for diagnostics)
    initialization_start_time = time.time()
    analyzer = CPRAnalyzer(input_video, video_output_path, plot_output_path, requested_fps)

    # Set plot output path in the analyzer
    # NOTE(review): plot_output_path is already passed to the constructor above —
    # presumably this reassignment is redundant; confirm against CPRAnalyzer.__init__.
    analyzer.plot_output_path = plot_output_path

    initialization_end_time = time.time()
    initialization_elapsed_time = initialization_end_time - initialization_start_time
    cpr_logger.info(f"[TIMING] Initialization time: {initialization_elapsed_time:.2f}s")

    try:
        analyzer.run_analysis()
    finally:
        # Always release the socket server, even if the analysis crashes.
        analyzer.socket_server.stop_server()
CPRRealTime/metrics_calculator.py
ADDED
|
@@ -0,0 +1,480 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# metrics_calculator.py
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy.signal import savgol_filter, find_peaks
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
import sys
|
| 6 |
+
import cv2
|
| 7 |
+
import os
|
| 8 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 9 |
+
|
| 10 |
+
class MetricsCalculator:
|
| 11 |
+
"""Rate and depth calculation from motion data with improved peak detection"""
|
| 12 |
+
|
| 13 |
+
def __init__(self, shoulder_width_cm):
    """Initialize the calculator state.

    Args:
        shoulder_width_cm: Assumed real-world rescuer shoulder width (cm),
            used to convert pixel displacements to centimetres.
    """
    # Configuration parameters
    self.shoulder_width_cm = shoulder_width_cm

    # Parameters for cleaning the smoothed midpoints (sliding median/MAD filter)
    self.removing_impulse_noise_window_size = 5
    self.removing_impulse_noise_threshold = 3.0

    # Parameters for one chunk
    # Preprocessed (smoothed + de-noised) y-values of the wrist midpoints
    self.y_preprocessed = np.array([])

    # Peak indices into y_preprocessed: all, maxima-only, minima-only
    self.peaks = np.array([])
    self.peaks_max = np.array([])
    self.peaks_min = np.array([])

    # Centimetres per pixel (None until calculate_cm_px_ratio runs)
    self.cm_px_ratio = None

    # Latest per-chunk results (cm / compressions-per-minute)
    self.depth = None
    self.rate = None

    self.rate_and_depth_warnings = []

    # Parameters for all chunks (parallel lists, one entry per chunk)
    self.chunks_y_preprocessed = []

    self.chunks_peaks = []

    self.chunks_depth = []
    self.chunks_rate = []

    # Duration-weighted averages over all chunks
    self.weighted_depth = None
    self.weighted_rate = None

    self.chunks_start_and_end_indices = []

    self.chunks_rate_and_depth_warnings = []

    # Parameters for validation (clinical CPR target ranges)
    self.min_depth_threshold = 3.0 # cm
    self.max_depth_threshold = 6.0 # cm

    self.min_rate_threshold = 100.0 # cpm
    self.max_rate_threshold = 120.0 # cpm
def validate_midpoints_and_frames_count_in_chunk(self, y_exact, chunk_start_frame_index, chunk_end_frame_index, sampling_interval_in_frames):
    """
    Validate that the number of received midpoints matches the number of
    frames that should have been sampled in the chunk.

    The expected count is computed in closed form from the frame indices and
    the sampling interval; a mismatch means frames were dropped or duplicated
    upstream, which would corrupt rate/depth timing, so the process exits.

    Args:
        y_exact (np.ndarray): The exact y-values of the midpoints.
        chunk_start_frame_index (int): The starting frame index of the chunk (inclusive).
        chunk_end_frame_index (int): The ending frame index of the chunk (inclusive).
        sampling_interval_in_frames (int): The interval at which frames are sampled.

    Exits:
        Terminates the process via sys.exit(1) on a count mismatch or on any
        unexpected error during validation.
    """
    try:
        start = chunk_start_frame_index
        end = chunk_end_frame_index
        interval = sampling_interval_in_frames

        # Closed-form count of frame indices in [start, end] that are
        # multiples of `interval` (i.e. the frames actually sampled).
        expected_samples = (end // interval) - ((start - 1) // interval)

        # Validate
        actual_y_exact_length = len(y_exact)
        if actual_y_exact_length != expected_samples:
            cpr_logger.info(f"\nERROR: Mismatch in expected and actual samples")
            cpr_logger.info(f"Expected: {expected_samples} samples (frames {start}-{end} @ every {interval} frames)")
            # Fixed typos in the original message ("midoints ... recieived").
            cpr_logger.info(f"Actual: {actual_y_exact_length} midpoint points received")
            sys.exit(1)

    except Exception as e:
        # SystemExit is not an Exception subclass, so the exit above is not
        # swallowed here; this only catches genuine validation failures.
        cpr_logger.error(f"\nCRITICAL VALIDATION ERROR: {str(e)}")
        sys.exit(1)
def _smooth_midpoints(self, midpoints):
|
| 96 |
+
"""
|
| 97 |
+
Smooth the y-values of the midpoints using Savitzky-Golay filter
|
| 98 |
+
|
| 99 |
+
Args:
|
| 100 |
+
y_exact (np.ndarray): The exact y-values of the midpoints.
|
| 101 |
+
|
| 102 |
+
Returns:
|
| 103 |
+
np.ndarray: The smoothed y-values.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
if len(midpoints) > 5: # Ensure enough data points
|
| 107 |
+
try:
|
| 108 |
+
y_smooth = savgol_filter(
|
| 109 |
+
midpoints[:, 1],
|
| 110 |
+
window_length=3,
|
| 111 |
+
polyorder=2,
|
| 112 |
+
mode='nearest'
|
| 113 |
+
)
|
| 114 |
+
return y_smooth
|
| 115 |
+
except Exception as e:
|
| 116 |
+
cpr_logger.error(f"Smoothing error: {e}")
|
| 117 |
+
y_smooth = midpoints[:, 1] # Fallback to original
|
| 118 |
+
return y_smooth
|
| 119 |
+
else:
|
| 120 |
+
y_smooth = midpoints[:, 1] # Not enough points
|
| 121 |
+
return y_smooth
|
| 122 |
+
|
| 123 |
+
def _clean_midpoints(self, y_smooth):
|
| 124 |
+
"""
|
| 125 |
+
Clean the smoothed y-values to remove impulse noise using median filtering
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
y_smooth (np.ndarray): The smoothed y-values.
|
| 129 |
+
|
| 130 |
+
Returns:
|
| 131 |
+
np.ndarray: The cleaned y-values.
|
| 132 |
+
"""
|
| 133 |
+
|
| 134 |
+
if len(y_smooth) < self.removing_impulse_noise_window_size:
|
| 135 |
+
return y_smooth # Not enough points for processing
|
| 136 |
+
|
| 137 |
+
y_clean = np.array(y_smooth, dtype=float) # Copy to avoid modifying original
|
| 138 |
+
half_window = self.removing_impulse_noise_window_size // 2
|
| 139 |
+
|
| 140 |
+
for i in range(len(y_smooth)):
|
| 141 |
+
# Get local window (handle boundaries)
|
| 142 |
+
start = max(0, i - half_window)
|
| 143 |
+
end = min(len(y_smooth), i + half_window + 1)
|
| 144 |
+
window = y_smooth[start:end]
|
| 145 |
+
|
| 146 |
+
# Calculate local median and MAD (robust statistics)
|
| 147 |
+
med = np.median(window)
|
| 148 |
+
mad = 1.4826 * np.median(np.abs(window - med)) # Median Absolute Deviation
|
| 149 |
+
|
| 150 |
+
# Detect and replace outliers
|
| 151 |
+
if abs(y_smooth[i] - med) > self.removing_impulse_noise_threshold * mad:
|
| 152 |
+
# Replace with median of immediate neighbors (better than global median)
|
| 153 |
+
left = y_smooth[max(0, i-1)]
|
| 154 |
+
right = y_smooth[min(len(y_smooth)-1, i+1)]
|
| 155 |
+
y_clean[i] = np.median([left, right])
|
| 156 |
+
|
| 157 |
+
return y_clean
|
| 158 |
+
|
| 159 |
+
def preprocess_midpoints(self, midpoints):
|
| 160 |
+
"""
|
| 161 |
+
Preprocess the y-values of the midpoints by smoothing and cleaning
|
| 162 |
+
|
| 163 |
+
Sets:
|
| 164 |
+
y_preprocessed (np.ndarray): The preprocessed y-values.
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
y_exact (np.ndarray): The exact y-values of the midpoints.
|
| 168 |
+
|
| 169 |
+
Returns:
|
| 170 |
+
bool: True if preprocessing was successful, False otherwise.
|
| 171 |
+
"""
|
| 172 |
+
|
| 173 |
+
y_smooth = self._smooth_midpoints(midpoints)
|
| 174 |
+
y_clean = self._clean_midpoints(y_smooth)
|
| 175 |
+
|
| 176 |
+
self.y_preprocessed = y_clean
|
| 177 |
+
|
| 178 |
+
return len(self.y_preprocessed) > 0 # Return True if preprocessing was successful
|
| 179 |
+
|
| 180 |
+
#^ ################# Processing #######################
|
| 181 |
+
|
| 182 |
+
def detect_midpoints_peaks(self):
    """
    Detect compression peaks in the preprocessed y-values.

    Local maxima (compression tops) and minima (compression bottoms) are
    found separately with scipy.signal.find_peaks and merged into one
    sorted index array.

    Sets:
        peaks (np.ndarray): All detected peak indices, sorted ascending.
        peaks_max (np.ndarray): Indices of local maxima.
        peaks_min (np.ndarray): Indices of local minima.

    Returns:
        bool: True if peaks were detected, False otherwise.
    """

    if self.y_preprocessed.size == 0:
        cpr_logger.info("No smoothed values found for peak detection")
        return False

    try:
        # NOTE(review): min(1, n) is always 1 whenever n >= 1, so this
        # "dynamic" distance is effectively the constant 1 — was
        # max(1, ...) intended? Confirm before changing.
        distance = min(1, len(self.y_preprocessed)) # Dynamic distance based on data length

        # Detect max peaks with default prominence
        self.peaks_max, _ = find_peaks(self.y_preprocessed, distance=distance)

        # Detect min peaks with reduced or no prominence requirement
        self.peaks_min, _ = find_peaks(
            -self.y_preprocessed,
            distance=distance,
            prominence=(0.2, None) # Adjust based on your data's characteristics
        )

        self.peaks = np.sort(np.concatenate((self.peaks_max, self.peaks_min)))

        # Log the smoothed values and detected peaks
        cpr_logger.info(f"Smoothed values: {self.y_preprocessed}")
        cpr_logger.info(f"Detected peaks: {self.peaks}")
        cpr_logger.info(f"Detected max peaks: {self.peaks_max}")
        cpr_logger.info(f"Detected min peaks: {self.peaks_min}")

        return len(self.peaks) > 0
    except Exception as e:
        cpr_logger.error(f"Peak detection error: {e}")
        return False
def calculate_cm_px_ratio(self, shoulder_distances):
|
| 226 |
+
"""
|
| 227 |
+
Calculate the ratio of cm to pixels based on shoulder distances
|
| 228 |
+
|
| 229 |
+
Sets:
|
| 230 |
+
cm_px_ratio (float): The ratio of cm to pixels.
|
| 231 |
+
|
| 232 |
+
Args:
|
| 233 |
+
shoulder_distances (list): List of shoulder distances in pixels.
|
| 234 |
+
"""
|
| 235 |
+
|
| 236 |
+
if len(shoulder_distances) > 0:
|
| 237 |
+
avg_shoulder_width_px = np.mean(shoulder_distances)
|
| 238 |
+
self.cm_px_ratio = self.shoulder_width_cm / avg_shoulder_width_px
|
| 239 |
+
else:
|
| 240 |
+
self.cm_px_ratio = None
|
| 241 |
+
cpr_logger.info("No shoulder distances available for cm/px ratio calculation")
|
| 242 |
+
|
| 243 |
+
def calculate_rate_and_depth_for_chunk(self, original_fps, sampling_interval_in_frames=1):
    """
    Calculate the compression rate and depth of the motion data for a chunk.

    Depth is the mean absolute y-distance between consecutive peaks scaled
    by cm_px_ratio; rate comes from the mean spacing of max peaks, corrected
    for frame downsampling. When either cannot be computed, both are zeroed
    and the peak list is cleared.

    Sets:
        depth (float): The calculated depth in cm (0 when data is insufficient).
        rate (float): The calculated rate in cpm (0 when data is insufficient).

    Args:
        original_fps (float): The original frames per second of the video.
        sampling_interval_in_frames (int): Number of frames skipped between samples.
    """
    try:

        # Without Adjustment: A peak distance of 5 (downsampled frames) would incorrectly be interpreted as 5/30 = 0.167 sec (too short).
        # With Adjustment: The same peak distance 5 (downsampled frames) correctly represents 5/10 = 0.5 sec.

        effective_fps = original_fps / sampling_interval_in_frames # Correctly reduced FPS

        # Depth calculation (unchanged)
        # NOTE(review): assumes calculate_cm_px_ratio ran first; if
        # cm_px_ratio is still None, the multiplication raises and lands in
        # the except branch, leaving depth/rate stale — confirm call order.
        depth = None
        if len(self.peaks) > 1:
            depth = np.mean(np.abs(np.diff(self.y_preprocessed[self.peaks]))) * self.cm_px_ratio

        # Rate calculation (now uses effective_fps)
        rate = None
        if len(self.peaks_max) > 1:
            # Peak indices are from the downsampled signal, so we use effective_fps
            peak_intervals = np.diff(self.peaks_max) # Already in downsampled frames
            rate = (1 / (np.mean(peak_intervals) / effective_fps)) * 60 # Correct CPM

        # Handle cases with no valid data
        if depth is None or rate is None:
            depth = 0
            rate = 0
            self.peaks = np.array([])

        self.depth = depth
        self.rate = rate
    except Exception as e:
        cpr_logger.error(f"Error calculating rate and depth: {e}")
def assign_chunk_data(self, chunk_start_frame_index, chunk_end_frame_index):
|
| 286 |
+
"""
|
| 287 |
+
Capture chunk data for later analysis
|
| 288 |
+
|
| 289 |
+
Sets:
|
| 290 |
+
chunks_depth (list): List of depths for each chunk.
|
| 291 |
+
chunks_rate (list): List of rates for each chunk.
|
| 292 |
+
chunks_start_and_end_indices (list): List of start and end indices for each chunk.
|
| 293 |
+
chunks_y_preprocessed (list): List of preprocessed y-values for each chunk.
|
| 294 |
+
chunks_peaks (list): List of detected peaks for each chunk.
|
| 295 |
+
|
| 296 |
+
Args:
|
| 297 |
+
chunk_start_frame_index (int): The starting frame index of the chunk.
|
| 298 |
+
chunk_end_frame_index (int): The ending frame index of the chunk.
|
| 299 |
+
"""
|
| 300 |
+
self.chunks_depth.append(self.depth)
|
| 301 |
+
self.chunks_rate.append(self.rate)
|
| 302 |
+
self.chunks_start_and_end_indices.append((chunk_start_frame_index, chunk_end_frame_index))
|
| 303 |
+
|
| 304 |
+
self.chunks_y_preprocessed.append(self.y_preprocessed.copy())
|
| 305 |
+
self.chunks_peaks.append(self.peaks.copy())
|
| 306 |
+
|
| 307 |
+
self.current_chunk_start = chunk_start_frame_index
|
| 308 |
+
self.current_chunk_end = chunk_end_frame_index
|
| 309 |
+
|
| 310 |
+
self.chunks_rate_and_depth_warnings.append(self.rate_and_depth_warnings.copy())
|
| 311 |
+
|
| 312 |
+
def calculate_rate_and_depth_for_all_chunk(self):
    """
    Compute duration-weighted average rate and depth across all chunks.

    Each chunk's depth and rate is weighted by its inclusive frame span, so
    longer chunks contribute proportionally more to the averages.

    Sets:
        weighted_depth (float | None): Weighted average depth in cm.
        weighted_rate (float | None): Weighted average rate in cpm.

    Returns:
        None when no usable chunk data exists or the history lists disagree
        in length.
    """
    if not self.chunks_depth or not self.chunks_rate or not self.chunks_start_and_end_indices:
        cpr_logger.info("[WARNING] No chunk data available for averaging")
        return None

    if not (len(self.chunks_depth) == len(self.chunks_rate) == len(self.chunks_start_and_end_indices)):
        cpr_logger.info("[ERROR] Mismatched chunk data lists")
        return None

    total_weight = 0
    depth_accumulator = 0
    rate_accumulator = 0

    for chunk_depth, chunk_rate, (start, end) in zip(self.chunks_depth,
                                                     self.chunks_rate,
                                                     self.chunks_start_and_end_indices):
        # Inclusive frame span is the weight of this chunk.
        duration = end - start + 1
        depth_accumulator += chunk_depth * duration
        rate_accumulator += chunk_rate * duration
        total_weight += duration

    if total_weight == 0:
        self.weighted_depth = None
        self.weighted_rate = None
        cpr_logger.info("[ERROR] No valid chunks for averaging")
    else:
        self.weighted_depth = depth_accumulator / total_weight
        self.weighted_rate = rate_accumulator / total_weight
        cpr_logger.info(f"[RESULTS] Weighted average depth: {self.weighted_depth:.1f} cm")
        cpr_logger.info(f"[RESULTS] Weighted average rate: {self.weighted_rate:.1f} cpm")
def _get_rate_and_depth_status(self):
|
| 359 |
+
"""Internal validation logic"""
|
| 360 |
+
|
| 361 |
+
depth_status = "normal"
|
| 362 |
+
rate_status = "normal"
|
| 363 |
+
|
| 364 |
+
if self.depth < self.min_depth_threshold and self.depth > 0:
|
| 365 |
+
depth_status = "low"
|
| 366 |
+
elif self.depth > self.max_depth_threshold:
|
| 367 |
+
depth_status = "high"
|
| 368 |
+
|
| 369 |
+
if self.rate < self.min_rate_threshold and self.rate > 0:
|
| 370 |
+
rate_status = "low"
|
| 371 |
+
elif self.rate > self.max_rate_threshold:
|
| 372 |
+
rate_status = "high"
|
| 373 |
+
|
| 374 |
+
return depth_status, rate_status
|
| 375 |
+
|
| 376 |
+
def get_rate_and_depth_warnings(self):
|
| 377 |
+
"""Get performance warnings based on depth and rate"""
|
| 378 |
+
|
| 379 |
+
depth_status, rate_status = self._get_rate_and_depth_status()
|
| 380 |
+
|
| 381 |
+
warnings = []
|
| 382 |
+
if depth_status == "low":
|
| 383 |
+
warnings.append("Depth too low!")
|
| 384 |
+
elif depth_status == "high":
|
| 385 |
+
warnings.append("Depth too high!")
|
| 386 |
+
|
| 387 |
+
if rate_status == "low":
|
| 388 |
+
warnings.append("Rate too slow!")
|
| 389 |
+
elif rate_status == "high":
|
| 390 |
+
warnings.append("Rate too fast!")
|
| 391 |
+
|
| 392 |
+
self.rate_and_depth_warnings = warnings
|
| 393 |
+
|
| 394 |
+
return warnings
|
| 395 |
+
|
| 396 |
+
#^ ################# Handle Chunk #######################
|
| 397 |
+
|
| 398 |
+
def handle_chunk(self, midpoints, chunk_start_frame_index, chunk_end_frame_index, fps, shoulder_distances, sampling_interval_in_frames):
    """
    Validate, preprocess, and compute metrics for one chunk of motion data.

    Args:
        midpoints (np.ndarray): y-values of the wrists midpoints for the chunk.
        chunk_start_frame_index (int): first frame index of the chunk.
        chunk_end_frame_index (int): last frame index of the chunk.
        fps (float): frames per second of the video.
        shoulder_distances (list): shoulder distances in pixels.
        sampling_interval_in_frames (int): stride between processed frames.

    Returns:
        bool: True if the chunk was processed successfully, False otherwise.
    """
    if len(midpoints) == 0:
        cpr_logger.info("No midpoints received, skipping chunk")
        cpr_logger.info(f"Chunk {chunk_start_frame_index}-{chunk_end_frame_index} - No midpoints received")
        sys.exit(1)

    # The program is terminated if the validation fails
    self.validate_midpoints_and_frames_count_in_chunk(midpoints, chunk_start_frame_index, chunk_end_frame_index, sampling_interval_in_frames)

    # BUG FIX: renamed misspelled local "preprocessing_reult".
    preprocessing_result = self.preprocess_midpoints(midpoints)
    if not preprocessing_result:
        cpr_logger.info("Preprocessing failed, skipping chunk")
        return False

    # BUG FIX: detect_midpoints_peaks() was previously called twice in a row
    # (once with the result discarded, then again inside the condition),
    # doubling the detection work and any side effects.
    if not self.detect_midpoints_peaks():
        cpr_logger.info("Peak detection failed, skipping chunk")

        # Reset peak state so downstream consumers see an empty-but-valid chunk.
        self.peaks = np.array([])
        self.peaks_max = np.array([])
        self.peaks_min = np.array([])

        self.depth = 0
        self.rate = 0

        return False

    self.calculate_cm_px_ratio(shoulder_distances)
    if self.cm_px_ratio is None:
        cpr_logger.info("cm/px ratio calculation failed, skipping chunk")

        self.depth = 0
        self.rate = 0

        return False

    self.calculate_rate_and_depth_for_chunk(fps, sampling_interval_in_frames)
    if self.depth is None or self.rate is None:
        cpr_logger.info("Rate and depth calculation failed, skipping chunk")
        return False

    cpr_logger.info(f"Chunk {chunk_start_frame_index}-{chunk_end_frame_index} - Depth: {self.depth:.1f} cm, Rate: {self.rate:.1f} cpm")

    self.get_rate_and_depth_warnings()

    self.assign_chunk_data(chunk_start_frame_index, chunk_end_frame_index)
    cpr_logger.info(f"Chunk {chunk_start_frame_index}-{chunk_end_frame_index} processed successfully")
    return True
|
| 461 |
+
|
| 462 |
+
#^ ################# Comments #######################
|
| 463 |
+
# Between every two consecutive mini chunks, there will be "sampling interval" frames unaccounted for.
|
| 464 |
+
# This is because when we reach the "reporting interval" number of frames, we terminate the first mini chunk.
|
| 465 |
+
# But we only start the next mini chunk when we detect the next successfully processed frame.
|
| 466 |
+
# Which is "sampling interval" frames later at the earliest.
|
| 467 |
+
# We can't just initialize the next mini chunk at the "reporting interval" frame, because we need to wait for the next successful frame.
|
| 468 |
+
# Because the next frame may be a frame with posture errors.
|
| 469 |
+
# For better visualization, we connect between the last point of the previous chunk and the first point of the next chunk if they are "sampling interval" frames apart.
|
| 470 |
+
# But that is only for visualization, all calculations are done on the original frames.
|
| 471 |
+
|
| 472 |
+
# Chunks that are too short can fail any stage of the "handle chunk" process.
|
| 473 |
+
# If they do, we visualize what we have and ignore the rest.
|
| 474 |
+
# For example, a chunk with < 2 peaks will not be able to calculate the rate.
|
| 475 |
+
# So we will set it to zero and display the midpoints and detected peaks.
|
| 476 |
+
# If there are no peaks, we will set the rate to zero and display the midpoints.
|
| 477 |
+
|
| 478 |
+
# Problems with chunks could be:
|
| 479 |
+
# - Less than 3 seconds.
|
| 480 |
+
# - Not enough peaks to calculate depth and rate
|
CPRRealTime/pose_estimation.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# pose_estimation.py
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from ultralytics import YOLO
|
| 5 |
+
from CPRRealTime.keypoints import CocoKeypoints
|
| 6 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 7 |
+
|
| 8 |
+
class PoseEstimator:
    """Human pose estimation using a YOLO pose model."""

    def __init__(self, min_confidence, model_path="yolo11n-pose.pt", device="cuda"):
        """
        Args:
            min_confidence (float): minimum detection confidence passed to YOLO.
            model_path (str): path to the YOLO pose weights file.
            device (str): torch device string for inference. Defaults to
                "cuda" to preserve the previous hard-coded behavior.
        """
        # GENERALIZED: the target device is a parameter instead of being
        # hard-coded to "cuda".
        self.model = YOLO(model_path).to(device)

        # Report where the model actually landed, via cpr_logger for
        # consistency with the rest of this package (was print()).
        if next(self.model.model.parameters()).is_cuda:
            cpr_logger.info("YOLO model successfully loaded on CUDA (GPU).")
        else:
            cpr_logger.info("YOLO model is not on CUDA. Check your setup.")

        self.min_confidence = min_confidence

    def detect_poses(self, frame):
        """Detect human poses in a frame.

        Returns:
            The first ultralytics Results object, or None when nothing was
            detected or inference raised.
        """
        try:
            results = self.model(frame, verbose=False, conf=self.min_confidence, show=False, iou=0.2)
            if not results or len(results[0].keypoints.xy) == 0:
                return None
            return results[0]
        except Exception as e:
            cpr_logger.error(f"Pose detection error: {e}")
            return None

    def get_keypoints(self, results, person_idx=0):
        """Extract the keypoint array for one detected person.

        Args:
            results: an ultralytics Results object from detect_poses().
            person_idx (int): index of the person in the detections.

        Returns:
            np.ndarray of (x, y) keypoints, or None when unavailable.
        """
        try:
            if not results or len(results.keypoints.xy) <= person_idx:
                return None
            return results.keypoints.xy[person_idx].cpu().numpy()
        except Exception as e:
            cpr_logger.error(f"Keypoint extraction error: {e}")
            return None

    def draw_keypoints(self, frame, results):
        """Return the frame annotated with detected keypoints.

        On error the original frame is returned unchanged.
        """
        try:
            return results.plot()
        except Exception as e:
            cpr_logger.error(f"Keypoint drawing error: {e}")
            return frame
CPRRealTime/posture_analyzer.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# posture_analyzer.py
|
| 2 |
+
import math
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
from CPRRealTime.keypoints import CocoKeypoints
|
| 6 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 7 |
+
|
| 8 |
+
class PostureAnalyzer:
    """Posture analysis and validation for the rescuer during CPR."""

    def __init__(self, right_arm_angle_threshold, left_arm_angle_threshold, wrist_distance_threshold, history_length_to_average):
        """
        Args:
            right_arm_angle_threshold (float): warn when the right-arm rolling
                mean angle exceeds this value (degrees).
            left_arm_angle_threshold (float): warn when the left-arm rolling
                mean angle drops below this value (degrees).
            wrist_distance_threshold (float): wrist separation threshold in
                pixels (stored; not checked in this class).
            history_length_to_average (int): number of most recent samples in
                the rolling mean used to smooth per-frame noise.
        """
        self.history_length_to_average = history_length_to_average

        # Rolling histories used to smooth noisy per-frame measurements.
        self.right_arm_angles = []
        self.left_arm_angles = []
        self.wrist_distances = []

        self.right_arm_angle_threshold = right_arm_angle_threshold
        self.left_arm_angle_threshold = left_arm_angle_threshold
        self.wrist_distance_threshold = wrist_distance_threshold

    def _calculate_angle(self, a, b, c):
        """Return the angle ABC in degrees, normalized to [0, 360)."""
        try:
            ang = math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) -
                               math.atan2(a[1]-b[1], a[0]-b[0]))
            return ang + 360 if ang < 0 else ang
        except Exception as e:
            cpr_logger.error(f"Angle calculation error: {e}")
            return 0

    def _check_bended_right_arm(self, keypoints):
        """Check for right arm bending (returns warning list)."""
        warnings = []
        try:
            shoulder = keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
            elbow = keypoints[CocoKeypoints.RIGHT_ELBOW.value]
            wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]

            right_angle = self._calculate_angle(wrist, elbow, shoulder)
            self.right_arm_angles.append(right_angle)

            # BUG FIX: the conditional now selects between the mean and 0
            # instead of being passed *into* np.mean (np.mean(0) was computed).
            avg_right = np.mean(self.right_arm_angles[-self.history_length_to_average:]) if self.right_arm_angles else 0

            if avg_right > self.right_arm_angle_threshold:
                warnings.append("Right arm bent!")

            return warnings
        except Exception as e:
            cpr_logger.error(f"Right arm check error: {e}")
            return warnings

    def _check_bended_left_arm(self, keypoints):
        """Check for left arm bending (returns warning list)."""
        warnings = []
        try:
            shoulder = keypoints[CocoKeypoints.LEFT_SHOULDER.value]
            elbow = keypoints[CocoKeypoints.LEFT_ELBOW.value]
            wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]

            left_angle = self._calculate_angle(wrist, elbow, shoulder)
            self.left_arm_angles.append(left_angle)

            # BUG FIX: conditional moved outside the np.mean() call (see right arm).
            avg_left = np.mean(self.left_arm_angles[-self.history_length_to_average:]) if self.left_arm_angles else 0

            # NOTE(review): left uses '<' while right uses '>' — the angle
            # orientation differs per side; confirm thresholds are calibrated
            # accordingly.
            if avg_left < self.left_arm_angle_threshold:
                warnings.append("Left arm bent!")

            return warnings
        except Exception as e:
            cpr_logger.error(f"Left arm check error: {e}")
            return warnings

    def _check_hands_on_chest(self, keypoints, chest_params):
        """Check individual hand positions and return specific warnings."""
        warnings = []
        try:
            if chest_params is None:
                return ["Both hands not on chest!"]  # Fallback warning

            # BUG FIX: wrist extraction moved inside the try block (it was
            # performed before any guard, so a malformed keypoints array
            # raised instead of degrading gracefully).
            left_wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
            right_wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]

            cx, cy, cw, ch = chest_params
            left_in = right_in = False

            # Check left hand against the chest box.
            if left_wrist is not None:
                left_in = (cx - cw/2 < left_wrist[0] < cx + cw/2) and \
                          (cy - ch/2 < left_wrist[1] < cy + ch/2)

            # Check right hand against the chest box.
            if right_wrist is not None:
                right_in = (cx - cw/2 < right_wrist[0] < cx + cw/2) and \
                           (cy - ch/2 < right_wrist[1] < cy + ch/2)

            # Determine warnings
            if not left_in and not right_in:
                warnings.append("Both hands not on chest!")
            else:
                if not left_in:
                    warnings.append("Left hand not on chest!")
                if not right_in:
                    warnings.append("Right hand not on chest!")

        except Exception as e:
            cpr_logger.error(f"Hands check error: {e}")

        return warnings

    def validate_posture(self, keypoints, chest_params):
        """Run all posture validations (returns aggregated warnings)."""
        warnings = []

        warnings += self._check_hands_on_chest(keypoints, chest_params)

        # Only evaluate arm angles for hands that are actually on the chest.
        if ("Right hand not on chest!" not in warnings) and ("Both hands not on chest!" not in warnings):
            warnings += self._check_bended_right_arm(keypoints)

        if ("Left hand not on chest!" not in warnings) and ("Both hands not on chest!" not in warnings):
            warnings += self._check_bended_left_arm(keypoints)

        return warnings
|
CPRRealTime/role_classifier.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# role_classifier.py
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from ultralytics.utils.plotting import Annotator # Import YOLO's annotator
|
| 5 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class RoleClassifier:
    """Role classification and tracking using image processing."""

    def __init__(self, proximity_thresh=0.3):
        """
        Args:
            proximity_thresh (float): proximity threshold reserved for
                rescuer/patient association (currently not used in this class).
        """
        self.proximity_thresh = proximity_thresh
        self.rescuer_id = None
        self.rescuer_processed_results = None
        self.patient_processed_results = None

    def _calculate_verticality_score(self, bounding_box):
        """Score posture from the bounding-box aspect ratio.

        Returns:
            int: 1 for vertical (taller than wide), 0 for horizontal,
            -1 when the box is degenerate or malformed.
        """
        try:
            x1, y1, x2, y2 = bounding_box
            width = abs(x2 - x1)
            height = abs(y2 - y1)

            # Degenerate boxes carry no posture information.
            if width == 0 or height == 0:
                return -1

            return 1 if height > width else 0  # 1 = vertical, 0 = horizontal

        except (TypeError, ValueError) as e:
            cpr_logger.error(f"Verticality score calculation error: {e}")
            return -1

    def _calculate_bounding_box_center(self, bounding_box):
        """Return the (x, y) center of an [x1, y1, x2, y2] bounding box."""
        x1, y1, x2, y2 = bounding_box
        return (x1 + x2) / 2, (y1 + y2) / 2

    def _calculate_distance(self, point1, point2):
        """Calculate Euclidean distance between two (x, y) points."""
        return ((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2)**0.5

    def _calculate_bbox_areas(self, rescuer_bbox, patient_bbox):
        """
        Calculate bounding box areas for rescuer and patient.

        Args:
            rescuer_bbox: [x1, y1, x2, y2] of rescuer's box (or None).
            patient_bbox: [x1, y1, x2, y2] of patient's box (or None).

        Returns:
            tuple: (rescuer_area, patient_area) in pixels; 0 for a None box.
        """
        def compute_area(bbox):
            if bbox is None:
                return 0
            width = bbox[2] - bbox[0]   # x2 - x1
            height = bbox[3] - bbox[1]  # y2 - y1
            return abs(width * height)  # abs() guards inverted coordinates

        return compute_area(rescuer_bbox), compute_area(patient_bbox)

    def classify_roles(self, results, prev_rescuer_processed_results=None, prev_patient_processed_results=None):
        """
        Classify rescuer and patient among the people detected by YOLO.

        Args:
            results: ultralytics Results with boxes (xywh) and keypoints.
            prev_rescuer_processed_results: previous frame's rescuer dict, or None.
            prev_patient_processed_results: previous frame's patient dict, or None.

        Returns:
            tuple: (rescuer, patient) processed-result dicts; either may be None.
        """
        processed_results = []

        # Combined-area threshold from the previous frame, used to filter
        # spurious oversized detections.
        # CONSISTENCY FIX: reuse _calculate_bbox_areas instead of duplicating
        # the area arithmetic inline.
        threshold = None
        if prev_rescuer_processed_results and prev_patient_processed_results:
            rescuer_area, patient_area = self._calculate_bbox_areas(
                prev_rescuer_processed_results["bounding_box"],
                prev_patient_processed_results["bounding_box"])
            threshold = rescuer_area + patient_area

        for i, (box, keypoints) in enumerate(zip(results.boxes.xywh.cpu().numpy(),
                                                 results.keypoints.xy.cpu().numpy())):
            try:
                # Convert box to [x1,y1,x2,y2] format
                x_center, y_center, width, height = box
                bounding_box = [
                    x_center - width/2,   # x1
                    y_center - height/2,  # y1
                    x_center + width/2,   # x2
                    y_center + height/2   # y2
                ]

                # Skip if box exceeds area threshold (when threshold exists)
                if threshold:
                    box_area = width * height
                    if box_area > threshold * 1.2:  # 20% tolerance
                        cpr_logger.info(f"Filtered oversized box {i} (area: {box_area:.1f} > threshold: {threshold:.1f})")
                        continue

                # Calculate features
                verticality_score = self._calculate_verticality_score(bounding_box)
                bounding_box_center = self._calculate_bounding_box_center(bounding_box)

                # Store valid results
                processed_results.append({
                    'original_index': i,
                    'bounding_box': bounding_box,
                    'bounding_box_center': bounding_box_center,
                    'verticality_score': verticality_score,
                    'keypoints': keypoints,
                })

            except Exception as e:
                cpr_logger.error(f"Error processing detection {i}: {e}")
                continue

        # Step 2: Identify the patient (horizontal posture)
        patient_candidates = [res for res in processed_results
                              if res['verticality_score'] == 0]

        # If more than one horizontal person, select person with lowest center
        # y-coordinate after sorting (most likely lying down).
        if len(patient_candidates) > 1:
            patient_candidates = sorted(patient_candidates,
                                        key=lambda x: x['bounding_box_center'][1])[:1]

        patient = patient_candidates[0] if patient_candidates else None

        # Step 3: Identify the rescuer — nearest vertical person to the patient.
        rescuer = None
        if patient:
            potential_rescuers = [
                res for res in processed_results
                if res['verticality_score'] == 1
                and res['original_index'] != patient['original_index']
            ]

            if potential_rescuers:
                rescuer = min(potential_rescuers,
                              key=lambda x: self._calculate_distance(
                                  x['bounding_box_center'],
                                  patient['bounding_box_center']))

        return rescuer, patient

    def draw_rescuer_and_patient(self, frame):
        """Annotate the frame with rescuer (green) and patient (red) overlays."""
        annotator = Annotator(frame)

        # Draw rescuer (A) with green box and keypoints
        if self.rescuer_processed_results:
            try:
                x1, y1, x2, y2 = map(int, self.rescuer_processed_results["bounding_box"])
                annotator.box_label((x1, y1, x2, y2), "Rescuer A", color=(0, 255, 0))

                if "keypoints" in self.rescuer_processed_results:
                    keypoints = self.rescuer_processed_results["keypoints"]
                    annotator.kpts(keypoints, shape=frame.shape[:2])
            except Exception as e:
                cpr_logger.error(f"Error drawing rescuer: {str(e)}")

        # Draw patient (B) with red box and keypoints
        if self.patient_processed_results:
            try:
                x1, y1, x2, y2 = map(int, self.patient_processed_results["bounding_box"])
                annotator.box_label((x1, y1, x2, y2), "Patient B", color=(0, 0, 255))

                if "keypoints" in self.patient_processed_results:
                    keypoints = self.patient_processed_results["keypoints"]
                    annotator.kpts(keypoints, shape=frame.shape[:2])
            except Exception as e:
                cpr_logger.error(f"Error drawing patient: {str(e)}")

        return annotator.result()
|
| 178 |
+
|
CPRRealTime/shoulders_analyzer.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from CPRRealTime.keypoints import CocoKeypoints
|
| 3 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 4 |
+
|
| 5 |
+
class ShouldersAnalyzer:
    """Tracks and analyzes the rescuer's shoulder separation."""

    def __init__(self):
        # Most recent measured distance (pixels) and the running history.
        self.shoulder_distance = None
        self.shoulder_distance_history = []

    def calculate_shoulder_distance(self, rescuer_keypoints):
        """Return the pixel distance between the rescuer's shoulders.

        Returns None when keypoints are missing or the computation fails.
        """
        if rescuer_keypoints is None:
            return None

        try:
            left_pt = np.array(rescuer_keypoints[CocoKeypoints.LEFT_SHOULDER.value])
            right_pt = np.array(rescuer_keypoints[CocoKeypoints.RIGHT_SHOULDER.value])
            return np.linalg.norm(left_pt - right_pt)
        except Exception as e:
            cpr_logger.error(f"Shoulder distance error: {e}")
            return None

    def reset_shoulder_distances(self):
        """Clear the accumulated shoulder-distance history."""
        self.shoulder_distance_history = []
|
CPRRealTime/threaded_camera.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
from queue import Queue
|
| 3 |
+
import queue
|
| 4 |
+
import cv2
|
| 5 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 6 |
+
|
| 7 |
+
class ThreadedCamera:
    """Background-threaded camera reader with a bounded frame queue.

    A daemon thread pulls frames from OpenCV's VideoCapture into a Queue so
    that slow downstream processing drops frames instead of blocking capture.
    A ``None`` sentinel is enqueued when the camera disconnects.
    """

    def __init__(self, source, requested_fps = 30):
        """
        Args:
            source: camera index or stream URL accepted by cv2.VideoCapture.
            requested_fps: FPS to request from the device (may not be honored).

        Raises:
            ValueError: if the capture source cannot be opened.
        """
        # The constructor of OpenCV's VideoCapture class automatically opens the camera
        self.cap = cv2.VideoCapture(source)
        if not self.cap.isOpened():
            raise ValueError(f"[VIDEO CAPTURE] Unable to open camera source: {source}")
        cpr_logger.info(f"[VIDEO CAPTURE] Camera source opened: {source}")

        # Attempt to configure the camera to the requested FPS
        # Which is set to the value we have been working on with recorded videos
        # .set() returns True if the camera acknowledged the request, not if it actually achieved the FPS.
        set_success = self.cap.set(cv2.CAP_PROP_FPS, requested_fps)

        # Get the actual FPS from the camera
        # This is the FPS that the camera is actually using, which may differ from the requested FPS.
        actual_fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.fps = actual_fps

        cpr_logger.info(f"[VIDEO CAPTURE] Requested FPS: {requested_fps}, Set Success: {set_success}, Actual FPS: {actual_fps}")

        # Size the buffer to absorb number_of_seconds_to_buffer seconds of lag
        # before frames start being dropped.
        number_of_seconds_to_buffer = 5
        queue_size = int(actual_fps * number_of_seconds_to_buffer)
        self.q = Queue(maxsize=queue_size)
        cpr_logger.info(f"[VIDEO CAPTURE] Queue size: {queue_size}")

        # Event flag signalling the reader thread to keep running.
        self.running = threading.Event()
        self.running.set() # Initial state = running
        cpr_logger.info(f"[VIDEO CAPTURE] Camera running: {self.running.is_set()}")

        # Counters used to report drop statistics on release().
        self.number_of_total_frames = 0
        self.number_of_dropped_frames = 0

        self.thread = None

    def start_capture(self):
        """Start the background reader thread (discards any stale frames)."""
        # Clear any existing frames in queue
        while not self.q.empty():
            self.q.get()

        # threading.Thread() initialize a new thread
        # target=self._reader specify the method (_reader) the thread will execute
        self.thread = threading.Thread(target=self._reader)
        cpr_logger.info(f"[VIDEO CAPTURE] Thread initialized: {self.thread}")

        # Set the thread as a daemon thread:
        # Daemon threads automatically exit when the main program exits
        # They run in the background and don't block program termination
        self.thread.daemon = True
        cpr_logger.info(f"[VIDEO CAPTURE] Thread daemon: {self.thread.daemon}")

        # Start the thread execution:
        # Call the _reader method in parallel with the main program
        self.thread.start()

    def _reader(self):
        """Reader loop: push frames into the queue until stopped or disconnected."""
        while self.running.is_set():
            ret, frame = self.cap.read()
            if not ret:
                cpr_logger.info("Camera disconnected")
                self.q.put(None) # Sentinel for clean exit
                break

            try:
                self.number_of_total_frames += 1
                # Short timeout so a full queue drops the frame instead of
                # stalling the capture loop.
                self.q.put(frame, timeout=0.1)
            except queue.Full:
                cpr_logger.info("Frame dropped")
                self.number_of_dropped_frames += 1

    def read(self):
        """Return the next frame (blocks until one is available; None = disconnect)."""
        return self.q.get()

    def release(self):
        """Stop the reader thread and release the capture device."""
        #! Not an error
        cpr_logger.error(f"[VIDEO CAPTURE] Total frames: {self.number_of_total_frames}, Dropped frames: {self.number_of_dropped_frames}")

        self.running.clear()

        # First release the capture to unblock pending reads
        self.cap.release() # MOVED THIS LINE UP

        # Then join the thread
        self.thread.join(timeout=1.0)

        if self.thread.is_alive():
            cpr_logger.info("Warning: Thread didn't terminate cleanly")
        # Removed redundant self.cap.release()

    def isOpened(self):
        """Return True while the reader is running and the device is open."""
        return self.running.is_set() and self.cap.isOpened()

    def __del__(self):
        # Best-effort cleanup if the caller forgot to release().
        if self.running.is_set(): # Only release if not already done
            self.release()
|
CPRRealTime/warnings_overlayer.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 7 |
+
|
| 8 |
+
class WarningsOverlayer:
|
| 9 |
+
def __init__(self):
    """Initialize the per-message overlay style table.

    WARNING_CONFIG maps each warning string to its BGR text color and the
    fixed (x, y) screen position where it is drawn, so simultaneous
    warnings never overlap.
    """
    self.WARNING_CONFIG = {
        # Posture Warnings
        "Right arm bent!": {
            "color": (52, 110, 235),
            "position": (50, 150)
        },
        "Left arm bent!": {
            "color": (52, 110, 235),
            "position": (50, 200)
        },
        "Left hand not on chest!": {
            "color": (161, 127, 18),
            "position": (50, 250)
        },
        "Right hand not on chest!": {
            "color": (161, 127, 18),
            "position": (50, 300)
        },
        "Both hands not on chest!": {
            "color": (161, 127, 18),
            "position": (50, 350)
        },

        # Rate/Depth Warnings
        "Depth too low!": {
            "color": (125, 52, 235),
            "position": (50, 400)
        },
        "Depth too high!": {
            "color": (125, 52, 235),
            "position": (50, 450)
        },
        "Rate too slow!": {
            "color": (235, 52, 214),
            "position": (50, 500)
        },
        "Rate too fast!": {
            "color": (235, 52, 214),
            "position": (50, 550)
        }
    }
|
| 51 |
+
|
| 52 |
+
def add_warnings_to_processed_video(self, video_output_path, sampling_interval_frames, rate_and_depth_warnings, posture_warnings):
|
| 53 |
+
"""Process both warning types with identical handling"""
|
| 54 |
+
cpr_logger.info("\n[POST-PROCESS] Starting warning overlay")
|
| 55 |
+
|
| 56 |
+
# Read processed video with original parameters
|
| 57 |
+
cap = cv2.VideoCapture(video_output_path)
|
| 58 |
+
if not cap.isOpened():
|
| 59 |
+
cpr_logger.info("[ERROR] Failed to open processed video")
|
| 60 |
+
return
|
| 61 |
+
|
| 62 |
+
# Get original video properties
|
| 63 |
+
original_fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
|
| 64 |
+
processed_fps = cap.get(cv2.CAP_PROP_FPS)
|
| 65 |
+
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
| 66 |
+
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
| 67 |
+
|
| 68 |
+
# Create final writer with ORIGINAL codec and parameters
|
| 69 |
+
base = os.path.splitext(video_output_path)[0]
|
| 70 |
+
final_path = os.path.abspath(f"{base}_final.mp4")
|
| 71 |
+
writer = cv2.VideoWriter(final_path, original_fourcc, processed_fps, (width, height))
|
| 72 |
+
|
| 73 |
+
# Combine all warnings into unified list
|
| 74 |
+
all_warnings = []
|
| 75 |
+
|
| 76 |
+
# Process posture warnings
|
| 77 |
+
for entry in posture_warnings:
|
| 78 |
+
if warnings := entry.get('posture_warnings'):
|
| 79 |
+
start = entry['start_frame'] // sampling_interval_frames
|
| 80 |
+
end = entry['end_frame'] // sampling_interval_frames
|
| 81 |
+
all_warnings.append((int(start), int(end), warnings))
|
| 82 |
+
|
| 83 |
+
# Process rate/depth warnings
|
| 84 |
+
for entry in rate_and_depth_warnings:
|
| 85 |
+
if warnings := entry.get('rate_and_depth_warnings'):
|
| 86 |
+
start = entry['start_frame'] // sampling_interval_frames
|
| 87 |
+
end = entry['end_frame'] // sampling_interval_frames
|
| 88 |
+
all_warnings.append((int(start), int(end), warnings))
|
| 89 |
+
|
| 90 |
+
# Video processing loop
|
| 91 |
+
frame_idx = 0
|
| 92 |
+
while True:
|
| 93 |
+
ret, frame = cap.read()
|
| 94 |
+
if not ret:
|
| 95 |
+
break
|
| 96 |
+
|
| 97 |
+
# Check active warnings for current frame
|
| 98 |
+
active_warnings = []
|
| 99 |
+
for start, end, warnings in all_warnings:
|
| 100 |
+
if start <= frame_idx <= end:
|
| 101 |
+
active_warnings.extend(warnings)
|
| 102 |
+
|
| 103 |
+
# Draw all warnings using unified config
|
| 104 |
+
self._draw_warnings(frame, active_warnings)
|
| 105 |
+
|
| 106 |
+
writer.write(frame)
|
| 107 |
+
frame_idx += 1
|
| 108 |
+
|
| 109 |
+
cap.release()
|
| 110 |
+
writer.release()
|
| 111 |
+
cpr_logger.info(f"\n[POST-PROCESS] Final output saved to: {final_path}")
|
| 112 |
+
|
| 113 |
+
def _draw_warnings(self, frame, active_warnings):
|
| 114 |
+
"""Draw warnings using unified configuration"""
|
| 115 |
+
drawn_positions = set() # Prevent overlapping
|
| 116 |
+
|
| 117 |
+
for warning_text in active_warnings:
|
| 118 |
+
if config := self.WARNING_CONFIG.get(warning_text):
|
| 119 |
+
x, y = config['position']
|
| 120 |
+
|
| 121 |
+
# Auto-stack if position occupied
|
| 122 |
+
while (x, y) in drawn_positions:
|
| 123 |
+
y += 50 # Move down by 50px
|
| 124 |
+
|
| 125 |
+
self._draw_warning_banner(
|
| 126 |
+
frame=frame,
|
| 127 |
+
text=warning_text,
|
| 128 |
+
color=config['color'],
|
| 129 |
+
position=(x, y)
|
| 130 |
+
)
|
| 131 |
+
drawn_positions.add((x, y))
|
| 132 |
+
|
| 133 |
+
def _draw_warning_banner(self, frame, text, color, position):
|
| 134 |
+
"""Base drawing function for warning banners"""
|
| 135 |
+
(text_width, text_height), _ = cv2.getTextSize(
|
| 136 |
+
text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
|
| 137 |
+
|
| 138 |
+
x, y = position
|
| 139 |
+
# Background rectangle
|
| 140 |
+
cv2.rectangle(frame,
|
| 141 |
+
(x - 10, y - text_height - 10),
|
| 142 |
+
(x + text_width + 10, y + 10),
|
| 143 |
+
color, -1)
|
| 144 |
+
# Text
|
| 145 |
+
cv2.putText(frame, text, (x, y),
|
| 146 |
+
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
|
| 147 |
+
|
CPRRealTime/wrists_midpoint_analyzer.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from CPRRealTime.keypoints import CocoKeypoints
|
| 4 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 5 |
+
|
| 6 |
+
class WristsMidpointAnalyzer:
    """Analyzes and tracks wrist midpoints for rescuer"""

    def __init__(self, allowed_distance_between_wrists=170):
        # Wrists farther apart than this many pixels are rejected as an
        # implausible compression posture.
        self.allowed_distance_between_wrists = allowed_distance_between_wrists
        self.midpoint = None        # last midpoint in pixel coordinates (set by caller)
        self.midpoint_history = []  # accumulated midpoints across frames
        # NOTE(review): detect_wrists_midpoint returns the midpoint but does
        # not update self.midpoint / self.midpoint_history itself — the
        # caller appears responsible for that; confirm against call sites.

    def detect_wrists_midpoint(self, rescuer_keypoints):
        """Calculate midpoint between wrists in pixel coordinates"""
        try:
            if rescuer_keypoints is None:
                return None

            # Wrist coordinates from the COCO keypoint layout
            left = rescuer_keypoints[CocoKeypoints.LEFT_WRIST.value]
            right = rescuer_keypoints[CocoKeypoints.RIGHT_WRIST.value]

            # Hands too far apart → not a compression pose, no midpoint.
            if np.linalg.norm(np.array(left) - np.array(right)) > self.allowed_distance_between_wrists:
                return None

            # Integer pixel midpoint of the two wrists
            return (
                int((left[0] + right[0]) / 2),
                int((left[1] + right[1]) / 2),
            )

        except Exception as e:
            cpr_logger.error(f"Midpoint tracking error: {e}")
            return None

    def draw_midpoint(self, frame):
        """Visualize the midpoint on frame"""
        if self.midpoint is None:
            return frame

        try:
            # Green dot plus a "MIDPOINT" label just above/right of it
            cv2.circle(frame, self.midpoint, 8, (0, 255, 0), -1)
            label_anchor = (self.midpoint[0] + 5, self.midpoint[1] - 10)
            cv2.putText(
                frame, "MIDPOINT", label_anchor,
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2
            )
            return frame
        except Exception as e:
            cpr_logger.error(f"Midpoint drawing error: {e}")
            return frame

    def reset_midpoint_history(self):
        """Reset midpoint history"""
        self.midpoint_history = []
|
backupbackend.zip → CPRRealTime/yolo11n-pose.pt
RENAMED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:869e83fcdffdc7371fa4e34cd8e51c838cc729571d1635e5141e3075e9319dc0
|
| 3 |
+
size 6255593
|
main.py
CHANGED
|
@@ -10,11 +10,12 @@ import cloudinary
|
|
| 10 |
import cloudinary.uploader
|
| 11 |
from cloudinary.utils import cloudinary_url
|
| 12 |
from SkinBurns_Classification import extract_features
|
| 13 |
-
from SkinBurns_Segmentation import
|
| 14 |
import requests
|
| 15 |
import joblib
|
| 16 |
import numpy as np
|
| 17 |
-
from ECG import
|
|
|
|
| 18 |
from ultralytics import YOLO
|
| 19 |
import tensorflow as tf
|
| 20 |
from fastapi import HTTPException
|
|
@@ -22,10 +23,21 @@ from fastapi import WebSocket, WebSocketDisconnect
|
|
| 22 |
import base64
|
| 23 |
import cv2
|
| 24 |
import time
|
| 25 |
-
from CPR.CPRAnalyzer import CPRAnalyzer
|
| 26 |
import tempfile
|
| 27 |
-
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
|
| 31 |
app = FastAPI()
|
|
@@ -144,8 +156,52 @@ async def predict_burn(file: UploadFile = File(...)):
|
|
| 144 |
except Exception as e:
|
| 145 |
return JSONResponse(content={"error": str(e)}, status_code=500)
|
| 146 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
|
| 148 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 149 |
# ✅ Optimize and transform image URL
|
| 150 |
@app.get("/cloudinary/transform")
|
| 151 |
def transform_image():
|
|
@@ -160,35 +216,60 @@ def transform_image():
|
|
| 160 |
return {"error": str(e)}
|
| 161 |
|
| 162 |
@app.post("/classify-ecg")
|
| 163 |
-
async def
|
| 164 |
model = joblib.load('voting_classifier.pkl')
|
| 165 |
-
|
| 166 |
-
temp_dir = f"temp_ecg_{uuid.uuid4()}"
|
| 167 |
-
os.makedirs(temp_dir, exist_ok=True)
|
| 168 |
|
| 169 |
try:
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
|
| 175 |
-
#
|
| 176 |
-
|
| 177 |
-
if len(base_names) != 1:
|
| 178 |
-
return JSONResponse(content={"error": "Files must have the same base name"}, status_code=400)
|
| 179 |
|
| 180 |
-
|
| 181 |
-
|
| 182 |
|
| 183 |
-
result = classify_new_ecg(file_path, model)
|
| 184 |
return {"result": result}
|
| 185 |
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 192 |
|
| 193 |
|
| 194 |
@app.post("/process_video")
|
|
@@ -204,77 +285,35 @@ async def process_video(file: UploadFile = File(...)):
|
|
| 204 |
with open(video_path, "wb") as buffer:
|
| 205 |
shutil.copyfileobj(file.file, buffer)
|
| 206 |
|
| 207 |
-
print("[
|
| 208 |
-
start_time = time.time()
|
| 209 |
-
|
| 210 |
-
cap = cv2.VideoCapture(video_path)
|
| 211 |
-
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 212 |
-
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 213 |
-
duration_seconds = total_frames / fps
|
| 214 |
-
chunk_duration = 10 # seconds
|
| 215 |
-
frames_per_chunk = int(fps * chunk_duration)
|
| 216 |
-
|
| 217 |
-
chunks = []
|
| 218 |
-
chunk_index = 0
|
| 219 |
-
current_frame = 0
|
| 220 |
-
|
| 221 |
-
while current_frame < total_frames:
|
| 222 |
-
# Read the chunk into memory
|
| 223 |
-
frames = []
|
| 224 |
-
for _ in range(frames_per_chunk):
|
| 225 |
-
ret, frame = cap.read()
|
| 226 |
-
if not ret:
|
| 227 |
-
break
|
| 228 |
-
frames.append(frame)
|
| 229 |
-
current_frame += 1
|
| 230 |
-
|
| 231 |
-
if not frames:
|
| 232 |
-
break
|
| 233 |
-
|
| 234 |
-
# Save chunk to temp video
|
| 235 |
-
temp_chunk_path = os.path.join(tempfile.gettempdir(), f"chunk_{chunk_index}.mp4")
|
| 236 |
-
height, width = frames[0].shape[:2]
|
| 237 |
-
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
| 238 |
-
out = cv2.VideoWriter(temp_chunk_path, fourcc, fps, (width, height))
|
| 239 |
-
for f in frames:
|
| 240 |
-
out.write(f)
|
| 241 |
-
out.release()
|
| 242 |
-
|
| 243 |
-
# Analyze chunk
|
| 244 |
-
print(f"[CHUNK {chunk_index}] Processing chunk at {temp_chunk_path}")
|
| 245 |
-
analyzer = CPRAnalyzer(temp_chunk_path)
|
| 246 |
-
analyzer.run_analysis()
|
| 247 |
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
|
|
|
| 251 |
|
| 252 |
-
|
| 253 |
-
for w in warnings:
|
| 254 |
filename = w['image_url'] # just the filename
|
| 255 |
local_path = os.path.join("screenshots", filename)
|
| 256 |
upload_result = cloudinary.uploader.upload(local_path, folder="posture_warnings")
|
| 257 |
w['image_url'] = upload_result['secure_url']
|
| 258 |
|
| 259 |
-
# Estimate score (adjust this logic as needed)
|
| 260 |
-
penalty = len(warnings) * 0.1
|
| 261 |
-
rate_score = min(metrics["average_compression_rate"] / 120, 1.0)
|
| 262 |
-
depth_score = min(metrics["average_compression_depth"] / 5.0, 1.0)
|
| 263 |
-
average_score = max(0.0, (rate_score + depth_score)/2 - penalty)
|
| 264 |
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
"average_depth": round(metrics["average_compression_depth"], 1),
|
| 269 |
-
"posture_warnings": warnings
|
| 270 |
-
})
|
| 271 |
|
| 272 |
-
|
|
|
|
| 273 |
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 278 |
|
| 279 |
|
| 280 |
@app.post("/process_image")
|
|
@@ -316,43 +355,264 @@ async def process_image(file: UploadFile = File(...)):
|
|
| 316 |
})
|
| 317 |
|
| 318 |
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
await websocket.accept()
|
| 325 |
try:
|
| 326 |
while True:
|
| 327 |
-
|
| 328 |
-
|
| 329 |
|
| 330 |
-
#
|
| 331 |
-
np_arr = np.frombuffer(
|
| 332 |
frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
|
| 333 |
-
|
| 334 |
-
#
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 354 |
else:
|
| 355 |
-
|
| 356 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 357 |
except WebSocketDisconnect:
|
| 358 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
import cloudinary.uploader
|
| 11 |
from cloudinary.utils import cloudinary_url
|
| 12 |
from SkinBurns_Classification import extract_features
|
| 13 |
+
from SkinBurns_Segmentation import segment_burn
|
| 14 |
import requests
|
| 15 |
import joblib
|
| 16 |
import numpy as np
|
| 17 |
+
from ECG.ECG_Classify import classify_ecg
|
| 18 |
+
from ECG.ECG_MultiClass import analyze_ecg_pdf
|
| 19 |
from ultralytics import YOLO
|
| 20 |
import tensorflow as tf
|
| 21 |
from fastapi import HTTPException
|
|
|
|
| 23 |
import base64
|
| 24 |
import cv2
|
| 25 |
import time
|
| 26 |
+
#from CPR.CPRAnalyzer import CPRAnalyzer
|
| 27 |
import tempfile
|
| 28 |
+
import matplotlib.pyplot as plt
|
| 29 |
+
import json
|
| 30 |
+
import asyncio
|
| 31 |
+
import concurrent.futures
|
| 32 |
+
from CPRRealTime.main import CPRAnalyzer
|
| 33 |
+
from threading import Thread
|
| 34 |
+
from starlette.responses import StreamingResponse
|
| 35 |
+
import threading
|
| 36 |
+
import queue
|
| 37 |
+
from CPRRealTime.analysis_socket_server import AnalysisSocketServer # adjust if needed
|
| 38 |
+
from CPRRealTime.logging_config import cpr_logger
|
| 39 |
+
import logging
|
| 40 |
+
import sys
|
| 41 |
|
| 42 |
|
| 43 |
app = FastAPI()
|
|
|
|
| 156 |
except Exception as e:
|
| 157 |
return JSONResponse(content={"error": str(e)}, status_code=500)
|
| 158 |
|
| 159 |
+
@app.post("/segment_burn")
async def segment_burn_endpoint(reference: UploadFile = File(...), patient: UploadFile = File(...)):
    """Segment the burn region of a patient photo against a reference photo.

    Saves both uploads to temporary files, runs ``segment_burn``, uploads
    the clean and debug crops to Cloudinary, and returns their URLs.

    Returns:
        dict with 'crop_clean_url' and 'crop_debug_url' on success, or a
        JSONResponse with an 'error' key and status 500 on failure.
    """
    temp_paths = []
    try:
        # Save the reference image temporarily
        reference_path = f"temp_ref_{reference.filename}"
        temp_paths.append(reference_path)
        reference_bytes = await reference.read()
        with open(reference_path, "wb") as ref_file:
            ref_file.write(reference_bytes)

        # Save the patient image temporarily
        patient_path = f"temp_patient_{patient.filename}"
        temp_paths.append(patient_path)
        patient_bytes = await patient.read()
        with open(patient_path, "wb") as pat_file:
            pat_file.write(patient_bytes)

        # Call the segmentation logic
        burn_crop_clean, burn_crop_debug = segment_burn(patient_path, reference_path)

        # Save the cropped outputs (uuid names avoid clashes between requests)
        burn_crop_clean_path = f"temp_burn_crop_clean_{uuid.uuid4()}.png"
        burn_crop_debug_path = f"temp_burn_crop_debug_{uuid.uuid4()}.png"
        temp_paths.extend([burn_crop_clean_path, burn_crop_debug_path])

        plt.imsave(burn_crop_clean_path, burn_crop_clean)
        plt.imsave(burn_crop_debug_path, burn_crop_debug)

        # Upload to Cloudinary
        crop_clean_upload = cloudinary.uploader.upload(burn_crop_clean_path, public_id=f"ref_{reference.filename}")
        crop_debug_upload = cloudinary.uploader.upload(burn_crop_debug_path, public_id=f"pat_{patient.filename}")

        return {
            "crop_clean_url": crop_clean_upload["secure_url"],
            "crop_debug_url": crop_debug_upload["secure_url"]
        }

    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)

    finally:
        # FIX: previously only the two crop files were removed, and only on
        # the success path — the uploaded temp_ref_*/temp_patient_* files
        # leaked on every request, and all four leaked on any exception.
        for path in temp_paths:
            try:
                os.remove(path)
            except OSError:
                pass
| 204 |
+
|
| 205 |
# ✅ Optimize and transform image URL
|
| 206 |
@app.get("/cloudinary/transform")
|
| 207 |
def transform_image():
|
|
|
|
| 216 |
return {"error": str(e)}
|
| 217 |
|
| 218 |
@app.post("/classify-ecg")
async def classify_ecg_endpoint(file: UploadFile = File(...)):
    """Classify an uploaded ECG (PDF) with the pre-trained voting classifier.

    Returns:
        {'result': <classification>} on success, or a JSONResponse with an
        'error' key and status 500 on failure.
    """
    temp_file_path = None
    try:
        # Load the model inside the try so a load failure returns a JSON
        # error instead of an unhandled 500 (it previously ran before try).
        model = joblib.load('voting_classifier.pkl')

        # Save the uploaded file temporarily
        temp_file_path = f"temp_{file.filename}"
        with open(temp_file_path, "wb") as temp_file:
            temp_file.write(await file.read())

        # Call the ECG classification function
        result = classify_ecg(temp_file_path, model, debug=True, is_pdf=True)

        return {"result": result}

    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)

    finally:
        # FIX: the temp file previously leaked whenever classification raised.
        if temp_file_path and os.path.exists(temp_file_path):
            os.remove(temp_file_path)
| 239 |
+
|
| 240 |
+
@app.post("/diagnose-ecg")
async def diagnose_ecg(file: UploadFile = File(...)):
    """Run multi-class ECG diagnosis on an uploaded PDF.

    Returns:
        {'result': <diagnosis>} when the model produced one,
        {'result': 'No diagnosis'} otherwise, or a JSONResponse with an
        'error' key and status 500 on failure.
    """
    temp_file_path = None
    try:
        # Save the uploaded file temporarily
        temp_file_path = f"temp_{file.filename}"
        with open(temp_file_path, "wb") as temp_file:
            temp_file.write(await file.read())

        model_path = 'deep-multiclass.h5'  # Update with actual path
        mlb_path = 'deep-multiclass.pkl'   # Update with actual path

        # Call the ECG classification function
        result = analyze_ecg_pdf(
            temp_file_path,
            model_path,
            mlb_path,
            cleanup=False,    # Keep the digitized file
            debug=False,      # Print debug information
            visualize=False   # Visualize the digitized signal
        )

        if result and result["diagnosis"]:
            return {"result": result["diagnosis"]}
        else:
            return {"result": "No diagnosis"}

    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)

    finally:
        # FIX: the temp file previously leaked whenever analysis raised.
        if temp_file_path and os.path.exists(temp_file_path):
            os.remove(temp_file_path)
|
| 273 |
|
| 274 |
|
| 275 |
@app.post("/process_video")
|
|
|
|
| 285 |
with open(video_path, "wb") as buffer:
|
| 286 |
shutil.copyfileobj(file.file, buffer)
|
| 287 |
|
| 288 |
+
print(f"\n[API] CPR Analysis Started on {video_path}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 289 |
|
| 290 |
+
# Run analyzer
|
| 291 |
+
start_time = time.time()
|
| 292 |
+
analyzer = CPRAnalyzer(video_path)
|
| 293 |
+
wholevideoURL, graphURL, warnings,chunks = analyzer.run_analysis() # Expects tuple return
|
| 294 |
|
| 295 |
+
for w in warnings:
|
|
|
|
| 296 |
filename = w['image_url'] # just the filename
|
| 297 |
local_path = os.path.join("screenshots", filename)
|
| 298 |
upload_result = cloudinary.uploader.upload(local_path, folder="posture_warnings")
|
| 299 |
w['image_url'] = upload_result['secure_url']
|
| 300 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 301 |
|
| 302 |
+
print(f"[API] CPR Analysis Completed on {video_path}")
|
| 303 |
+
analysis_time = time.time() - start_time
|
| 304 |
+
print(f"[TIMING] Analysis time: {analysis_time:.2f}s")
|
|
|
|
|
|
|
|
|
|
| 305 |
|
| 306 |
+
if wholevideoURL is None:
|
| 307 |
+
raise HTTPException(status_code=500, detail="No chunk data was generated from the video.")
|
| 308 |
|
| 309 |
+
# Return chunks and error regions
|
| 310 |
+
return JSONResponse(content={
|
| 311 |
+
"videoURL": wholevideoURL,
|
| 312 |
+
"graphURL": graphURL,
|
| 313 |
+
"warnings": warnings,
|
| 314 |
+
"chunks": chunks,
|
| 315 |
+
|
| 316 |
+
})
|
| 317 |
|
| 318 |
|
| 319 |
@app.post("/process_image")
|
|
|
|
| 355 |
})
|
| 356 |
|
| 357 |
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
@app.websocket("/ws/process_image")
async def websocket_process_image(websocket: WebSocket):
    """Receive JPEG frames over a websocket, run YOLO pose estimation on
    each one, display the annotated frame locally, and reply to the client
    with the detected keypoints and confidences as JSON."""
    await websocket.accept()
    try:
        while True:
            # Raw JPEG bytes sent by the Flutter client
            data: bytes = await websocket.receive_bytes()

            # Decode to BGR (kept for pre-display / debugging parity)
            np_arr = np.frombuffer(data, np.uint8)
            frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

            # YOLO ingests a file path, so spill the bytes to a temp file
            with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
                tmp.write(data)
                tmp_path = tmp.name

            # Inference without any built-in display; report errors to client
            try:
                results = model(source=tmp_path, show=False, save=False)
            except Exception as e:
                os.unlink(tmp_path)
                await websocket.send_text(json.dumps({
                    "error": f"YOLO error: {e}"
                }))
                continue

            os.unlink(tmp_path)

            # Clear stray windows, then show exactly one annotated frame
            cv2.destroyAllWindows()
            annotated: np.ndarray = results[0].plot()
            cv2.imshow("Pose / Segmentation", annotated)
            cv2.waitKey(1)  # small delay to allow window refresh

            # Build the JSON payload for the client
            no_detection = not results or len(results) == 0 or results[0].keypoints is None
            if no_detection:
                payload = {"message": "No keypoints detected"}
            else:
                boxes = results[0].boxes
                payload = {
                    "message": "Image processed successfully",
                    "KeypointsXY": results[0].keypoints.xy.tolist(),
                    "confidences": boxes.conf.tolist() if boxes is not None else [],
                }

            await websocket.send_text(json.dumps(payload))

    except WebSocketDisconnect:
        print("Client disconnected")
    finally:
        # Ensure the window is closed on disconnect
        cv2.destroyAllWindows()
| 424 |
+
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
@app.websocket("/ws/process_video")
async def websocket_process_video(websocket: WebSocket):
    """Collect frames from a websocket client into fixed-size batches, save
    each batch as an mp4, and run CPR analysis on it in a worker thread
    while streaming progress messages back to the client."""
    await websocket.accept()

    frame_buffer = []
    frame_limit = 50            # frames per analysis batch
    frame_size = (640, 480)     # Adjust if needed
    fps = 30                    # Adjust if needed
    loop = asyncio.get_event_loop()
    # FIX: a new ThreadPoolExecutor was previously created for every batch
    # and never shut down; use one reusable pool for the connection.
    executor = concurrent.futures.ThreadPoolExecutor()

    # Progress reporting during analysis
    async def progress_callback(data):
        await websocket.send_text(json.dumps(data))

    def sync_callback(data):
        # Bridge the analyzer's synchronous callbacks onto the event loop.
        asyncio.run_coroutine_threadsafe(progress_callback(data), loop)

    def save_frames_to_video(frames, path):
        """Write the buffered frames to an mp4 at the configured size/fps."""
        out = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'mp4v'), fps, frame_size)
        for frame in frames:
            out.write(cv2.resize(frame, frame_size))
        out.release()

    def run_analysis_on_buffer(frames):
        """Blocking worker: persist the frames and run the CPR analyzer."""
        try:
            tmp_path = "temp_video.mp4"
            save_frames_to_video(frames, tmp_path)

            # Notify: video saved
            asyncio.run_coroutine_threadsafe(
                websocket.send_text(json.dumps({
                    "status": "info",
                    "message": "Video saved. Starting CPR analysis..."
                })),
                loop
            )

            # Run analysis
            analyzer = CPRAnalyzer(video_path=tmp_path)
            analyzer.run_analysis(progress_callback=sync_callback)

        except Exception as e:
            asyncio.run_coroutine_threadsafe(
                websocket.send_text(json.dumps({"error": str(e)})),
                loop
            )

    try:
        while True:
            data: bytes = await websocket.receive_bytes()
            np_arr = np.frombuffer(data, np.uint8)
            frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            if frame is None:
                continue

            frame_buffer.append(frame)
            print(f"Frame added to buffer: {len(frame_buffer)}")

            if len(frame_buffer) == frame_limit:
                # Notify Flutter that we're switching to processing.
                # FIX: the message hard-coded "First 150 frames" while the
                # actual batch size is frame_limit (50); interpolate it.
                await websocket.send_text(json.dumps({
                    "status": "ready",
                    "message": f"Prepare Right CPR: First {frame_limit} frames received. Starting processing."
                }))

                # Copy and clear buffer
                buffer_copy = frame_buffer[:]
                frame_buffer.clear()

                # Launch background processing on the shared pool
                loop.run_in_executor(executor, run_analysis_on_buffer, buffer_copy)
            else:
                # Tell Flutter to send the next frame
                await websocket.send_text(json.dumps({
                    "status": "continue",
                    "message": f"Frame {len(frame_buffer)} received. Send next."
                }))

    except WebSocketDisconnect:
        print("Client disconnected")

    except Exception as e:
        await websocket.send_text(json.dumps({"error": str(e)}))

    finally:
        # Don't block disconnect on in-flight analysis; just stop accepting work.
        executor.shutdown(wait=False)
        cv2.destroyAllWindows()
| 517 |
+
|
| 518 |
+
|
| 519 |
+
# Shared state for the realtime CPR analysis websocket endpoints.
logger = logging.getLogger("cpr_logger")
clients = set()             # currently-connected websocket clients
analyzer_thread = None      # background thread running run_cpr_analysis
analysis_started = False    # guard so the analyzer is started only once
socket_server: AnalysisSocketServer = None  # Global reference to the analyzer's socket server
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
async def forward_results_from_queue(websocket: WebSocket, warning_queue):
    """Stream warning payloads from the analyzer's queue to one websocket
    client until cancelled. The blocking queue.get runs in a worker thread
    (asyncio.to_thread) so the event loop stays responsive."""
    try:
        while True:
            warnings = await asyncio.to_thread(warning_queue.get)
            await websocket.send_text(json.dumps(warnings))
    except asyncio.CancelledError:
        logger.info("[WebSocket] Forwarding task cancelled")
    except Exception as e:
        logger.error(f"[WebSocket] Error forwarding data: {e}")
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def run_cpr_analysis(source, requested_fps, output_path):
    """Run the realtime CPR analyzer to completion in the calling thread.

    Publishes the analyzer's socket server via the module-level
    ``socket_server`` global so websocket handlers can read its queue.

    Args:
        source: Video source (file path or stream URL) for CPRAnalyzer.
        requested_fps: Frame rate the analyzer should process at.
            FIX: this parameter was previously dead — it was unconditionally
            overwritten with 30; the caller's value is now honored (the sole
            caller passes 30, so behavior is unchanged for it).
        output_path: Currently unused; outputs go to the fixed directory
            below. TODO(review): honor this parameter instead.
    """
    global socket_server
    cpr_logger.info(f"[MAIN] CPR Analysis Started")

    # Configuration
    input_video = source

    # Create output directory if it doesn't exist
    output_dir = r"D:\BackendGp\Deploy_El7a2ny_Application\CPRRealTime\outputs"
    os.makedirs(output_dir, exist_ok=True)

    # Set output paths using original name
    video_output_path = os.path.join(output_dir, f"output.mp4")
    plot_output_path = os.path.join(output_dir, f"output.png")

    # Log paths for verification
    cpr_logger.info(f"[CONFIG] Input video: {input_video}")
    cpr_logger.info(f"[CONFIG] Video output: {video_output_path}")
    cpr_logger.info(f"[CONFIG] Plot output: {plot_output_path}")

    # Initialize and run analyzer
    initialization_start_time = time.time()
    analyzer = CPRAnalyzer(input_video, video_output_path, plot_output_path, requested_fps)
    socket_server = analyzer.socket_server  # expose its queue to websocket handlers

    # Set plot output path in the analyzer
    analyzer.plot_output_path = plot_output_path

    initialization_end_time = time.time()
    initialization_elapsed_time = initialization_end_time - initialization_start_time
    cpr_logger.info(f"[TIMING] Initialization time: {initialization_elapsed_time:.2f}s")

    try:
        analyzer.run_analysis()
    finally:
        # Always tear down the socket server, even if analysis raises.
        analyzer.socket_server.stop_server()
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
@app.websocket("/ws/test")
async def websocket_analysis(websocket: WebSocket):
    """Attach a Flutter client to the realtime CPR analysis stream.

    The first connection starts the analyzer thread (once, process-wide);
    every connection then receives warnings forwarded from the analyzer's
    socket-server queue until it disconnects."""
    global analyzer_thread, analysis_started, socket_server

    await websocket.accept()
    clients.add(websocket)
    logger.info("[WebSocket] Flutter connected")

    # Start the analyzer only on the first connection
    if not analysis_started:
        source = "http://192.168.137.33:8080/video"  # Replace with your video source
        requested_fps = 30
        output_path = r"D:\CPR\End to End\Code Refactor\output\output.mp4"

        analyzer_thread = threading.Thread(
            target=run_cpr_analysis,
            args=(source, requested_fps, output_path),
            daemon=True
        )
        analyzer_thread.start()
        analysis_started = True

        logger.info("[WebSocket] Analysis thread started")

    # Block until the analyzer thread has published its socket server/queue
    while socket_server is None or not socket_server.warning_queue:
        await asyncio.sleep(0.1)

    # Background task: stream warnings from the queue to this client
    forward_task = asyncio.create_task(
        forward_results_from_queue(websocket, socket_server.warning_queue)
    )

    try:
        while True:
            await asyncio.sleep(1)  # keep connection alive (optional)
    except WebSocketDisconnect:
        logger.warning("[WebSocket] Client disconnected")
        forward_task.cancel()
    finally:
        clients.discard(websocket)
|
| 617 |
+
|
| 618 |
+
|
test.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import cv2
from ultralytics import YOLO


def main():
    """Run YOLO pose estimation on an MJPEG HTTP stream.

    Each frame is rotated 90 degrees clockwise, annotated with pose
    keypoints, written to ``output_pose.mp4`` and shown in a window.
    Press 'q' in the window to stop.
    """
    # Load YOLO pose model (adjust path as needed)
    model = YOLO("yolo11n-pose.pt")

    # Stream URL (MJPEG HTTP stream)
    stream_url = "http://192.168.137.33:8080/video"

    cap = cv2.VideoCapture(stream_url)
    if not cap.isOpened():
        print("Error: Cannot open video stream")
        return

    # Get properties; MJPEG streams often fail to report them, so fall
    # back to sane defaults when they come back as zero.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    if width == 0 or height == 0:
        # Typical fallback for MJPEG streams that don't report dimensions
        width, height = 640, 480
    if fps == 0:
        fps = 20  # reasonable default FPS for streams

    print(f"Stream opened: {width}x{height} at {fps} FPS")

    # Frames are rotated 90 degrees clockwise, so swap width and height
    # for the output video.
    out = cv2.VideoWriter(
        "output_pose.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (height, width)
    )

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                print("Failed to grab frame")
                break

            # Rotate frame 90 degrees clockwise
            rotated_frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

            # Run YOLO pose estimation and draw annotations
            results = model(rotated_frame, verbose=False, conf=0.3, iou=0.1)
            annotated_frame = results[0].plot()

            # Save and display the annotated frame
            out.write(annotated_frame)
            cv2.imshow("Pose Detection (rotated)", annotated_frame)

            # Press 'q' to quit
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        # Release resources even if the loop raises (the original leaked
        # the capture, writer, and windows on any exception).
        cap.release()
        out.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()