Hussein El-Hadidy committed on
Commit
cca6a52
·
1 Parent(s): b9388ea

Added CPR Refactored

Browse files
CPR/CPRAnalyzer.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # main.py
2
+ import cv2
3
+ import time
4
+ import tkinter as tk # For screen size detection
5
+ from datetime import datetime
6
+
7
+ from pose_estimation import PoseEstimator
8
+ from role_classifier import RoleClassifier
9
+ from chest_initializer import ChestInitializer
10
+ from metrics_calculator import MetricsCalculator
11
+ from posture_analyzer import PostureAnalyzer
12
+
13
class CPRAnalyzer:
    """Main CPR analysis pipeline with execution tracing.

    Wires together pose estimation, rescuer role classification,
    chest-point initialization, posture validation and compression
    metrics, and drives a frame-by-frame OpenCV display loop over the
    input video. Portrait orientation is assumed internally; landscape
    frames are rotated 90° clockwise before processing.
    """

    def __init__(self, video_path):
        # video_path: path of the video file to analyze.
        print(f"\n[INIT] Initializing CPR Analyzer for: {video_path}")
        self.cap = cv2.VideoCapture(video_path)
        if not self.cap.isOpened():
            # NOTE(review): returning here leaves the instance half-built
            # (no frame_count, no components), so a subsequent
            # run_analysis() call raises AttributeError — consider raising
            # an exception instead. TODO confirm callers' expectations.
            print("[ERROR] Failed to open video file")
            return

        self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        print(f"[INFO] Video properties: {self.frame_count} frames, "
              f"{self.cap.get(cv2.CAP_PROP_FPS):.1f} FPS")

        # Get screen dimensions via a throwaway Tk root (used only to size
        # and center the display window).
        root = tk.Tk()
        self.screen_width = root.winfo_screenwidth()
        self.screen_height = root.winfo_screenheight()
        root.destroy()

        print(f"[DISPLAY] Detected screen resolution: {self.screen_width}x{self.screen_height}")

        # Initialize components
        self.pose_estimator = PoseEstimator()
        self.role_classifier = RoleClassifier()
        self.chest_initializer = ChestInitializer()
        # 45 cm is the assumed real shoulder width used for px->cm scaling.
        self.metrics_calculator = MetricsCalculator(shoulder_width_cm=45)
        self.posture_analyzer = PostureAnalyzer()

        # Window configuration
        self.window_name = "CPR Analysis"
        cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
        print("[INIT] System components initialized successfully")

    def run_analysis(self):
        """Main processing loop with execution tracing.

        Runs chest-point initialization, then processes every frame until
        end of stream or the user presses 'q', and finally computes and
        reports the compression metrics.
        """

        print("\n[PHASE] Starting initialization phase")
        start_time = time.time()

        if not self._initialize_system():
            return

        # Get the first frame to determine orientation; the chest point is
        # remapped once here because subsequent frames are rotated too.
        ret, frame = self.cap.read()
        if ret:
            self._handle_chest_point_rotation(frame)
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # Reset to beginning

        print(f"[INIT] Completed in {time.time()-start_time:.2f}s\n")
        print("[PHASE] Starting main processing loop")

        # NOTE(review): frame_counter is never incremented or read below.
        frame_counter = 0
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                print("\n[INFO] End of video stream reached")
                break

            frame = self._handle_frame_rotation(frame)
            print(f"\n[FRAME {int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))}/{self.frame_count}] Processing")

            frame = self._process_frame(frame)

            self._display_frame(frame)

            # 'q' aborts the loop; metrics are still computed on exit.
            if cv2.waitKey(1) == ord('q'):
                print("\n[USER] Analysis interrupted by user")
                break

        self._finalize_analysis()

    def _initialize_system(self):
        """Initialize the chest point from the first frames of the video.

        Returns:
            True when the chest point could be located, False otherwise.
        """
        print("[INIT] Starting chest point initialization")
        init_start = time.time()

        if not self.chest_initializer.initialize(self.cap, self.pose_estimator, self.role_classifier):
            print("[ERROR] Chest initialization failed - check video content")
            return False

        # Rewind so the main loop starts at frame 0 again.
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        print(f"[INIT] Chest point initialized at {self.chest_initializer.chest_point}")
        print(f"[INIT] Sampled {len(self.chest_initializer.shoulder_samples)} valid shoulder positions")
        print(f"[INIT] Initialization completed in {time.time()-init_start:.2f}s")
        return True

    def _handle_chest_point_rotation(self, frame):
        """Remap the chest point once when the source video is landscape.

        Must be called exactly once (the per-frame rotation in
        _handle_frame_rotation does NOT adjust the chest point again).
        """
        if frame.shape[1] > frame.shape[0]:  # Landscape orientation
            print(f"[ROTATE] Detected landscape video")

            if self.chest_initializer.chest_point:
                print(f"[ROTATE] Adjusting chest point coordinates")
                cx, cy = self.chest_initializer.chest_point
                # For 90° clockwise rotation:
                # New x = original height - original y
                # New y = original x
                # NOTE(review): exact cv2.ROTATE_90_CLOCKWISE mapping is
                # (x, y) -> (h - 1 - y, x); this is off by one pixel —
                # harmless for an 8 px marker, but confirm if precision matters.
                new_cx = frame.shape[0] - cy
                new_cy = cx
                self.chest_initializer.chest_point = (int(new_cx), int(new_cy))
                print(f"[ROTATE] New chest point: {self.chest_initializer.chest_point}")

    def _handle_frame_rotation(self, frame):
        """Rotate a landscape frame to portrait; chest point untouched."""
        if frame.shape[1] > frame.shape[0]:  # Width > Height
            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        return frame

    def _process_frame(self, frame):
        """Run pose detection + rescuer analysis on one frame.

        Returns the (possibly annotated) frame; the input frame is returned
        unchanged when no poses are detected.
        """
        processing_start = time.time()

        # Pose estimation
        pose_results = self.pose_estimator.detect_poses(frame)
        if not pose_results:
            print("[PROCESS] No poses detected in frame")
            return frame

        print(f"[POSE] Detected {len(pose_results.boxes)} people")
        frame = self.pose_estimator.draw_keypoints(frame, pose_results)
        frame = self._analyze_rescuer(frame, pose_results)

        # Draw chest point
        frame = self.chest_initializer.draw_chest_marker(frame)
        print(f"[PROCESS] Frame processed in {(time.time()-processing_start)*1000:.1f}ms")
        return frame

    def _analyze_rescuer(self, frame, pose_results):
        """Identify the rescuer, validate posture, track compression motion.

        Midpoints are only tracked on frames with NO posture warnings, so
        the metrics are computed from well-formed compressions only.
        """
        rescuer_id = self.role_classifier.find_rescuer(pose_results, frame.shape[:2])
        if rescuer_id is None:
            print("[RESCUER] No rescuer identified in frame")
            return frame

        print(f"[RESCUER] Identified at index {rescuer_id}")
        # keypoints may be None on extraction failure; downstream analyzers
        # catch the resulting exceptions internally.
        keypoints = self.pose_estimator.get_keypoints(pose_results, rescuer_id)

        # Draw rescuer bounding box
        frame = self._draw_rescuer_box(frame, pose_results, rescuer_id)

        # Posture analysis
        warnings = self.posture_analyzer.validate_posture(keypoints, self.chest_initializer.chest_point)
        frame = self.posture_analyzer.display_warnings(frame, warnings)

        if warnings:
            print(f"[WARNING] Posture issues: {', '.join(warnings)}")
        else:
            # Track midpoints
            frame = self.role_classifier.track_rescuer_midpoints(keypoints, frame)
            print(f"[TRACKING] Midpoint added at {self.role_classifier.midpoints[-1]}")
            self.role_classifier.update_shoulder_distance()

        return frame

    def _draw_rescuer_box(self, frame, results, rescuer_id):
        """Draw the rescuer's bounding box with dimension logging."""
        boxes = results.boxes.xyxy.cpu().numpy()
        if rescuer_id < len(boxes):
            x1, y1, x2, y2 = boxes[rescuer_id].astype(int)
            w, h = x2-x1, y2-y1
            print(f"[BOX] Rescuer bounding box: {w}x{h} pixels")
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
            cv2.putText(frame, "RESCUER", (x1, y1-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        return frame

    def _display_frame(self, frame):
        """Show the frame scaled to the screen, preserving aspect ratio."""
        display_start = time.time()

        # Get original frame dimensions
        h, w = frame.shape[:2]
        if w == 0 or h == 0:
            return

        # Calculate maximum possible scale while maintaining aspect ratio
        scale_w = self.screen_width / w
        scale_h = self.screen_height / h
        scale = min(scale_w, scale_h) * 0.9  # 90% of max to leave some margin

        # Calculate new dimensions
        new_w = int(w * scale)
        new_h = int(h * scale)

        # Resize and display
        resized = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)

        # Center window on screen.
        pos_x = (self.screen_width - new_w) // 2
        pos_y = (self.screen_height - new_h) // 2
        cv2.moveWindow(self.window_name, pos_x, pos_y)

        cv2.imshow(self.window_name, resized)
        print(f"[DISPLAY] Resized to {new_w}x{new_h} (scale: {scale:.2f}) in {(time.time()-display_start)*1000:.1f}ms")

    def _finalize_analysis(self):
        """Compute and report final metrics, then release all resources.

        The finally-block guarantees the capture and windows are released
        even when metric calculation fails.
        """
        print("\n[PHASE] Starting final analysis")
        start_time = time.time()

        try:
            print("[METRICS] Smoothing midpoint data...")
            self.metrics_calculator.smooth_midpoints(self.role_classifier.midpoints)

            print("[METRICS] Detecting compression peaks...")
            self.metrics_calculator.detect_peaks()

            print("[METRICS] Calculating depth and rate...")
            depth, rate = self.metrics_calculator.calculate_metrics(
                self.role_classifier.shoulder_distances,
                self.cap.get(cv2.CAP_PROP_FPS))

            # NOTE(review): depth/rate may be None; the :.1f format then
            # raises and is reported by the except below — confirm intended.
            print(f"[RESULTS] Compression Depth: {depth:.1f} cm")
            print(f"[RESULTS] Compression Rate: {rate:.1f} cpm")

            print("[VISUAL] Generating motion curve plot...")
            self.metrics_calculator.plot_motion_curve()

        except Exception as e:
            print(f"[ERROR] Metric calculation failed: {str(e)}")

        finally:
            self.cap.release()
            cv2.destroyAllWindows()
            print(f"\n[ANALYSIS] Completed in {time.time()-start_time:.1f}s")
            print("[CLEANUP] Resources released")
240
+
241
+
242
+
CPR/chest_initializer.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # chest_initializer.py
2
+ import cv2
3
+ import numpy as np
4
+ from keypoints import CocoKeypoints
5
+
6
class ChestInitializer:
    """Handles chest point detection with separated debug visualization.

    Samples shoulder/hip keypoints from the first frames of the video,
    takes per-coordinate medians to reject outliers, and derives a single
    fixed chest point used as the compression target for the whole run.
    """

    def __init__(self, num_init_frames=15, min_confidence=0.2, aspect_ratio_thresh=1.2):
        # num_init_frames: how many leading frames to sample.
        # min_confidence: minimum keypoint confidence for both shoulders.
        # aspect_ratio_thresh: box must be at least this much taller than
        # wide to be treated as the (lying) patient candidate.
        self.num_init_frames = num_init_frames
        self.min_confidence = min_confidence
        self.aspect_ratio_thresh = aspect_ratio_thresh
        self.chest_point = None          # final (x, y) in pixels, or None
        self.shoulder_samples = []       # normalized (left, right) pairs
        self.hip_samples = []            # normalized (left, right) pairs
        self.debug_window = "Chest Detection Debug"

    def initialize(self, cap, pose_estimator, role_classifier):
        """Main initialization routine.

        NOTE(review): role_classifier is accepted but never used here.

        Returns True when at least one valid shoulder sample was collected.
        """
        success = self._sample_initial_frames(cap, pose_estimator)
        self._calculate_chest_point(cap)
        # NOTE(review): if no frame ever produced pose results the debug
        # window was never created and destroyWindow raises — confirm.
        cv2.destroyWindow(self.debug_window)
        return success

    def _sample_initial_frames(self, cap, pose_estimator):
        """Collect valid shoulder/hip positions from the leading frames."""
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        valid_samples = 0

        for i in range(self.num_init_frames):
            ret, frame = cap.read()
            if not ret:
                break

            results = pose_estimator.detect_poses(frame) #!could remove this to prevent double detection
            if not results:
                continue

            debug_frame = results.plot()
            shoulders = self._detect_valid_shoulders(results, frame.shape)

            if shoulders:
                left_shoulder, right_shoulder, left_hip, right_hip = shoulders
                self.shoulder_samples.append((left_shoulder, right_shoulder))
                self.hip_samples.append((left_hip, right_hip))
                valid_samples += 1
                debug_frame = self._draw_debug_info(
                    debug_frame,
                    left_shoulder,
                    right_shoulder,
                    frame.shape,
                    i+1
                )

            # 400 ms per frame so a human can follow the debug overlay.
            cv2.imshow(self.debug_window, debug_frame)
            if cv2.waitKey(400) == ord('q'):
                break

        return valid_samples > 0

    def _draw_debug_info(self, frame, left_shoulder, right_shoulder, frame_shape, frame_num):
        """Overlay the sampled shoulder points on the debug frame."""
        # Convert normalized coordinates to pixel values
        lx, ly = (left_shoulder * np.array([frame_shape[1], frame_shape[0]])).astype(int)
        rx, ry = (right_shoulder * np.array([frame_shape[1], frame_shape[0]])).astype(int)

        # Draw shoulder points
        cv2.circle(frame, (lx, ly), 5, (0, 0, 255), -1)
        cv2.circle(frame, (rx, ry), 5, (0, 0, 255), -1)

        # Add informational text
        cv2.putText(frame, f"CHEST pts - Frame {frame_num}", (lx, ly - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        return frame

    def _detect_valid_shoulders(self, results, frame_shape):
        """Return the first person passing aspect-ratio + confidence checks.

        Returns:
            (left_shoulder, right_shoulder, left_hip, right_hip) as
            NORMALIZED xy pairs, or None when no detection qualifies.
        """
        boxes = results.boxes.xywh.cpu().numpy()
        keypoints = results.keypoints.xyn.cpu().numpy()
        confs = results.keypoints.conf.cpu().numpy()

        for i, (box, kp) in enumerate(zip(boxes, keypoints)):
            x, y, w, h = box
            # Aspect ratio validation: skip boxes that are not clearly
            # taller than wide (the lying patient is the wide one).
            if h < w * self.aspect_ratio_thresh:
                continue

            # Confidence check on both shoulders.
            if (confs[i][CocoKeypoints.LEFT_SHOULDER.value] < self.min_confidence or
                confs[i][CocoKeypoints.RIGHT_SHOULDER.value] < self.min_confidence):
                continue

            return (kp[CocoKeypoints.LEFT_SHOULDER.value],
                    kp[CocoKeypoints.RIGHT_SHOULDER.value],
                    kp[CocoKeypoints.LEFT_HIP.value],
                    kp[CocoKeypoints.RIGHT_HIP.value])

        return None

    def _calculate_chest_point(self, cap):
        """Derive the chest point from the median of the collected samples."""
        if not self.shoulder_samples:
            return

        # Median over the sampled frames rejects per-frame jitter/outliers.
        avg_left = np.median([s[0] for s in self.shoulder_samples], axis=0)
        avg_right = np.median([s[1] for s in self.shoulder_samples], axis=0)
        avg_left_hip = np.median([h[0] for h in self.hip_samples], axis=0)
        avg_right_hip = np.median([h[1] for h in self.hip_samples], axis=0)

        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # De-normalize into pixel coordinates.
        avg_left_px = avg_left * np.array([frame_width, frame_height])
        avg_right_px = avg_right * np.array([frame_width, frame_height])
        avg_left_hip_px = avg_left_hip * np.array([frame_width, frame_height])
        avg_right_hip_px = avg_right_hip * np.array([frame_width, frame_height])

        #midpoint = (avg_left_px + avg_right_px) / 2
        #shoulder_dist = np.linalg.norm(avg_left_px - avg_right_px)
        #downward_offset = 0.4 * shoulder_dist
        #self.chest_point = (int(midpoint[0]), int(midpoint[1] + downward_offset))

        # Use the upper (smaller y) shoulder/hip side — presumably the side
        # facing the camera for a lying patient. TODO confirm assumption.
        if avg_left_px[1] < avg_right_px[1]:
            shoulder = np.array(avg_left_px)
            hip = np.array(avg_left_hip_px)
        else:
            shoulder = np.array(avg_right_px)
            hip = np.array(avg_right_hip_px)

        alpha = 0.412  # Relative chest position between shoulder and hip
        offset = 10  # move 10 pixels upward into the body
        self.chest_point = (
            int(shoulder[0] + alpha * (hip[0] - shoulder[0])),
            int(shoulder[1] + alpha * (hip[1] - shoulder[1])) - offset
        )

        # Visualize the chest point in the debug window for 2 seconds
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # Reset to the first frame
        ret, frame = cap.read()
        if ret:
            frame_with_marker = self.draw_chest_marker(frame)
            cv2.imshow(self.debug_window, frame_with_marker)
            cv2.waitKey(2000)  # Wait for 2 seconds

    def draw_chest_marker(self, frame):
        """Draw the chest point marker; no-op when it is not yet set."""
        print(f"Chest point: {self.chest_point}")
        if self.chest_point:
            cv2.circle(frame, self.chest_point, 8, (0, 55, 120), -1)
            cv2.putText(frame, "Chest", (self.chest_point[0] + 5, self.chest_point[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
        return frame
+ return frame
CPR/keypoints.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# keypoints.py
from enum import Enum, IntEnum
3
+
4
class CocoKeypoints(IntEnum):
    """Enum for COCO keypoints (17 points).

    IntEnum (rather than plain Enum) so members compare equal to ints and
    can be used directly as array indices, e.g.
    ``kp[CocoKeypoints.LEFT_SHOULDER]``; all existing ``.value`` call
    sites keep working unchanged.
    """
    NOSE = 0
    LEFT_EYE = 1
    RIGHT_EYE = 2
    LEFT_EAR = 3
    RIGHT_EAR = 4
    LEFT_SHOULDER = 5
    RIGHT_SHOULDER = 6
    LEFT_ELBOW = 7
    RIGHT_ELBOW = 8
    LEFT_WRIST = 9
    RIGHT_WRIST = 10
    LEFT_HIP = 11
    RIGHT_HIP = 12
    LEFT_KNEE = 13
    RIGHT_KNEE = 14
    LEFT_ANKLE = 15
    RIGHT_ANKLE = 16
CPR/metrics_calculator.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # metrics_calculator.py
2
+ import numpy as np
3
+ from scipy.signal import savgol_filter, find_peaks
4
+ import matplotlib.pyplot as plt
5
+
6
class MetricsCalculator:
    """Rate and depth calculation from motion data with improved peak detection.

    Expected call order: ``smooth_midpoints()`` -> ``detect_peaks()`` ->
    ``calculate_metrics()``; ``plot_motion_curve()`` is an optional,
    blocking visualization.
    """

    def __init__(self, shoulder_width_cm=45):
        # Assumed real-world shoulder width (cm), used for px->cm calibration.
        self.shoulder_width_cm = shoulder_width_cm
        self.peaks = np.array([])       # indices of all extrema (maxima + minima)
        self.peaks_max = np.array([])   # indices of local maxima
        self.peaks_min = np.array([])   # indices of local minima
        self.y_smoothed = np.array([])  # smoothed vertical midpoint trace
        self.cm_px_ratio = None         # cm per pixel, set by calculate_metrics
        self.midpoints_list = np.array([])
        self.shoulder_distances = []

    def smooth_midpoints(self, midpoints):
        """Apply a Savitzky-Golay filter to the vertical midpoint trace.

        Args:
            midpoints: sequence of (x, y) pixel positions, one per frame.

        Returns:
            True when smoothing was applied; False when the input was empty
            or too short (the raw y trace, or an empty array, is kept as a
            fallback so later stages degrade gracefully).
        """
        self.midpoints_list = np.array(midpoints)

        # Guard: an empty input yields a 1-D array, so the [:, 1] indexing
        # below would raise IndexError.
        if self.midpoints_list.size == 0:
            self.y_smoothed = np.array([])
            return False

        y_raw = self.midpoints_list[:, 1]
        if len(y_raw) > 5:  # Ensure enough data points for polyorder=2
            try:
                # Bug fix: savgol_filter requires an odd window_length no
                # larger than the data. The original hard-coded 10 (even),
                # which always raised ValueError and silently fell back to
                # the unsmoothed trace.
                window_length = min(11, len(y_raw))
                if window_length % 2 == 0:
                    window_length -= 1
                self.y_smoothed = savgol_filter(
                    y_raw,
                    window_length=window_length,
                    polyorder=2,
                    mode='nearest'
                )
                return True
            except Exception as e:
                print(f"Smoothing error: {e}")
                self.y_smoothed = y_raw  # Fallback to original
                return False
        else:
            self.y_smoothed = y_raw  # Not enough points
            return False

    def detect_peaks(self):
        """Detect compression (max) and release (min) extrema.

        Returns:
            True when at least one extremum was found.
        """
        if self.y_smoothed.size == 0:
            print("No smoothed values found for peak detection")
            return False

        try:
            # distance=10 frames suppresses jitter between real compressions.
            self.peaks_max, _ = find_peaks(self.y_smoothed, distance=10)
            self.peaks_min, _ = find_peaks(-self.y_smoothed, distance=10)
            self.peaks = np.sort(np.concatenate((self.peaks_max, self.peaks_min)))
            return len(self.peaks) > 0
        except Exception as e:
            print(f"Peak detection error: {e}")
            return False

    def calculate_metrics(self, shoulder_distances, fps):
        """Calculate compression depth (cm) and rate (compressions/minute).

        Args:
            shoulder_distances: per-frame rescuer shoulder widths in pixels,
                used to calibrate the pixel-to-cm ratio.
            fps: video frame rate.

        Returns:
            (depth, rate) tuple; either element is None when it could not
            be computed from the available peaks/calibration data.
        """
        self.shoulder_distances = shoulder_distances

        try:
            # Calibrate pixels -> cm from the average observed shoulder width.
            if len(self.shoulder_distances) > 0:
                avg_dist = np.mean(self.shoulder_distances)
                self.cm_px_ratio = self.shoulder_width_cm / avg_dist
            else:
                print("No shoulder distances available")
                return None, None

            # Depth: mean vertical excursion between consecutive extrema.
            depth = None
            if len(self.peaks) > 1:
                depth = np.mean(np.abs(np.diff(self.y_smoothed[self.peaks]))) * self.cm_px_ratio

            # Rate: from the spacing of compression (max) peaks only.
            rate = None
            if len(self.peaks_max) > 1:
                rate = 1 / (np.mean(np.diff(self.peaks_max)) / fps) * 60  # Convert to CPM

            return depth, rate

        except Exception as e:
            print(f"Metric calculation error: {e}")
            return None, None

    def plot_motion_curve(self):
        """Plot original vs. smoothed motion with detected peaks (blocking)."""
        if self.midpoints_list.size == 0:
            print("No midpoint data to plot")
            return

        plt.figure(figsize=(12, 6))

        # Plot original and smoothed data
        plt.plot(self.midpoints_list[:, 1],
                 label="Original Motion",
                 color="red",
                 linestyle="dashed",
                 alpha=0.6)

        plt.plot(self.y_smoothed,
                 label="Smoothed Motion",
                 color="blue",
                 linewidth=2)

        # Plot peaks if detected
        if self.peaks.size > 0:
            plt.plot(self.peaks,
                     self.y_smoothed[self.peaks],
                     "x",
                     color="green",
                     markersize=10,
                     label="Peaks")
        else:
            print("No peaks to plot")

        plt.xlabel("Frame Number")
        plt.ylabel("Vertical Position (px)")
        plt.title("Compression Motion Analysis")
        plt.grid(True)
        plt.legend()
        plt.show()
CPR/pose_estimation.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pose_estimation.py
2
+ import cv2
3
+ import numpy as np
4
+ from ultralytics import YOLO
5
+ from keypoints import CocoKeypoints
6
+
7
class PoseEstimator:
    """Human pose estimation using YOLO"""

    def __init__(self, model_path="yolo11n-pose.pt", min_confidence=0.2):
        # Load the pose model once; every per-frame call reuses it.
        self.model = YOLO(model_path)
        self.min_confidence = min_confidence

    def detect_poses(self, frame):
        """Detect human poses in a frame.

        Returns the first (and only) result object for the frame, or None
        when nothing was detected or inference failed.
        """
        try:
            batch = self.model(frame, verbose=False)
            # A result with zero keypoint sets counts as "nothing found".
            if batch and len(batch[0].keypoints.xy) > 0:
                return batch[0]
            return None
        except Exception as err:
            print(f"Pose detection error: {err}")
            return None

    def get_keypoints(self, results, person_idx=0):
        """Extract keypoints for a detected person.

        Returns an (17, 2) ndarray of pixel coordinates, or None when the
        index is out of range or extraction fails.
        """
        try:
            if results and person_idx < len(results.keypoints.xy):
                return results.keypoints.xy[person_idx].cpu().numpy()
            return None
        except Exception as err:
            print(f"Keypoint extraction error: {err}")
            return None

    def draw_keypoints(self, frame, results):
        """Draw detected keypoints on frame.

        Falls back to the untouched frame when rendering fails.
        """
        try:
            rendered = results.plot()
        except Exception as err:
            print(f"Keypoint drawing error: {err}")
            return frame
        return rendered
CPR/posture_analyzer.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # posture_analyzer.py
2
+ import math
3
+ import cv2
4
+ import numpy as np
5
+ from keypoints import CocoKeypoints
6
+
7
class PostureAnalyzer:
    """Posture analysis and visualization with comprehensive validation.

    Keeps short rolling histories of arm angles and wrist distances so the
    per-frame checks operate on a 10-sample moving average instead of a
    single noisy frame.
    """

    def __init__(self, right_arm_angle_threshold=210, left_arm_angle_threshold=150, wrist_distance_threshold=170):
        # Rolling histories, appended to on every analyzed frame.
        self.right_arm_angles = []
        self.left_arm_angles = []
        self.wrist_distances = []
        # Angle thresholds in degrees; wrist distance threshold in pixels.
        self.right_arm_angle_threshold = right_arm_angle_threshold
        self.left_arm_angle_threshold = left_arm_angle_threshold
        self.wrist_distance_threshold = wrist_distance_threshold
        # NOTE(review): warning_positions is never read —
        # display_warnings draws at a fixed (10, 30) instead.
        self.warning_positions = {
            'arm_angles': (50, 50),
            'one_handed': (50, 100)
        }

    def calculate_angle(self, a, b, c):
        """Calculate the angle at vertex b formed by points a-b-c.

        Returns the signed angle normalized into [0, 360) degrees;
        returns 0 on any error (e.g. malformed input points).
        """
        try:
            ang = math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) -
                               math.atan2(a[1]-b[1], a[0]-b[0]))
            # Fold negative results into [0, 360).
            return ang + 360 if ang < 0 else ang
        except Exception as e:
            print(f"Angle calculation error: {e}")
            return 0

    def check_bended_arms(self, keypoints):
        """Check for proper arm positioning (returns list of warnings)."""
        warnings = []
        try:
            # Right arm: wrist-elbow-shoulder angle measured at the elbow.
            shoulder = keypoints[CocoKeypoints.RIGHT_SHOULDER.value]
            elbow = keypoints[CocoKeypoints.RIGHT_ELBOW.value]
            wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]
            right_angle = self.calculate_angle(wrist, elbow, shoulder)
            self.right_arm_angles.append(right_angle)

            # Left arm analysis
            shoulder = keypoints[CocoKeypoints.LEFT_SHOULDER.value]
            elbow = keypoints[CocoKeypoints.LEFT_ELBOW.value]
            wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
            left_angle = self.calculate_angle(wrist, elbow, shoulder)
            self.left_arm_angles.append(left_angle)

            # Moving average over the last 10 samples. The `else 0` branch
            # is unreachable here because an angle was just appended above.
            avg_right = np.mean(self.right_arm_angles[-10:] if self.right_arm_angles else 0)
            avg_left = np.mean(self.left_arm_angles[-10:] if self.left_arm_angles else 0)

            # Opposite comparison directions for left vs right —
            # presumably because the signed-angle convention mirrors
            # between arms. TODO confirm thresholds against real footage.
            if avg_right > self.right_arm_angle_threshold:
                warnings.append("Right arm bent")
            if avg_left < self.left_arm_angle_threshold:
                warnings.append("Left arm bent")

        except Exception as e:
            print(f"Arm angle check error: {e}")

        return warnings

    def check_missing_arms(self, keypoints):
        """Check for one-handed CPR pattern (returns list of warnings)."""
        try:
            left_wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
            right_wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]

            # Wrist separation in pixels (keypoints are numpy arrays).
            wrist_distance = np.linalg.norm(left_wrist - right_wrist)
            self.wrist_distances.append(wrist_distance)

            # Moving average; `else 0` is unreachable after the append above.
            avg_distance = np.mean(self.wrist_distances[-10:] if self.wrist_distances else 0)

            # Far-apart wrists suggest only one hand is on the chest.
            if avg_distance > self.wrist_distance_threshold:
                return ["One-handed CPR detected!"]
        except Exception as e:
            print(f"One-handed check error: {e}")

        return []

    def check_correct_hand_position(self, keypoints, chest_point):
        """Check whether the hands are near the chest point (returns warnings)."""
        try:
            left_wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
            right_wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]

            # Calculate distance from chest to wrists
            left_distance = np.linalg.norm(left_wrist - chest_point)
            right_distance = np.linalg.norm(right_wrist - chest_point)

            print(f"Left wrist distance: {left_distance}, Right wrist distance: {right_distance}")

            # NOTE(review): the 100 px tolerance is hard-coded and thus
            # resolution-dependent — consider scaling with frame size.
            if left_distance > 100 and right_distance > 100:
                return ["Hands not in correct position"]
        except Exception as e:
            print(f"Hand position check error: {e}")

        return []

    def validate_posture(self, keypoints, chest_point):
        """Run all posture validations (returns aggregated warnings)."""
        warnings = []
        warnings += self.check_bended_arms(keypoints)
        warnings += self.check_missing_arms(keypoints)
        warnings += self.check_correct_hand_position(keypoints, chest_point)
        return warnings

    def display_warnings(self, frame, warnings):
        """Display all posture warnings at the top of the frame."""
        try:
            if warnings:
                # Join all warnings with commas
                warning_text = ", ".join(warnings)
                # Display the combined warning text at the top
                cv2.putText(frame, warning_text,
                            (10, 30),  # Top-left corner
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
        except Exception as e:
            print(f"Warning display error: {e}")

        return frame
CPR/quick_test.ipynb ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "c819609e",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "# Example notebook usage\n",
+ "import cv2\n",
11
+ "from pose_estimation import PoseEstimator\n",
12
+ "from role_classifier import RoleClassifier\n",
13
+ "from metrics_calculator import MetricsCalculator\n",
14
+ "\n",
15
+ "# Test pose estimation\n",
16
+ "estimator = PoseEstimator()\n",
17
+ "frame = cv2.imread(\"test_frame.jpg\")\n",
18
+ "results = estimator.detect_poses(frame)\n",
19
+ "annotated = estimator.draw_keypoints(frame, results)\n",
20
+ "cv2.imshow(\"Pose Test\", annotated)\n",
21
+ "\n",
22
+ "# Test metrics calculation\n",
23
+ "calculator = MetricsCalculator()\n",
24
+ "calculator.smooth_midpoints(midpoints_data)\n",
25
+ "calculator.detect_peaks()\n",
26
+ "calculator.plot_motion_curve()"
27
+ ]
28
+ }
29
+ ],
30
+ "metadata": {
31
+ "language_info": {
32
+ "name": "python"
33
+ }
34
+ },
35
+ "nbformat": 4,
36
+ "nbformat_minor": 5
37
+ }
CPR/role_classifier.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # role_classifier.py
2
+ import cv2
3
+ import numpy as np
4
+ from keypoints import CocoKeypoints
5
+
6
class RoleClassifier:
    """Role classification and tracking using image processing.

    Distinguishes the kneeling rescuer from the lying patient using the
    aspect ratios of the pose bounding boxes, then tracks the rescuer's
    wrist midpoint (compression motion) and shoulder width (used later
    for the px->cm calibration of the metrics).
    """

    def __init__(self, proximity_thresh=0.3):
        # Max normalized center distance between a person and a horizontal
        # body for that person to qualify as the rescuer.
        self.proximity_thresh = proximity_thresh
        self.rescuer_id = None          # detection index of the rescuer
        self.midpoints = []             # per-frame wrist midpoints (px)
        self.shoulder_distances = []    # per-frame shoulder widths (px)
        self.rescuer_keypoints = None   # last keypoints, for shoulder calc

    def find_rescuer(self, pose_results, frame_shape):
        """Identify the rescuer based on proximity to horizontal objects.

        Args:
            pose_results: ultralytics result with .boxes / .keypoints.
            frame_shape: (height, width), used to normalize distances.

        Returns:
            Detection index of the rescuer, or None.
        """
        try:
            boxes = pose_results.boxes.xywh.cpu().numpy()
            # Wide boxes — presumably the patient lying down. TODO confirm.
            horizontal_objects = [b for b in boxes if b[2] > b[3]*1.5]

            if not horizontal_objects:
                return None

            # Clearly-tall boxes are candidate (kneeling/upright) people.
            people = []
            for i, box in enumerate(boxes):
                if box[3] > box[2]*1.2 and len(pose_results.keypoints) > i:
                    people.append((i, box[0], box[1]))

            # Pick the candidate closest to any horizontal body, within the
            # proximity threshold (distances normalized by frame size).
            min_distance = float('inf')
            for (i, px, py) in people:
                for (hx, hy, hw, hh) in horizontal_objects:
                    distance = np.sqrt(((px-hx)/frame_shape[1])**2 + ((py-hy)/frame_shape[0])**2)
                    if distance < min_distance and distance < self.proximity_thresh:
                        min_distance = distance
                        self.rescuer_id = i
            # NOTE(review): if no candidate passes the threshold on this
            # frame, rescuer_id keeps its value from a previous frame and
            # is returned as-is — confirm this persistence is intended.
            return self.rescuer_id
        except Exception as e:
            print(f"Rescuer finding error: {e}")
            return None

    def track_rescuer_midpoints(self, keypoints, frame):
        """Track and draw the rescuer's wrist midpoint (pixel coordinates).

        Appends the midpoint to self.midpoints and annotates the frame;
        returns the frame unchanged on any error.
        """
        try:
            # Store keypoints for shoulder distance calculation
            self.rescuer_keypoints = keypoints

            # Get wrist coordinates directly in pixels
            lw = keypoints[CocoKeypoints.LEFT_WRIST.value]
            rw = keypoints[CocoKeypoints.RIGHT_WRIST.value]

            # Calculate midpoint in pixel space
            midpoint = (
                int((lw[0] + rw[0]) / 2),
                int((lw[1] + rw[1]) / 2)
            )
            self.midpoints.append(midpoint)

            # Draw tracking
            cv2.circle(frame, midpoint, 8, (0,255,0), -1)
            cv2.putText(frame, "MIDPOINT", (midpoint[0]+5, midpoint[1]-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,0), 2)
            return frame
        except Exception as e:
            print(f"Midpoint tracking error: {e}")
            return frame

    def update_shoulder_distance(self):
        """Record the current shoulder width (pixels) for calibration.

        Uses the keypoints stored by the latest track_rescuer_midpoints()
        call; does nothing when no keypoints are available yet.
        """
        if self.rescuer_keypoints is None:
            return

        try:
            left_shoulder = self.rescuer_keypoints[CocoKeypoints.LEFT_SHOULDER.value]
            right_shoulder = self.rescuer_keypoints[CocoKeypoints.RIGHT_SHOULDER.value]

            shoulder_dist = np.linalg.norm(left_shoulder - right_shoulder)
            self.shoulder_distances.append(shoulder_dist)
            print(f"Shoulder distance: {shoulder_dist:.2f} pixels")
        except Exception as e:
            print(f"Shoulder distance calculation error: {e}")
CPR/yolo11n-pose.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:869e83fcdffdc7371fa4e34cd8e51c838cc729571d1635e5141e3075e9319dc0
3
+ size 6255593
app.py CHANGED
@@ -21,6 +21,8 @@ from fastapi import HTTPException
21
  from fastapi import WebSocket, WebSocketDisconnect
22
  import base64
23
  import cv2
 
 
24
 
25
 
26
 
@@ -199,21 +201,15 @@ async def process_video(file: UploadFile = File(...)):
199
  with open(video_path, "wb") as buffer:
200
  shutil.copyfileobj(file.file, buffer)
201
 
202
- model = YOLO("yolo11n-pose_float16.tflite") # fixed variable name
 
203
 
204
- print("Model loaded successfully")
 
 
 
 
205
 
206
- # Run YOLO detection
207
- try:
208
- results = model(
209
- source=video_path,
210
- show=True, # Set True if running locally and want to view
211
- save=True,
212
- project="runs/detect",
213
- name="testResult"
214
- )
215
- except Exception as e:
216
- raise HTTPException(status_code=500, detail=f"YOLO processing error: {str(e)}")
217
 
218
  return JSONResponse(content={"message": "Video processed successfully", "result_dir": "runs/detect/testResult"})
219
 
 
21
  from fastapi import WebSocket, WebSocketDisconnect
22
  import base64
23
  import cv2
24
+ import time
25
+ from CPR.CPRAnalyzer import CPRAnalyzer
26
 
27
 
28
 
 
201
  with open(video_path, "wb") as buffer:
202
  shutil.copyfileobj(file.file, buffer)
203
 
204
+ start_time = time.time()
205
+ print("[START] CPR Analysis started")
206
 
207
+ analyzer = CPRAnalyzer(video_path)
208
+ analyzer.run_analysis()
209
+
210
+ end_time = time.time()
211
+ print(f"[END] Total execution time: {end_time - start_time:.2f}s")
212
 
 
 
 
 
 
 
 
 
 
 
 
213
 
214
  return JSONResponse(content={"message": "Video processed successfully", "result_dir": "runs/detect/testResult"})
215
 
requirements.txt CHANGED
@@ -102,4 +102,5 @@ wfdb==4.3.0
102
  wrapt==1.17.2
103
  yarl==1.20.0
104
  websockets
105
- xgboost
 
 
102
  wrapt==1.17.2
103
  yarl==1.20.0
104
  websockets
105
+ xgboost
106
+ # NOTE: 'time' removed — it is a Python standard-library module, not a pip-installable package