Hussein El-Hadidy commited on
Commit
89a012c
·
1 Parent(s): ac55d8d

Modified education to eduacation-2

Browse files
CPR/CPRAnalyzer.py CHANGED
@@ -1,8 +1,10 @@
1
  # main.py
2
  import cv2
3
  import time
4
- import tkinter as tk # For screen size detection
5
- from datetime import datetime
 
 
6
 
7
  from CPR.pose_estimation import PoseEstimator
8
  from CPR.role_classifier import RoleClassifier
@@ -11,57 +13,67 @@ from CPR.metrics_calculator import MetricsCalculator
11
  from CPR.posture_analyzer import PostureAnalyzer
12
  from CPR.wrists_midpoint_analyzer import WristsMidpointAnalyzer
13
  from CPR.shoulders_analyzer import ShouldersAnalyzer
14
- import os
15
- import uuid # For unique filenames
16
 
 
17
 
18
  class CPRAnalyzer:
19
  """Main CPR analysis pipeline with execution tracing"""
20
 
21
- def __init__(self, video_path):
22
- print(f"\n[INIT] Initializing CPR Analyzer for: {video_path}")
 
 
 
 
 
 
 
 
23
 
24
- self.video_path = video_path
25
  #& Open video file
26
- self.cap = cv2.VideoCapture(video_path)
27
  if not self.cap.isOpened():
28
- print("[ERROR] Failed to open video file")
29
  return
30
- print("[INIT] Video file opened successfully")
31
 
32
  #& Get video properties
33
  self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
34
  self.fps = self.cap.get(cv2.CAP_PROP_FPS)
35
  print(f"[INIT] Video has {self.frame_count} frames at {self.fps:.2f} FPS")
36
 
37
- #& Get screen dimensions
38
- root = tk.Tk()
39
- self.screen_width = root.winfo_screenwidth()
40
- self.screen_height = root.winfo_screenheight()
41
- root.destroy()
42
- print(f"[INIT] Detected screen resolution: {self.screen_width}x{self.screen_height}")
 
 
43
 
44
  #& Initialize system components
45
  self.pose_estimator = PoseEstimator(min_confidence=0.5)
46
  self.role_classifier = RoleClassifier()
47
  self.chest_initializer = ChestInitializer()
48
- self.metrics_calculator = MetricsCalculator(self.frame_count, shoulder_width_cm=45*0.65)
49
 
 
50
  # if avg_right > self.right_arm_angle_threshold: error
51
  # if avg_left < self.left_arm_angle_threshold: error
52
 
53
  self.posture_analyzer = PostureAnalyzer(right_arm_angle_threshold=220, left_arm_angle_threshold=160, wrist_distance_threshold=170, history_length_to_average=10)
54
  self.wrists_midpoint_analyzer = WristsMidpointAnalyzer()
55
  self.shoulders_analyzer = ShouldersAnalyzer()
56
- print("[INIT] System components initialized")
57
-
58
- self.collected_warnings = {}
59
-
60
 
61
- #& Configure display window
62
- self.window_name = "CPR Analysis"
63
- cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
64
- print(f"[INIT] Window '{self.window_name}' created")
65
 
66
  #& Keep track of previous results for continuity
67
  self.prev_rescuer_processed_results = None
@@ -69,175 +81,397 @@ class CPRAnalyzer:
69
  self.prev_chest_params = None
70
  self.prev_midpoint = None
71
  self.prev_pose_results = None
72
- print("[INIT] Previous results initialized")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
  #& Workaround for minor glitches
75
- self.consecutive_frames_with_posture_errors = 0
76
- self.max_consecutive_frames_with_posture_errors = 10
 
 
 
 
 
 
 
 
77
 
78
  #& Initialize variables for reporting warnings
79
- self.posture_errors_for_current_error_region = set()
80
 
81
- #& Frequent depth and rate calculations
82
- self.reporting_interval_in_seconds = 5
83
- self.reporting_interval_in_frames = int(self.fps * self.reporting_interval_in_seconds)
84
- print(f"[INIT] Reporting interval set to {self.reporting_interval_in_seconds} seconds ({self.reporting_interval_in_frames} frames)")
85
 
86
- def run_analysis(self):
87
- try:
88
- print("\n[RUN ANALYSIS] Starting analysis")
89
 
90
- main_loop_start_time = time.time()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
- #& Initialize Variables
93
- # Handling chunks
94
- first_time_to_have_a_proccessed_frame = True
95
- waiting_to_start_new_chunk = False
96
- # Handling mini chunks
97
- mini_chunk_start_frame_index = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
- print("[RUN ANALYSIS] Starting main execution loop")
100
  #& Main execution loop
 
 
101
  while self.cap.isOpened():
102
- #& Get frame number
103
- # Retrieve the current position of the video frame being processed in the video capture object (self.cap).
104
- frame_counter = self.cap.get(cv2.CAP_PROP_POS_FRAMES)
105
- print(f"\n[FRAME {int(frame_counter)}/{self.frame_count}]")
 
 
 
 
106
 
107
- #& Read frame
108
- ret, frame = self.cap.read()
109
- if not ret:
110
- print("[ERROR] Failed to read frame or end of video reached")
111
- break
112
- print(f"[RUN ANALYSIS] Read frame")
 
 
113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  #& Rotate frame
115
  frame = self._handle_frame_rotation(frame)
116
- print(f"[RUN ANALYSIS] Rotated frame")
117
 
118
  #& Process frame
119
- # Processing a frame means updating the values for the current and previous detections both in the CPR Analyzer and the system components it includes.
120
- # The returned flags are:
121
- # - is_complete_chunk: True if a "Posture Error" occurs in the frame, False otherwise.
122
- # - accept_frame: True if the frame is accepted for further processing, False otherwise.
123
- # Note that a frame containing an error could be accepted if the number of consecutive frames with errors is less than the threshold.
124
- is_complete_chunk, accept_frame = self._process_frame(frame)
125
- print(f"[RUN ANALYSIS] Processed frame")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
 
 
 
 
 
 
 
 
127
  #& Compose frame
128
- # This function is responsible for drawing the data detected during the processing of the frame on it.
129
  # The frame would not be displayed yet, just composed.
130
- processed_frame = self._compose_frame(frame, accept_frame)
131
- print(f"[RUN ANALYSIS] Composed frame")
132
-
133
- #& Set the chunk start frame index for the first chunk
134
- # Along the video when a failure in any step of the processing occurs, the variables are populated with the previous results to keep the analysis going.
135
- # The problem occurs when the first few frames have a failure in the processing, and the variables are not populated yet.
136
- # This is why the first chunk starts from the first frame that has been processed successfully.
137
- if (processed_frame is not None) and first_time_to_have_a_proccessed_frame:
138
- first_time_to_have_a_proccessed_frame = False
139
- chunk_start_frame_index = frame_counter
140
- mini_chunk_start_frame_index = frame_counter
141
- print(f"[RUN ANALYSIS] First processed frame detected")
142
-
143
- #& Set the chunk start frame index for the all chunks after the first one & append the errors detected in the error region before this chunk if any
144
- # When a "Posture Error" occurs, a chunk is considered complete, and the program becomes ready to start a new chunk.
145
- # is_complete_chunk is returned as true for every frame that has a "Posture Error" in it, and false for every other frame.
146
- # This is why we need to wait for a frame with a false is_complete_chunk to start a new chunk.
147
- if (waiting_to_start_new_chunk) and (not is_complete_chunk):
148
- waiting_to_start_new_chunk = False
149
- chunk_start_frame_index = frame_counter
150
- mini_chunk_start_frame_index = frame_counter
151
- print(f"[RUN ANALYSIS] A new chunk is starting")
152
-
153
- if len(self.posture_errors_for_current_error_region) > 0:
154
- self.posture_analyzer.posture_errors_for_all_error_region.append(self.posture_errors_for_current_error_region.copy())
155
- self.posture_errors_for_current_error_region.clear()
156
- print(f"[RUN ANALYSIS] Reset posture errors for current error region")
157
-
158
- #& Process the current chunk or mini chunk if the conditions are met
159
- process_chunk = (is_complete_chunk or frame_counter == self.frame_count - 1) and (not waiting_to_start_new_chunk)
160
- process_mini_chunk = (frame_counter % self.reporting_interval_in_frames == 0) and (frame_counter != 0) and (mini_chunk_start_frame_index is not None) and (not is_complete_chunk)
161
-
162
- if process_chunk:
163
- print(f"[RUN ANALYSIS] Chunk completion detected")
164
-
165
- # The difference here results from the fact a first middle chunk is terminated by a "Posture Error" which is a frame not included in the chunk.
166
- # While the last chunk is terminated by the end of the video, which is a frame included in the chunk.
167
- if is_complete_chunk:
168
- chunk_end_frame_index = frame_counter - 1
169
- elif frame_counter == self.frame_count - 1:
170
- chunk_end_frame_index = frame_counter
171
- print(f"[RUN ANALYSIS] Determined the last frame of the chunk")
172
-
173
- depth, rate = self._calculate_rate_and_depth_for_chunk(chunk_start_frame_index, chunk_end_frame_index)
174
- print(f"[RUN ANALYSIS] Calculated metrics for the chunk")
175
-
176
- elif process_mini_chunk:
177
- print(f"[RUN ANALYSIS] Mini chunk completion detected")
178
-
179
- mini_chunk_end_frame_index = frame_counter
180
- print(f"[RUN ANALYSIS] Determined the last frame of the mini chunk")
181
-
182
- depth, rate = self._calculate_rate_and_depth_for_chunk(mini_chunk_start_frame_index, mini_chunk_end_frame_index)
183
- print(f"[RUN ANALYSIS] Calculated metrics for the mini chunk")
184
-
185
- if process_chunk or process_mini_chunk:
186
- waiting_to_start_new_chunk = True
187
-
188
- self.shoulders_analyzer.reset_shoulder_distances()
189
- self.wrists_midpoint_analyzer.reset_midpoint_history()
190
- print(f"[RUN ANALYSIS] Reset shoulder distances and midpoint history")
191
-
192
- #& Display frame
193
- if processed_frame is not None:
194
- self._display_frame(processed_frame)
195
  else:
196
- self._display_frame(frame)
197
- print(f"[RUN ANALYSIS] Displayed frame")
198
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
  #& Check if the user wants to quit
200
- if cv2.waitKey(1) == ord('q'):
201
- print("\n[RUN ANALYSIS] Analysis interrupted by user")
202
  break
203
 
204
  main_loop_end_time = time.time()
205
  elapsed_time = main_loop_end_time - main_loop_start_time
206
- print(f"[TIMING] Main loop elapsed time: {elapsed_time:.2f}s")
207
 
208
  except Exception as e:
209
- print(f"[ERROR] An error occurred during main execution loop: {str(e)}")
210
 
211
  finally:
212
  report_and_plot_start_time = time.time()
213
 
214
- #& Cleanup, calculate averages, and plot full motion curve
215
  self.cap.release()
 
 
 
 
 
216
  cv2.destroyAllWindows()
217
- print("[RUN ANALYSIS] Released video capture and destroyed all windows")
218
 
219
  self._calculate_rate_and_depth_for_all_chunks()
220
- print("[RUN ANALYSIS] Calculated weighted averages of the metrics across all chunks")
 
 
 
 
221
 
222
- graphResults = self._plot_full_motion_curve_for_all_chunks()
223
- print("[RUN ANALYSIS] Plotted full motion curve")
 
 
 
 
 
 
 
 
 
224
 
225
  report_and_plot_end_time = time.time()
226
  report_and_plot_elapsed_time = report_and_plot_end_time - report_and_plot_start_time
227
- print(f"[TIMING] Report and plot elapsed time: {report_and_plot_elapsed_time:.2f}s")
228
-
229
- return self.metrics_calculator.annotate_video_with_chunks(self.video_path, self.posture_analyzer.posture_errors_for_all_error_region), graphResults, self.get_posture_warning_results(), self.metrics_calculator.get_json_chunk_data()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
 
231
  def _handle_frame_rotation(self, frame):
232
- #! Till now, the code has only been tested on portrait videos.
233
  if frame.shape[1] > frame.shape[0]: # Width > Height
234
  frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
235
  return frame
236
 
237
  def _process_frame(self, frame):
 
 
 
238
  #* Chunk Completion Check
239
- is_complete_chunk = False
240
- accept_frame = True
241
 
242
  #& Pose Estimation
243
  pose_results = self.pose_estimator.detect_poses(frame)
@@ -245,13 +479,13 @@ class CPRAnalyzer:
245
  #~ Handle Failed Detection or Update Previous Results
246
  if not pose_results:
247
  pose_results = self.prev_pose_results
248
- print("[POSE ESTIMATION] No pose detected, using previous results (could be None)")
249
  else:
250
  self.prev_pose_results = pose_results
251
 
252
  if not pose_results:
253
- print("[POSE ESTIMATION] Insufficient data for processing")
254
- return is_complete_chunk, accept_frame
255
 
256
  #& Rescuer and Patient Classification
257
  rescuer_processed_results, patient_processed_results = self.role_classifier.classify_roles(pose_results, self.prev_rescuer_processed_results, self.prev_patient_processed_results)
@@ -259,24 +493,24 @@ class CPRAnalyzer:
259
  #~ Handle Failed Classifications OR Update Previous Results
260
  if not rescuer_processed_results:
261
  rescuer_processed_results = self.prev_rescuer_processed_results
262
- print("[ROLE CLASSIFICATION] No rescuer detected, using previous results (could be None)")
263
  else:
264
  self.prev_rescuer_processed_results = rescuer_processed_results
265
 
266
  if not patient_processed_results:
267
  patient_processed_results = self.prev_patient_processed_results
268
- print("[ROLE CLASSIFICATION] No patient detected, using previous results (could be None)")
269
  else:
270
  self.prev_patient_processed_results = patient_processed_results
271
 
272
  if not rescuer_processed_results or not patient_processed_results:
273
- print("[ROLE CLASSIFICATION] Insufficient data for processing")
274
- return is_complete_chunk, accept_frame
275
 
276
  #^ Set Params in Role Classifier (to draw later)
277
  self.role_classifier.rescuer_processed_results = rescuer_processed_results
278
  self.role_classifier.patient_processed_results = patient_processed_results
279
- print(f"[ROLE CLASSIFICATION] Updated role classifier with new results")
280
 
281
  #& Chest Estimation
282
  chest_params = self.chest_initializer.estimate_chest_region(patient_processed_results["keypoints"], patient_processed_results["bounding_box"], frame_width=frame.shape[1], frame_height=frame.shape[0])
@@ -284,13 +518,13 @@ class CPRAnalyzer:
284
  #~ Handle Failed Estimation or Update Previous Results
285
  if not chest_params:
286
  chest_params = self.prev_chest_params
287
- print("[CHEST ESTIMATION] No chest region detected, using previous results (could be None)")
288
  else:
289
  self.prev_chest_params = chest_params
290
 
291
  if not chest_params:
292
- print("[CHEST ESTIMATION] Insufficient data for processing")
293
- return is_complete_chunk, accept_frame
294
 
295
  #^ Set Params in Chest Initializer (to draw later)
296
  self.chest_initializer.chest_params = chest_params
@@ -307,39 +541,26 @@ class CPRAnalyzer:
307
  self.chest_initializer.expected_chest_params = expected_chest_params
308
 
309
  #& Posture Analysis
310
- # The midpoint of the last frame
311
- warnings = self.posture_analyzer.validate_posture(rescuer_processed_results["keypoints"], self.prev_midpoint, self.chest_initializer.expected_chest_params)
312
-
313
- ##############################################################
314
- for warning in warnings:
315
- if warning not in self.collected_warnings:
316
- self.collected_warnings[warning] = []
317
- if len(self.collected_warnings[warning]) < 2:
318
- # Save the frame to disk or memory
319
- filename = f"warning_{uuid.uuid4().hex}.jpg"
320
- file_path = os.path.join("screenshots", filename)
321
- os.makedirs("screenshots", exist_ok=True)
322
- cv2.imwrite(file_path, frame)
323
- self.collected_warnings[warning].append(file_path)
324
- print(f"[CAPTURE] Saved warning screenshot: {file_path}")
325
-
326
- ##############################################################
327
-
328
- if warnings:
329
- print(f"[POSTURE ANALYSIS] Posture issues: {', '.join(warnings)}")
330
- self.consecutive_frames_with_posture_errors += 1
331
- else:
332
- print("[POSTURE ANALYSIS] No posture issues detected")
333
- self.consecutive_frames_with_posture_errors = 0
334
-
335
- accept_frame = self.consecutive_frames_with_posture_errors < self.max_consecutive_frames_with_posture_errors
336
-
337
- if accept_frame:
338
- warnings = [] # Reset warnings if the frame is accepted
339
 
340
  #^ Set Params in Posture Analyzer (to draw later)
341
  self.posture_analyzer.warnings = warnings
342
- print(f"[POSTURE ANALYSIS] Updated posture analyzer with new results")
343
 
344
  #& Wrist Midpoint Detection
345
  midpoint = self.wrists_midpoint_analyzer.detect_wrists_midpoint(rescuer_processed_results["keypoints"])
@@ -347,136 +568,117 @@ class CPRAnalyzer:
347
  #~ Handle Failed Detection or Update Previous Results
348
  if not midpoint:
349
  midpoint = self.prev_midpoint
350
- print("[WRIST MIDPOINT DETECTION] No midpoint detected, using previous results (could be None)")
351
  else:
352
  self.prev_midpoint = midpoint
353
 
354
  if not midpoint:
355
- print("[WRIST MIDPOINT DETECTION] Insufficient data for processing")
356
- return is_complete_chunk, accept_frame
357
-
358
- if accept_frame:
359
  #^ Set Params in Role Classifier (to draw later)
 
360
  self.wrists_midpoint_analyzer.midpoint = midpoint
361
  self.wrists_midpoint_analyzer.midpoint_history.append(midpoint)
362
- print(f"[WRIST MIDPOINT DETECTION] Updated wrist midpoint analyzer with new results")
363
 
364
  #& Shoulder Distance Calculation
365
  shoulder_distance = self.shoulders_analyzer.calculate_shoulder_distance(rescuer_processed_results["keypoints"])
366
  if shoulder_distance is not None:
367
  self.shoulders_analyzer.shoulder_distance = shoulder_distance
368
  self.shoulders_analyzer.shoulder_distance_history.append(shoulder_distance)
369
- print(f"[SHOULDER DISTANCE] Updated shoulder distance analyzer with new results")
370
- else:
371
- #* Chunk Completion Check
372
- is_complete_chunk = True
373
- num_warnings_before = len(self.posture_errors_for_current_error_region)
374
 
375
- for warning in warnings:
376
- self.posture_errors_for_current_error_region.add(warning)
377
-
378
- num_warnings_after = len(self.posture_errors_for_current_error_region)
379
-
380
- if num_warnings_after > num_warnings_before:
381
- print(f"[POSTURE ANALYSIS] Added warning to current error region: {warning}")
382
 
383
- return is_complete_chunk, accept_frame
384
-
385
- def _compose_frame(self, frame, accept_frame):
386
  # Chest Region
387
  if frame is not None:
388
  frame = self.chest_initializer.draw_expected_chest_region(frame)
389
- print(f"[VISUALIZATION] Drawn chest region")
390
-
391
- # Warning Messages
392
- if frame is not None:
393
- frame = self.posture_analyzer.display_warnings(frame)
394
- print(f"[VISUALIZATION] Drawn warnings")
395
 
396
  if frame is not None:
397
- if accept_frame:
398
- # Midpoint
399
  frame = self.wrists_midpoint_analyzer.draw_midpoint(frame)
400
- print(f"[VISUALIZATION] Drawn midpoint")
401
-
402
  return frame
403
 
404
- def _display_frame(self, frame):
405
- # Get original frame dimensions
406
- h, w = frame.shape[:2]
407
- if w == 0 or h == 0:
408
- return
409
-
410
- # Calculate maximum possible scale while maintaining aspect ratio
411
- scale_w = self.screen_width / w
412
- scale_h = self.screen_height / h
413
- scale = min(scale_w, scale_h) * 0.9 # 90% of max to leave some margin
414
-
415
- # Calculate new dimensions
416
- new_w = int(w * scale)
417
- new_h = int(h * scale)
418
-
419
- # Resize and display
420
- resized = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
421
-
422
- # Center window
423
- pos_x = (self.screen_width - new_w) // 2
424
- pos_y = (self.screen_height - new_h) // 2
425
- cv2.moveWindow(self.window_name, pos_x, pos_y)
426
-
427
- cv2.imshow(self.window_name, resized)
428
- print(f"[DISPLAY FRAME] Resized to {new_w}x{new_h} (scale: {scale:.2f})")
429
-
430
- def _calculate_rate_and_depth_for_chunk(self, chunk_start_frame_index, chunk_end_frame_index):
431
  try:
432
- self.metrics_calculator.smooth_midpoints(self.wrists_midpoint_analyzer.midpoint_history)
433
- print("[METRICS] Smoothed midpoints")
434
-
435
- self.metrics_calculator.detect_peaks()
436
- print("[METRICS] Detected peaks")
437
-
438
- depth, rate = self.metrics_calculator.calculate_metrics(
439
- self.shoulders_analyzer.shoulder_distance_history,
440
- self.cap.get(cv2.CAP_PROP_FPS),
441
- chunk_start_frame_index,
442
- chunk_end_frame_index)
443
- print("[METRICS] Calculated metrics")
444
-
445
- if depth is None or rate is None:
446
- print("[ERROR] Depth or rate calculation failed, likely due to insufficient data points (<2 peaks)")
447
-
448
- return depth, rate
449
 
450
  except Exception as e:
451
- print(f"[ERROR] Metric calculation failed: {str(e)}")
452
 
453
  def _calculate_rate_and_depth_for_all_chunks(self):
454
  try:
455
- self.metrics_calculator.calculate_weighted_averages()
456
- print(f"[METRICS] Weighted averages calculated")
457
  except Exception as e:
458
- print(f"[ERROR] Failed to calculate weighted averages: {str(e)}")
459
 
460
  def _plot_full_motion_curve_for_all_chunks(self):
461
  try:
462
- print("[PLOT] Full motion curve plotted")
463
- return self.metrics_calculator.plot_motion_curve_for_all_chunks(self.posture_analyzer.posture_errors_for_all_error_region)
 
 
 
 
 
 
 
 
464
  except Exception as e:
465
- print(f"[ERROR] Failed to plot full motion curve: {str(e)}")
 
 
 
 
 
 
 
 
 
466
 
 
 
 
 
 
 
 
467
 
468
- def get_posture_warning_results(self):
469
- """Return a list of posture warning entries with image URLs and descriptions"""
470
- result = []
471
-
472
- for description, paths in self.collected_warnings.items():
473
- for path in paths:
474
- # You might want to convert local paths to URLs if you're serving them via FastAPI
475
- if (len(self.posture_analyzer.posture_errors_for_all_error_region) > 0):
476
- # If the error region is not empty, append the image URL and description
477
- result.append({
478
- "image_url": f"{os.path.basename(path)}", # Adjust if hosted elsewhere
479
- "description": description
480
- })
481
-
482
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # main.py
2
  import cv2
3
  import time
4
+ import math
5
+ import numpy as np
6
+ import os # Added for path handling
7
+ import sys
8
 
9
  from CPR.pose_estimation import PoseEstimator
10
  from CPR.role_classifier import RoleClassifier
 
13
  from CPR.posture_analyzer import PostureAnalyzer
14
  from CPR.wrists_midpoint_analyzer import WristsMidpointAnalyzer
15
  from CPR.shoulders_analyzer import ShouldersAnalyzer
16
+ from CPR.graph_plotter import GraphPlotter
17
+ from CPR.warnings_overlayer import WarningsOverlayer
18
 
19
+ from CPR.logging_config import cpr_logger
20
 
21
  class CPRAnalyzer:
22
  """Main CPR analysis pipeline with execution tracing"""
23
 
24
+ def __init__(self, input_video, video_output_path, plot_output_path, requested_fps):
25
+
26
+ cpr_logger.info(f"[INIT] Initializing CPR Analyzer")
27
+
28
+ #& Frame counter
29
+ self.frame_counter = -1
30
+ cpr_logger.info(f"[INIT] Frame counter initialized")
31
+
32
+ self.processed_frame_counter = 0 # Track only processed frames
33
+ cpr_logger.info(f"[INIT] Processed frame counter initialized")
34
 
 
35
  #& Open video file
36
+ self.cap = cv2.VideoCapture(input_video)
37
  if not self.cap.isOpened():
38
+ cpr_logger.error(f"[ERROR] Failed to open video file: {input_video}")
39
  return
40
+ cpr_logger.info(f"[INIT] Video file opened successfully")
41
 
42
  #& Get video properties
43
  self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
44
  self.fps = self.cap.get(cv2.CAP_PROP_FPS)
45
  print(f"[INIT] Video has {self.frame_count} frames at {self.fps:.2f} FPS")
46
 
47
+ #& Generate output path with MP4 extension
48
+ self.video_output_path = video_output_path
49
+ self.video_writer = None
50
+ self._writer_initialized = False
51
+ cpr_logger.info(f"[INIT] Output path: {self.video_output_path}")
52
+
53
+ #& For the graph plotter
54
+ self.plot_output_path = plot_output_path
55
 
56
  #& Initialize system components
57
  self.pose_estimator = PoseEstimator(min_confidence=0.5)
58
  self.role_classifier = RoleClassifier()
59
  self.chest_initializer = ChestInitializer()
60
+ self.metrics_calculator = MetricsCalculator(shoulder_width_cm=45*0.65)
61
 
62
+ # Remember the conditions if you need to adjust the thresholds
63
  # if avg_right > self.right_arm_angle_threshold: error
64
  # if avg_left < self.left_arm_angle_threshold: error
65
 
66
  self.posture_analyzer = PostureAnalyzer(right_arm_angle_threshold=220, left_arm_angle_threshold=160, wrist_distance_threshold=170, history_length_to_average=10)
67
  self.wrists_midpoint_analyzer = WristsMidpointAnalyzer()
68
  self.shoulders_analyzer = ShouldersAnalyzer()
69
+ self.graph_plotter = GraphPlotter()
70
+ self.warnings_overlayer = WarningsOverlayer()
71
+ cpr_logger.info("[INIT] System components initialized")
 
72
 
73
+ #& Warm up pose estimator with dummy data
74
+ dummy_frame = np.zeros((480, 640, 3), dtype=np.uint8)
75
+ self.pose_estimator.detect_poses(dummy_frame) # Force model loading
76
+ cpr_logger.info("[INIT] Pose estimator warmed up with dummy data")
77
 
78
  #& Keep track of previous results for continuity
79
  self.prev_rescuer_processed_results = None
 
81
  self.prev_chest_params = None
82
  self.prev_midpoint = None
83
  self.prev_pose_results = None
84
+ cpr_logger.info("[INIT] Previous results initialized")
85
+
86
+ #& Fundamental timing parameters (in seconds)
87
+ self.MIN_ERROR_DURATION = 0.5 # Require sustained errors for 0.5 s
88
+ self.REPORTING_INTERVAL = 5.0 # Generate reports every 5 seconds
89
+ self.SAMPLING_INTERVAL = 0.2 # Analyze every 0.2 seconds
90
+ self.KEEP_RATE_AND_DEPTH_WARNINGS_INTERVAL = 3.0
91
+ self.MIN_CHUNK_LENGTH_TO_REPORT = 3.0
92
+
93
+ # Derived frame counts
94
+ self.sampling_interval_frames = int(round(self.fps * self.SAMPLING_INTERVAL))
95
+ self.error_threshold_frames = int(self.MIN_ERROR_DURATION / self.SAMPLING_INTERVAL)
96
+ self.reporting_interval_frames = int(self.REPORTING_INTERVAL / self.SAMPLING_INTERVAL)
97
+ self.return_rate_and_depth_warnings_interval_frames = int(self.KEEP_RATE_AND_DEPTH_WARNINGS_INTERVAL / self.SAMPLING_INTERVAL)
98
+ self.min_chunk_length_to_report_frames = int(self.MIN_CHUNK_LENGTH_TO_REPORT / self.SAMPLING_INTERVAL)
99
+
100
+ # For cleaner feedback, the reporting interval must be an exact multiple of the sampling interval.
101
+ ratio = self.REPORTING_INTERVAL / self.SAMPLING_INTERVAL
102
+ assert math.isclose(ratio, round(ratio)), \
103
+ f"Reporting interval ({self.REPORTING_INTERVAL}) must be an exact multiple of "\
104
+ f"sampling interval ({self.SAMPLING_INTERVAL}). Actual ratio: {ratio:.2f}"
105
+
106
+ # To be able to detect an error, the error detection window must be greater than or equal to the sampling interval.
107
+ assert self.MIN_ERROR_DURATION >= self.SAMPLING_INTERVAL, \
108
+ f"Error detection window ({self.MIN_ERROR_DURATION}s) must be ≥ sampling interval ({self.SAMPLING_INTERVAL}s)"
109
+
110
+ cpr_logger.info(f"[INIT] Temporal alignment:")
111
+ cpr_logger.info(f" - {self.SAMPLING_INTERVAL}s sampling → {self.sampling_interval_frames} frames")
112
+ cpr_logger.info(f" - {self.MIN_ERROR_DURATION}s error detection → {self.error_threshold_frames} samples")
113
+ cpr_logger.info(f" - {self.REPORTING_INTERVAL}s reporting → {self.reporting_interval_frames} samples")
114
 
115
  #& Workaround for minor glitches
116
+ # A frame is accepted as long as this counter does not exceed the error_threshold_frames set above.
117
+ #! These (and those in the warnings_overlayer) should exactly match the ones appended in the PostureAnalyzer.
118
+ self.possible_warnings = [
119
+ "Right arm bent!",
120
+ "Left arm bent!",
121
+ "Left hand not on chest!",
122
+ "Right hand not on chest!",
123
+ "Both hands not on chest!",
124
+ ]
125
+ self.consecutive_frames_with_posture_errors_counters = {warning: 0 for warning in self.possible_warnings}
126
 
127
  #& Initialize variables for reporting warnings
 
128
 
129
+ self.rate_and_depth_warnings_from_the_last_report = []
130
+ cpr_logger.info("[INIT] Rate and depth warnings from the last report initialized")
 
 
131
 
132
+ #& Chunk and mini chunk management (Indexes and Flags)
133
+ self.has_not_processed_a_frame_successfully_before = True
134
+ self.waiting_to_start_new_chunk = False
135
 
136
+ self.chunk_start_frame_index = None
137
+ self.chunk_end_frame_index = None
138
+
139
+ #& Posture warnings region management
140
+ self.prev_is_part_of_a_posture_warnings_region = False
141
+ self.posture_warnings_region_start_frame_index = None
142
+ self.posture_warnings_region_end_frame_index = None
143
+
144
+ self.posture_warnings = []
145
+ self.rate_and_depth_warnings = []
146
+
147
+ #& For Formated Warnings
148
+ self.cached_posture_warnings = []
149
+ self.cached_rate_and_depth_warnings = []
150
+ self.return_rate_and_depth_warnings_interval_frames_counter = self.return_rate_and_depth_warnings_interval_frames
151
+ cpr_logger.info("[INIT] Formatted warnings initialized")
152
 
153
+ def _initialize_video_writer(self, frame):
154
+ """Initialize writer with safe fallback options"""
155
+ height, width = frame.shape[:2]
156
+ effective_fps = self.fps / max(1, self.sampling_interval_frames)
157
+
158
+ # Try different codec/container combinations
159
+ for codec, ext, fmt in [('avc1', 'mp4', 'mp4v'), # H.264
160
+ ('MJPG', 'avi', 'avi'),
161
+ ('XVID', 'avi', 'avi')]:
162
+ fourcc = cv2.VideoWriter_fourcc(*codec)
163
+ writer = cv2.VideoWriter(self.video_output_path, fourcc, effective_fps, (width, height))
164
+
165
+ if writer.isOpened():
166
+ self.video_writer = writer
167
+ self._writer_initialized = True
168
+ cpr_logger.info(f"[VIDEO WRITER] Initialized with {codec} codec")
169
+ return
170
+ else:
171
+ writer.release()
172
+
173
+ cpr_logger.info("[ERROR] Failed to initialize any video writer!")
174
+ self._writer_initialized = False
175
+
176
+ def _handle_chunk_end(self):
177
+ """Helper to handle chunk termination logic"""
178
+ self._calculate_rate_and_depth_for_chunk()
179
+ cpr_logger.info(f"[RUN ANALYSIS] Calculated rate and depth for the chunk")
180
+
181
+ rate_and_depth_warnings = self._get_rate_and_depth_warnings()
182
+
183
+ # If the chunk is too short, we don't want to report any warnings it might contain.
184
+ if (self.chunk_end_frame_index - self.chunk_start_frame_index) < self.min_chunk_length_to_report_frames:
185
+ rate_and_depth_warnings = []
186
+
187
+ self.cached_rate_and_depth_warnings = rate_and_depth_warnings
188
+ self.return_rate_and_depth_warnings_interval_frames_counter = self.return_rate_and_depth_warnings_interval_frames
189
+ cpr_logger.info(f"[RUN ANALYSIS] Retrieved rate and depth warnings for the chunk")
190
+
191
+ self.rate_and_depth_warnings.append({
192
+ 'start_frame': self.chunk_start_frame_index,
193
+ 'end_frame': self.chunk_end_frame_index,
194
+ 'rate_and_depth_warnings': rate_and_depth_warnings,
195
+ })
196
+ cpr_logger.info(f"[RUN ANALYSIS] Assigned rate and depth warnings region data")
197
+
198
+ self.shoulders_analyzer.reset_shoulder_distances()
199
+ self.wrists_midpoint_analyzer.reset_midpoint_history()
200
+ cpr_logger.info(f"[RUN ANALYSIS] Reset shoulder distances and midpoint history for the chunk")
201
+
202
+ def _handle_posture_warnings_region_end(self):
203
+ """Helper to handle posture warnings region termination"""
204
+ self.posture_warnings.append({
205
+ 'start_frame': self.posture_warnings_region_start_frame_index,
206
+ 'end_frame': self.posture_warnings_region_end_frame_index,
207
+ 'posture_warnings': self.cached_posture_warnings.copy(),
208
+ })
209
+ cpr_logger.info(f"[RUN ANALYSIS] Assigned posture warnings region data")
210
+
211
+ def _start_new_chunk(self, chunk_type="chunk"):
212
+ """Helper to initialize new chunk"""
213
+ self.chunk_start_frame_index = self.frame_counter
214
+ self.waiting_to_start_new_chunk = False
215
+ cpr_logger.info(f"[CHUNK] New {chunk_type} started at {self.frame_counter}")
216
+
217
+ def _start_new_posture_warnings_region(self):
218
+ """Helper to initialize new posture warnings region"""
219
+ self.posture_warnings_region_start_frame_index = self.frame_counter
220
+ cpr_logger.info(f"[POSTURE WARNINGS] New region started at {self.frame_counter}")
221
+
222
    def run_analysis_video(self):
        """Run the full CPR analysis over the opened video.

        Drives the frame loop: samples frames, processes each one for posture
        and compression data, maintains the chunk / posture-warnings-region
        state machine, writes the annotated video, and finally produces the
        aggregate metrics, the motion plot, and the warning-overlaid output.

        Returns:
            The per-chunk JSON data accumulated by the graph plotter.
        """
        try:
            cpr_logger.info("[RUN ANALYSIS] Starting analysis")

            #& Main execution loop
            main_loop_start_time = time.time()
            cpr_logger.info("[RUN ANALYSIS] Main loop started")
            while self.cap.isOpened():
                #& Always advance to next frame first
                # grab() only advances the stream; the expensive decode is done
                # by retrieve() and skipped entirely for sampled-out frames.
                ret = self.cap.grab() # Faster than read() for skipping
                if not ret: break

                #& Increment frame counter
                self.frame_counter += 1

                cpr_logger.info(f"\n[FRAME {int(self.frame_counter)}]")

                #& Skip frames that fall outside the sampling interval
                if self.frame_counter % self.sampling_interval_frames != 0:
                    cpr_logger.info(f"[SKIP FRAME] Skipping frame")
                    continue

                #& Retrieve and process frame
                _, frame = self.cap.retrieve()
                cpr_logger.info(f"[RUN ANALYSIS] Retrieved frame")

                #& Check for termination sentinel
                if frame is None:
                    cpr_logger.info("Camera stream ended")

                    # Close whichever region is currently open before breaking.
                    if self.prev_is_part_of_a_posture_warnings_region:
                        # End the posture warnings region
                        self.posture_warnings_region_end_frame_index = self.frame_counter
                        cpr_logger.info(f"[RUN ANALYSIS] End of posture warnings region detected")
                        self._handle_posture_warnings_region_end()

                    elif self.chunk_start_frame_index is not None:
                        # End the current chunk
                        self.chunk_end_frame_index = self.frame_counter
                        cpr_logger.info(f"[RUN ANALYSIS] End of chunk detected")
                        self._handle_chunk_end()
                    break

                #& Rotate frame
                frame = self._handle_frame_rotation(frame)
                cpr_logger.info(f"[RUN ANALYSIS] Rotated frame")

                #& Process frame
                # If there are (sustained) posture warnings, midpoint detection
                # was not even attempted; otherwise it may succeed or fail.
                # Two return values are needed to indicate which one happened.
                posture_warnings, has_appended_midpoint = self._process_frame(frame)
                cpr_logger.info(f"[RUN ANALYSIS] Processed frame")

                #& Posture Warnings Region Setting Flags
                # An accepted frame has its warnings reset, so any returned
                # warnings mean the frame belongs to a posture warnings region.
                is_part_of_a_posture_warnings_region = len(posture_warnings) > 0

                # Decide whether this frame marks a transition between a chunk
                # region and a posture warnings region.
                is_start_of_posture_warnings_region = (not self.prev_is_part_of_a_posture_warnings_region) and is_part_of_a_posture_warnings_region
                is_end_of_posture_warnings_region = self.prev_is_part_of_a_posture_warnings_region and not is_part_of_a_posture_warnings_region

                # Update the cached value for the next iteration
                self.prev_is_part_of_a_posture_warnings_region = is_part_of_a_posture_warnings_region
                cpr_logger.info(f"[RUN ANALYSIS] Posture warnings region flags updated")

                #& Chunks and Posture Warnings Regions Management
                #~ Case 1: posture warnings region after a chunk
                if is_start_of_posture_warnings_region:
                    cpr_logger.info(f"[RUN ANALYSIS] Case 1: posture warnings region after a chunk")

                    # Start a new posture warnings region
                    self._start_new_posture_warnings_region()

                    # End the previous chunk if it exists
                    if self.chunk_start_frame_index is not None:
                        self.chunk_end_frame_index = self.frame_counter - 1
                        cpr_logger.info(f"[RUN ANALYSIS] End of chunk detected")
                        self._handle_chunk_end()

                #~ Case 2: posture warnings region after a posture warnings region
                # (the set of warnings changed, so the region is split in two)
                if (self.cached_posture_warnings != posture_warnings) and (is_part_of_a_posture_warnings_region) and (not is_start_of_posture_warnings_region) and (not is_end_of_posture_warnings_region):
                    cpr_logger.info(f"[RUN ANALYSIS] Case 2: posture warnings region after a posture warnings region")

                    # End the previous posture warnings region
                    self.posture_warnings_region_end_frame_index = self.frame_counter - 1
                    cpr_logger.info(f"[RUN ANALYSIS] End of posture warnings region detected")
                    self._handle_posture_warnings_region_end()

                    # Start a new posture warnings region
                    self._start_new_posture_warnings_region()

                #~ Case 3: chunk after a posture warnings region
                if is_end_of_posture_warnings_region:
                    cpr_logger.info(f"[RUN ANALYSIS] Case 3: chunk after a posture warnings region")

                    # Start a new chunk
                    self.waiting_to_start_new_chunk = True
                    cpr_logger.info(f"[RUN ANALYSIS] Waiting to start a new chunk")
                    new_chunk_type = "chunk"

                    # End the previous posture warnings region
                    self.posture_warnings_region_end_frame_index = self.frame_counter - 1
                    cpr_logger.info(f"[RUN ANALYSIS] End of posture warnings region detected")
                    self._handle_posture_warnings_region_end()

                #~ Case 4: chunk after a chunk (periodic reporting split)
                if (not is_part_of_a_posture_warnings_region) and (not is_end_of_posture_warnings_region) and (self.processed_frame_counter % self.reporting_interval_frames == 0):
                    cpr_logger.info(f"[RUN ANALYSIS] Case 4: chunk after a chunk")

                    # End the previous chunk if it exists
                    if self.chunk_start_frame_index is not None and self.chunk_start_frame_index != self.frame_counter:
                        self.chunk_end_frame_index = self.frame_counter
                        cpr_logger.info(f"[RUN ANALYSIS] End of chunk detected")
                        self._handle_chunk_end()

                    # Start a new chunk
                    self.waiting_to_start_new_chunk = True
                    cpr_logger.info(f"[RUN ANALYSIS] Waiting to start a new chunk")

                    new_chunk_type = "mini chunk"

                #~ Follow up on cases 3 and 4
                # NOTE(review): new_chunk_type is a plain local. If
                # waiting_to_start_new_chunk carries over from a previous loop
                # iteration and neither case 3 nor case 4 fired on this frame,
                # the reference below raises UnboundLocalError (swallowed by the
                # blanket except). Consider initializing it every iteration.
                if (self.waiting_to_start_new_chunk) and (has_appended_midpoint):
                    cpr_logger.info(f"[RUN ANALYSIS] Follow up on cases 3 and 4")

                    if (new_chunk_type == "chunk") or (new_chunk_type == "mini chunk" and (self.frame_counter != self.chunk_end_frame_index)):
                        self._start_new_chunk()

                #& Compose frame
                # Draws the chest region and the midpoint. The frame is not
                # displayed yet, just composed.
                composed_frame = self._compose_frame(frame, is_part_of_a_posture_warnings_region)

                if composed_frame is not None:
                    frame = composed_frame
                    cpr_logger.info(f"[RUN ANALYSIS] Frame composed successfully")
                else:
                    cpr_logger.info(f"[RUN ANALYSIS] Frame composition failed")

                #& Initialize video writer if not done yet
                if frame is not None and not self._writer_initialized:
                    self._initialize_video_writer(frame)
                    cpr_logger.info(f"[VIDEO WRITER] Initialized video writer")

                #& Write frame if writer is functional
                if self._writer_initialized:
                    # Convert frame to BGR if needed
                    if frame.dtype != np.uint8:
                        frame = frame.astype(np.uint8)
                    if len(frame.shape) == 2: # Grayscale
                        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)

                    try:
                        self.video_writer.write(frame)
                    except Exception as e:
                        cpr_logger.error(f"[WRITE ERROR] {str(e)}")
                        self._writer_initialized = False

                #& Update the cached posture warnings
                # Deferred to here because the four cases above may need the
                # previous frame's cached warnings.
                self.cached_posture_warnings = posture_warnings

                #& Increment processed frame counter
                self.processed_frame_counter += 1 # Increment here
                cpr_logger.info(f"[RUN ANALYSIS] Processed frame counter incremented")

                #& Check if the user wants to quit
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    cpr_logger.info("[RUN ANALYSIS] 'q' pressed, exiting loop.")
                    break

            main_loop_end_time = time.time()
            elapsed_time = main_loop_end_time - main_loop_start_time
            cpr_logger.info(f"[TIMING] Main loop elapsed time: {elapsed_time:.2f}s")

        except Exception as e:
            cpr_logger.error(f"[ERROR] An error occurred during main execution loop: {str(e)}")

        finally:
            report_and_plot_start_time = time.time()

            self.cap.release()
            self.cap = None

            if self.video_writer is not None:
                self.video_writer.release()
                cpr_logger.info(f"[VIDEO WRITER] Released writer. File should be at: {os.path.abspath(self.video_output_path)}")
            cv2.destroyAllWindows()
            cpr_logger.info("[RUN ANALYSIS] Released video capture and destroyed all windows")

            self._calculate_rate_and_depth_for_all_chunks()
            cpr_logger.info("[RUN ANALYSIS] Calculated weighted averages of the metrics across all chunks")

            self._plot_full_motion_curve_for_all_chunks()
            cpr_logger.info("[RUN ANALYSIS] Plotted full motion curve")

            self.warnings_overlayer.add_warnings_to_processed_video(self.video_output_path, self.sampling_interval_frames, self.rate_and_depth_warnings, self.posture_warnings)
            cpr_logger.info("[RUN ANALYSIS] Added warnings to processed video")

            # The intermediate (un-overlaid) video is no longer needed.
            try:
                if os.path.exists(self.video_output_path):
                    os.remove(self.video_output_path)
                    cpr_logger.info(f"[CLEANUP] Successfully deleted video file: {self.video_output_path}")
                else:
                    cpr_logger.warning(f"[CLEANUP] Video file not found at: {self.video_output_path}")
            except Exception as e:
                cpr_logger.error(f"[ERROR] Failed to delete video file: {str(e)}")

            report_and_plot_end_time = time.time()
            report_and_plot_elapsed_time = report_and_plot_end_time - report_and_plot_start_time
            cpr_logger.info(f"[TIMING] Report and plot elapsed time: {report_and_plot_elapsed_time:.2f}s")
            # NOTE(review): returning from inside `finally` silently discards
            # any exception raised in the try body — confirm this is intended.
            return self.graph_plotter._chunks_json_data
438
+
439
+ def _format_warnings(self):
440
+ """Combine warnings into a simple structured response"""
441
+
442
+ if self.cached_posture_warnings:
443
+ return {
444
+ "status": "warning",
445
+ "posture_warnings": self.cached_posture_warnings,
446
+ "rate_and_depth_warnings": [],
447
+ }
448
+
449
+ if (self.cached_rate_and_depth_warnings) and (self.return_rate_and_depth_warnings_interval_frames_counter > 0):
450
+ self.return_rate_and_depth_warnings_interval_frames_counter -= 1
451
+
452
+ return {
453
+ "status": "warning",
454
+ "posture_warnings": [],
455
+ "rate_and_depth_warnings": self.cached_rate_and_depth_warnings,
456
+ }
457
+
458
+ return {
459
+ "status": "ok",
460
+ "posture_warnings": [],
461
+ "rate_and_depth_warnings": [],
462
+ }
463
 
464
  def _handle_frame_rotation(self, frame):
 
465
  if frame.shape[1] > frame.shape[0]: # Width > Height
466
  frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
467
  return frame
468
 
469
  def _process_frame(self, frame):
470
+ #* Warnings for real time feedback
471
+ warnings = []
472
+
473
  #* Chunk Completion Check
474
+ has_appended_midpoint = False
 
475
 
476
  #& Pose Estimation
477
  pose_results = self.pose_estimator.detect_poses(frame)
 
479
  #~ Handle Failed Detection or Update Previous Results
480
  if not pose_results:
481
  pose_results = self.prev_pose_results
482
+ cpr_logger.info("[POSE ESTIMATION] No pose detected, using previous results (could be None)")
483
  else:
484
  self.prev_pose_results = pose_results
485
 
486
  if not pose_results:
487
+ cpr_logger.info("[POSE ESTIMATION] Insufficient data for processing")
488
+ return warnings, has_appended_midpoint
489
 
490
  #& Rescuer and Patient Classification
491
  rescuer_processed_results, patient_processed_results = self.role_classifier.classify_roles(pose_results, self.prev_rescuer_processed_results, self.prev_patient_processed_results)
 
493
  #~ Handle Failed Classifications OR Update Previous Results
494
  if not rescuer_processed_results:
495
  rescuer_processed_results = self.prev_rescuer_processed_results
496
+ cpr_logger.info("[ROLE CLASSIFICATION] No rescuer detected, using previous results (could be None)")
497
  else:
498
  self.prev_rescuer_processed_results = rescuer_processed_results
499
 
500
  if not patient_processed_results:
501
  patient_processed_results = self.prev_patient_processed_results
502
+ cpr_logger.info("[ROLE CLASSIFICATION] No patient detected, using previous results (could be None)")
503
  else:
504
  self.prev_patient_processed_results = patient_processed_results
505
 
506
  if not rescuer_processed_results or not patient_processed_results:
507
+ cpr_logger.info("[ROLE CLASSIFICATION] Insufficient data for processing")
508
+ return warnings, has_appended_midpoint
509
 
510
  #^ Set Params in Role Classifier (to draw later)
511
  self.role_classifier.rescuer_processed_results = rescuer_processed_results
512
  self.role_classifier.patient_processed_results = patient_processed_results
513
+ cpr_logger.info(f"[ROLE CLASSIFICATION] Updated role classifier with new results")
514
 
515
  #& Chest Estimation
516
  chest_params = self.chest_initializer.estimate_chest_region(patient_processed_results["keypoints"], patient_processed_results["bounding_box"], frame_width=frame.shape[1], frame_height=frame.shape[0])
 
518
  #~ Handle Failed Estimation or Update Previous Results
519
  if not chest_params:
520
  chest_params = self.prev_chest_params
521
+ cpr_logger.info("[CHEST ESTIMATION] No chest region detected, using previous results (could be None)")
522
  else:
523
  self.prev_chest_params = chest_params
524
 
525
  if not chest_params:
526
+ cpr_logger.info("[CHEST ESTIMATION] Insufficient data for processing")
527
+ return warnings, has_appended_midpoint
528
 
529
  #^ Set Params in Chest Initializer (to draw later)
530
  self.chest_initializer.chest_params = chest_params
 
541
  self.chest_initializer.expected_chest_params = expected_chest_params
542
 
543
  #& Posture Analysis
544
+ cpr_logger.info(f"[POSTURE ANALYSIS] Analyzing posture")
545
+ current_warnings = self.posture_analyzer.validate_posture(rescuer_processed_results["keypoints"], self.chest_initializer.expected_chest_params)
546
+ cpr_logger.info(f"[POSTURE ANALYSIS] Posture analysis completed")
547
+
548
+ # Update individual warning counters
549
+ for warning in self.possible_warnings:
550
+ if warning in current_warnings:
551
+ self.consecutive_frames_with_posture_errors_counters[warning] += 1
552
+ else:
553
+ self.consecutive_frames_with_posture_errors_counters[warning] = 0
554
+
555
+ # Filter warnings that meet/exceed threshold
556
+ warnings = [
557
+ warning for warning in self.possible_warnings
558
+ if self.consecutive_frames_with_posture_errors_counters[warning] >= self.error_threshold_frames
559
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
 
560
 
561
  #^ Set Params in Posture Analyzer (to draw later)
562
  self.posture_analyzer.warnings = warnings
563
+ cpr_logger.info(f"[POSTURE ANALYSIS] Updated posture analyzer with new results")
564
 
565
  #& Wrist Midpoint Detection
566
  midpoint = self.wrists_midpoint_analyzer.detect_wrists_midpoint(rescuer_processed_results["keypoints"])
 
568
  #~ Handle Failed Detection or Update Previous Results
569
  if not midpoint:
570
  midpoint = self.prev_midpoint
571
+ cpr_logger.info("[WRIST MIDPOINT DETECTION] No midpoint detected, using previous results (could be None)")
572
  else:
573
  self.prev_midpoint = midpoint
574
 
575
  if not midpoint:
576
+ cpr_logger.info("[WRIST MIDPOINT DETECTION] Insufficient data for processing")
577
+ return warnings, has_appended_midpoint
578
+
579
+ if len(warnings) == 0:
580
  #^ Set Params in Role Classifier (to draw later)
581
+ has_appended_midpoint = True
582
  self.wrists_midpoint_analyzer.midpoint = midpoint
583
  self.wrists_midpoint_analyzer.midpoint_history.append(midpoint)
584
+ cpr_logger.info(f"[WRIST MIDPOINT DETECTION] Updated wrist midpoint analyzer with new results")
585
 
586
  #& Shoulder Distance Calculation
587
  shoulder_distance = self.shoulders_analyzer.calculate_shoulder_distance(rescuer_processed_results["keypoints"])
588
  if shoulder_distance is not None:
589
  self.shoulders_analyzer.shoulder_distance = shoulder_distance
590
  self.shoulders_analyzer.shoulder_distance_history.append(shoulder_distance)
591
+ cpr_logger.info(f"[SHOULDER DISTANCE] Updated shoulder distance analyzer with new results")
 
 
 
 
592
 
593
+ return warnings, has_appended_midpoint
 
 
 
 
 
 
594
 
595
+ def _compose_frame(self, frame, is_part_of_a_posture_warnings_region):
 
 
596
  # Chest Region
597
  if frame is not None:
598
  frame = self.chest_initializer.draw_expected_chest_region(frame)
599
+ cpr_logger.info(f"[VISUALIZATION] Drawn chest region")
 
 
 
 
 
600
 
601
  if frame is not None:
602
+ if not is_part_of_a_posture_warnings_region:
 
603
  frame = self.wrists_midpoint_analyzer.draw_midpoint(frame)
604
+ cpr_logger.info(f"[VISUALIZATION] Drawn midpoint")
605
+
606
  return frame
607
 
608
+ def _calculate_rate_and_depth_for_chunk(self):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
609
  try:
610
+ result = self.metrics_calculator.handle_chunk(np.array(self.wrists_midpoint_analyzer.midpoint_history), self.chunk_start_frame_index, self.chunk_end_frame_index, self.fps, np.array(self.shoulders_analyzer.shoulder_distance_history), self.sampling_interval_frames)
611
+
612
+ if result == False:
613
+ cpr_logger.info("[ERROR] Failed to calculate metrics for the chunk")
614
+ return
 
 
 
 
 
 
 
 
 
 
 
 
615
 
616
  except Exception as e:
617
+ cpr_logger.error(f"[ERROR] Metric calculation failed: {str(e)}")
618
 
619
  def _calculate_rate_and_depth_for_all_chunks(self):
620
  try:
621
+ self.metrics_calculator.calculate_rate_and_depth_for_all_chunk()
622
+ cpr_logger.info(f"[METRICS] Weighted averages calculated")
623
  except Exception as e:
624
+ cpr_logger.error(f"[ERROR] Failed to calculate weighted averages: {str(e)}")
625
 
626
  def _plot_full_motion_curve_for_all_chunks(self):
627
  try:
628
+ self.graph_plotter.plot_motion_curve_for_all_chunks(self.metrics_calculator.chunks_y_preprocessed,
629
+ self.metrics_calculator.chunks_peaks,
630
+ self.metrics_calculator.chunks_depth,
631
+ self.metrics_calculator.chunks_rate,
632
+ self.metrics_calculator.chunks_start_and_end_indices,
633
+ self.posture_warnings,
634
+ self.sampling_interval_frames,
635
+ self.fps,
636
+ self.plot_output_path)
637
+ cpr_logger.info("[PLOT] Full motion curve plotted")
638
  except Exception as e:
639
+ cpr_logger.error(f"[ERROR] Failed to plot full motion curve: {str(e)}")
640
+
641
+ def _get_rate_and_depth_warnings(self):
642
+ rate_and_depth_warnings = self.metrics_calculator.get_rate_and_depth_warnings()
643
+ cpr_logger.info(f"[VISUALIZATION] Rate and depth warnings data: {rate_and_depth_warnings}")
644
+
645
+ return rate_and_depth_warnings
646
+
647
if __name__ == "__main__":
    # Entry point for a local, single-video run of the analyzer.
    cpr_logger.info(f"[MAIN] CPR Analysis Started")

    # Configuration
    # NOTE(review): hard-coded Windows-specific paths; consider CLI arguments
    # or environment variables for portability.
    requested_fps = 30
    input_video = r"D:\CPR_education\CPR\End to End\Code Refactor\video_2.mp4"
    # Validate input file exists
    if not os.path.exists(input_video):
        cpr_logger.error(f"[ERROR] Input video not found at: {input_video}")
        sys.exit(1)

    output_dir = r"D:\CPR_education\CPR\End to End\Code Refactor\Output"

    # Set output paths using original name
    video_output_path = os.path.join(output_dir, f"Myoutput.mp4")
    plot_output_path = os.path.join(output_dir, f"Myoutput.png")

    # Log paths for verification
    cpr_logger.info(f"[CONFIG] Input video: {input_video}")
    cpr_logger.info(f"[CONFIG] Video output: {video_output_path}")
    cpr_logger.info(f"[CONFIG] Plot output: {plot_output_path}")

    # Initialize and run analyzer
    initialization_start_time = time.time()
    analyzer = CPRAnalyzer(input_video, video_output_path, plot_output_path, requested_fps)

    # Set plot output path in the analyzer
    # NOTE(review): presumably redundant — the constructor already receives
    # plot_output_path; confirm and drop one of the two.
    analyzer.plot_output_path = plot_output_path

    initialization_end_time = time.time()
    initialization_elapsed_time = initialization_end_time - initialization_start_time
    cpr_logger.info(f"[TIMING] Initialization time: {initialization_elapsed_time:.2f}s")

    try:
        # NOTE(review): only run_analysis_video is visible in this module —
        # verify that run_analysis exists (e.g. defined elsewhere) or rename.
        data = analyzer.run_analysis()
        print(data)
    finally:
        cpr_logger.info(f"[MAIN] CPR Analysis Terminated")
CPR/analysis_socket_server.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import socket
2
+ import json
3
+ from threading import Thread
4
+ from queue import Queue
5
+ import threading
6
+ from CPR.logging_config import cpr_logger
7
+ import queue
8
+
9
class AnalysisSocketServer:
    """TCP server that streams CPR warning payloads to a connected client.

    Warning dicts are pushed onto an internal queue (by the analyzer) and a
    background thread forwards them to the client as newline-delimited JSON.
    """

    def __init__(self, host='localhost', port=5000):
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.conn = None
        self.running = False
        self.warning_queue = Queue()
        self.connection_event = threading.Event()
        cpr_logger.info(f"[SOCKET] Server initialized on {host}:{port}")

    def start_server(self):
        """Bind, listen, and begin accepting clients on a daemon thread."""
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))
        self.sock.listen()
        self.running = True
        Thread(target=self._accept_connections, daemon=True).start()

    def _accept_connections(self):
        """Accept loop: spawn a sender thread for every client that connects."""
        while self.running:
            try:
                self.conn, addr = self.sock.accept()
                cpr_logger.info(f"[SOCKET] Connected by {addr}")
                self.connection_event.set() # Signal that connection was made
                Thread(target=self._handle_client, args=(self.conn,), daemon=True).start()
            except Exception as e:
                #! Not an error
                cpr_logger.error(f"[SOCKET] Connection error: {str(e)}")

    def wait_for_connection(self, timeout=None):
        """Block until a client connects"""
        #^ Set as an error for cleaner logging purposes
        cpr_logger.error("[SOCKET] Waiting for client connection...")
        self.connection_event.clear() # Reset the event
        return self.connection_event.wait(timeout)

    def _handle_client(self, conn):
        """Drain the warning queue and send each entry to *conn* as one JSON line."""
        while self.running:
            try:
                # Block with a short timeout so self.running is still
                # re-checked periodically without spinning the CPU.
                payload = self.warning_queue.get(block=True, timeout=0.1)
                conn.sendall((json.dumps(payload) + "\n").encode('utf-8'))
            except queue.Empty:
                continue # Timeout allows checking self.running periodically
            except (BrokenPipeError, ConnectionResetError):
                cpr_logger.error("[SOCKET] Client disconnected")
                break
            except Exception as e:
                cpr_logger.error(f"[SOCKET] Error: {str(e)}")
                break
        conn.close()

    def stop_server(self):
        """Stop the accept loop and close the listening socket."""
        self.running = False
        self.sock.close()
        cpr_logger.info("[SOCKET] Server stopped")
CPR/chest_initializer.py CHANGED
@@ -1,6 +1,7 @@
1
  import cv2
2
  import numpy as np
3
  from CPR.keypoints import CocoKeypoints
 
4
 
5
  class ChestInitializer:
6
  """Handles chest point detection with validations in estimation."""
@@ -25,10 +26,20 @@ class ChestInitializer:
25
  shoulder_center = np.array([(left_shoulder[0] + right_shoulder[0]) / 2,
26
  (left_shoulder[1] + right_shoulder[1]) / 2])
27
 
28
- # Calculate chest center by applying directional adjustment separately for x and y
29
- chest_center_from_shoulder_x = shoulder_center[0] - 0.3 * bbox_delta_y
30
- chest_center_from_shoulder_y = shoulder_center[1] - 0.1 * bbox_delta_y
31
- chest_center_from_shoulder = np.array([chest_center_from_shoulder_x, chest_center_from_shoulder_y])
 
 
 
 
 
 
 
 
 
 
32
 
33
  # Chest dimensions (85% of shoulder width, 40% height)
34
  chest_dx = bbox_delta_y * 0.8
@@ -59,7 +70,7 @@ class ChestInitializer:
59
  return (cx, cy, cw, ch)
60
 
61
  except (IndexError, TypeError, ValueError) as e:
62
- print(f"Chest estimation error: {e}")
63
  return None
64
 
65
  def estimate_chest_region_weighted_avg(self, frame_width, frame_height, window_size=60, min_samples=3):
@@ -118,7 +129,7 @@ class ChestInitializer:
118
  )
119
 
120
  except Exception as e:
121
- print(f"Chest region estimation error: {e}")
122
  return None
123
 
124
  def draw_expected_chest_region(self, frame):
 
1
  import cv2
2
  import numpy as np
3
  from CPR.keypoints import CocoKeypoints
4
+ from CPR.logging_config import cpr_logger
5
 
6
  class ChestInitializer:
7
  """Handles chest point detection with validations in estimation."""
 
26
  shoulder_center = np.array([(left_shoulder[0] + right_shoulder[0]) / 2,
27
  (left_shoulder[1] + right_shoulder[1]) / 2])
28
 
29
+ #& Handling different patient positions
30
+ # If the x-coordinate shoulder center is closer to that of the Bottom-Right bbox corner (2)
31
+ # then the orientation is "right"
32
+ # If the x-coordinate shoulder center is closer to that of the Top-Left bbox corner (1)
33
+ # then the orientation is "left"
34
+
35
+ if abs(shoulder_center[0] - bbox_x2) < abs(shoulder_center[0] - bbox_x1): # Orientation is "right"
36
+ chest_center_from_shoulder_x = shoulder_center[0] - 0.3 * bbox_delta_y
37
+ chest_center_from_shoulder_y = shoulder_center[1] - 0.1 * bbox_delta_y
38
+ chest_center_from_shoulder = np.array([chest_center_from_shoulder_x, chest_center_from_shoulder_y])
39
+ else: # Orientation is "left"
40
+ chest_center_from_shoulder_x = shoulder_center[0] + 1.0 * bbox_delta_y
41
+ chest_center_from_shoulder_y = shoulder_center[1] - 0.1 * bbox_delta_y
42
+ chest_center_from_shoulder = np.array([chest_center_from_shoulder_x, chest_center_from_shoulder_y])
43
 
44
  # Chest dimensions (85% of shoulder width, 40% height)
45
  chest_dx = bbox_delta_y * 0.8
 
70
  return (cx, cy, cw, ch)
71
 
72
  except (IndexError, TypeError, ValueError) as e:
73
+ cpr_logger.error(f"Chest estimation error: {e}")
74
  return None
75
 
76
  def estimate_chest_region_weighted_avg(self, frame_width, frame_height, window_size=60, min_samples=3):
 
129
  )
130
 
131
  except Exception as e:
132
+ cpr_logger.error(f"Chest region estimation error: {e}")
133
  return None
134
 
135
  def draw_expected_chest_region(self, frame):
CPR/client.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import socket
import json

from CPR.logging_config import cpr_logger

HOST = 'localhost' # The server's hostname or IP address
PORT = 5000 # The port used by the server

#! Not an error

# Simple test client: connects to the analysis socket server and prints every
# newline-delimited JSON warning payload it receives.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    #^ Set as an error for cleaner logging purposes
    cpr_logger.error(f"Connected to {HOST}:{PORT}")

    try:
        while True:
            data = s.recv(1024)
            if not data:
                break

            # One recv may carry several newline-delimited JSON messages;
            # split before decoding each one.
            for line in data.decode('utf-8').split('\n'):
                if not line.strip():
                    continue
                try:
                    warnings = json.loads(line)
                    cpr_logger.error("\nReceived warnings:")
                    cpr_logger.error(f"Status: {warnings['status']}")
                    cpr_logger.error(f"Posture Warnings: {warnings['posture_warnings']}")
                    cpr_logger.error(f"Rate/Depth Warnings: {warnings['rate_and_depth_warnings']}")
                except json.JSONDecodeError:
                    cpr_logger.error("Received invalid JSON")
    except KeyboardInterrupt:
        cpr_logger.error("Disconnecting...")
CPR/graph_plotter.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import matplotlib.pyplot as plt
3
+ import sys
4
+ import cv2
5
+ from CPR.logging_config import cpr_logger
6
+ from matplotlib.ticker import MultipleLocator
7
+ import os
8
+
9
+ class GraphPlotter:
10
+ """Class to plot graphs for various metrics"""
11
+
12
+ def __init__(self):
13
+ self.chunks_y_preprocessed = []
14
+ self.chunks_peaks = []
15
+ self.chunks_depth = []
16
+ self.chunks_rate = []
17
+ self.chunks_start_and_end_indices = []
18
+ self.posture_warnings_regions = []
19
+ self.sampling_interval_in_frames = 0
20
+ self.fps = None
21
+ self._chunks_json_data = [] # Store chunk data for JSON output
22
+
23
+ self.error_symbols = {
24
+ "Right arm bent!": ('o', '#A61D1D'), # circle
25
+ "Left arm bent!": ('s', '#A61D1D'), # square
26
+ "Left hand not on chest!": ('P', '#A61D1D'), # plus
27
+ "Right hand not on chest!": ('*', '#A61D1D'), # star
28
+ "Both hands not on chest!": ('D', '#A61D1D') # diamond
29
+ }
30
+
31
+ self.annotation_y_level = None # Will store our target y-position
32
+
33
+ def _assign_graph_data(self, chunks_y_preprocessed, chunks_peaks, chunks_depth, chunks_rate, chunks_start_and_end_indices, posture_warnings_regions, sampling_interval_in_frames, fps):
34
+ """Assign data members for the class"""
35
+ self.chunks_y_preprocessed = chunks_y_preprocessed
36
+ self.chunks_peaks = chunks_peaks
37
+ self.chunks_depth = chunks_depth
38
+ self.chunks_rate = chunks_rate
39
+ self.chunks_start_and_end_indices = chunks_start_and_end_indices
40
+ self.posture_warnings_regions = posture_warnings_regions
41
+ self.sampling_interval_in_frames = sampling_interval_in_frames
42
+ self.fps = fps # Store FPS
43
+
44
+ cpr_logger.info(f"[Graph Plotter] Data members assigned with {len(self.chunks_start_and_end_indices)} chunks and {len(self.posture_warnings_regions)} error regions for a sampling interval of {self.sampling_interval_in_frames} frames and FPS {self.fps}")
45
+
46
    def _plot_single_chunk(self, ax, chunk, idx, prev_last_point, prev_chunk_end):
        """Plot one chunk's motion curve, peaks, and metric annotation on *ax*.

        Args:
            ax: Matplotlib axes to draw on.
            chunk: ((start_frame, end_frame), depth, rate) for this chunk.
            idx: 0-based chunk index into the per-chunk lists.
            prev_last_point: {'y_preprocessed': value} of the previous chunk's
                last sample, or None — used to draw a connecting segment.
            prev_chunk_end: Last frame index of the previous chunk, or None.

        Returns:
            ({'y_preprocessed': last_value}, end_frame) to feed the next call.
        """
        (start_frame, end_frame), depth, rate = chunk
        # Convert frames to time
        chunk_frames = np.arange(start_frame, end_frame + 1, self.sampling_interval_in_frames)
        chunk_times = chunk_frames / self.fps # Convert to seconds
        y_preprocessed = self.chunks_y_preprocessed[idx]
        peaks = self.chunks_peaks[idx]

        # Check if chunks are contiguous and need connection (frame-based logic)
        if (prev_chunk_end is not None and
            start_frame == prev_chunk_end + self.sampling_interval_in_frames and
            prev_last_point is not None):

            # Convert connection points to seconds
            connect_start = prev_chunk_end / self.fps
            connect_end = start_frame / self.fps
            connect_times = [connect_start, connect_end]

            cpr_logger.info(f"[Graph Plotter] Connecting chunk {idx+1} to previous chunk (time {connect_start:.2f}-{connect_end:.2f}s)")
            ax.plot(connect_times, [prev_last_point['y_preprocessed'], y_preprocessed[0]],
                    color="#2F5597", linewidth=2.5)

        # Plot current chunk data; legend labels only on the first chunk so
        # each series appears once in the legend.
        cpr_logger.info(f"[Graph Plotter] Plotting chunk {idx+1} (time {chunk_times[0]:.2f}-{chunk_times[-1]:.2f}s)")
        smooth_label = "Motion" if idx == 0 else ""
        peaks_label = "Peaks" if idx == 0 else ""

        # Motion curve
        ax.plot(chunk_times, y_preprocessed,
                color="#2F5597", linewidth=2.5,
                marker='o', markersize=4,
                markerfacecolor='#2F5597', markeredgecolor='#2F5597',
                label=smooth_label)

        # Detected compression peaks
        if peaks.size > 0:
            ax.plot(chunk_times[peaks], y_preprocessed[peaks],
                    "x", color="#ED7D31", markersize=8,
                    label=peaks_label)

        # Annotate chunk metrics (time-based), only when both are positive
        if (depth is not None and rate is not None) and (depth > 0 and rate > 0):
            mid_time = (start_frame + end_frame) / (2 * self.fps)
            cpr_logger.info(f"[Graph Plotter] Chunk {idx+1} metrics: {depth:.1f}cm depth, {rate:.1f}cpm rate")

            # Calculate or use stored annotation y-level
            if self.annotation_y_level is None:
                # First chunk: midpoint (50%) between min and max of y_preprocessed
                y_range = np.max(y_preprocessed) - np.min(y_preprocessed)
                self.annotation_y_level = np.min(y_preprocessed) + y_range * 0.5
                cpr_logger.info(f"[Graph Plotter] Setting annotation y-level to {self.annotation_y_level:.2f}")

            # Annotation box placed at the same y-level for every chunk
            ax.annotate(f"Depth: {depth:.1f}cm\nRate: {rate:.1f}cpm",
                        xy=(mid_time, self.annotation_y_level),
                        xytext=(0, 10), textcoords='offset points',
                        ha='center', va='bottom', fontsize=9,
                        bbox=dict(boxstyle='round,pad=0.5',
                                  fc='#F2F2F2', ec='#595959', alpha=0.8))

        return {'y_preprocessed': y_preprocessed[-1]}, end_frame
107
+
108
    def _plot_error_regions(self, ax, computed_error_regions):
        """Visualize error regions with adaptive symbol sizing.

        Args:
            ax: Matplotlib axes to draw on.
            computed_error_regions: List of (start_sec, end_sec) tuples, index-aligned
                with self.posture_warnings_regions.

        Returns:
            List of scatter handles (one per distinct warning type) for the legend.
        """
        cpr_logger.info("[Graph Plotter] Rendering error regions:")

        # Size parameters
        target_width_ratio = 0.7  # A symbol may occupy at most 70% of its region's width
        legend_size = 80  # Fixed legend symbol size (points²)

        legend_handles = []
        y_mid = np.mean(ax.get_ylim())  # Vertical center of the current axes

        # Get figure dimensions for converting between points and data units (seconds)
        fig = ax.figure
        fig_width_points = fig.get_figwidth() * fig.dpi
        x_min, x_max = ax.get_xlim()
        data_range = x_max - x_min
        points_per_second = fig_width_points / data_range

        for idx, (start_sec, end_sec) in enumerate(computed_error_regions):
            region_width = end_sec - start_sec
            region_data = self.posture_warnings_regions[idx]
            warnings = region_data.get('posture_warnings', [])

            # Calculate max allowed width in data units (seconds)
            max_data_width = region_width * target_width_ratio

            # Convert legend size (area in points²) to a width in data units
            legend_data_width = (np.sqrt(legend_size) / points_per_second)

            # Determine final symbol width (data units): shrink if region is narrow
            symbol_data_width = min(legend_data_width, max_data_width)

            # Convert back to points² for matplotlib's scatter `s` parameter
            symbol_point_width = symbol_data_width * points_per_second
            symbol_size = symbol_point_width ** 2

            for error in warnings:
                # NOTE(review): assumes self.error_symbols maps warning name ->
                # (marker, color); unknown warnings are silently skipped.
                if error in self.error_symbols:
                    marker, color = self.error_symbols[error]

                    ax.scatter(
                        x=(start_sec + end_sec)/2,
                        y=y_mid,
                        s=symbol_size,
                        marker=marker,
                        color=color,
                        alpha=0.7,
                        edgecolors='black',
                        linewidths=0.5,
                        zorder=5
                    )

                    # Create legend entry once per distinct warning label
                    if not any(error == h.get_label() for h in legend_handles):
                        legend_handles.append(
                            ax.scatter([], [],
                                       s=legend_size,
                                       marker=marker,
                                       color=color,
                                       edgecolors='black',
                                       linewidths=0.5,
                                       alpha=0.7,
                                       label=error)
                        )

            # Shade the whole error region behind the data
            ax.axvspan(start_sec, end_sec,
                       color='#FCE4D6', alpha=0.3, zorder=1)

        # Only set axis labels if the caller has not already done so
        if not ax.get_xlabel():
            ax.set_xlabel("Time (seconds)", fontsize=10)
        if not ax.get_ylabel():
            ax.set_ylabel("Signal Value", fontsize=10)

        return legend_handles
183
+
184
+ def plot_motion_curve_for_all_chunks(self, chunks_y_preprocessed, chunks_peaks, chunks_depth, chunks_rate, chunks_start_and_end_indices, posture_warnings_regions, sampling_interval_in_frames, fps, plot_output_path):
185
+ """Plot combined analysis with connected chunks and proper error regions"""
186
+
187
+ self._assign_graph_data(chunks_y_preprocessed, chunks_peaks, chunks_depth, chunks_rate, chunks_start_and_end_indices, posture_warnings_regions, sampling_interval_in_frames, fps)
188
+ cpr_logger.info("[Graph Plotter] Starting to plot motion curve for all chunks")
189
+
190
+ # Create figure even if there's only error regions to plot
191
+ plt.figure(figsize=(16, 8))
192
+ ax = plt.gca()
193
+ ax.xaxis.set_major_locator(MultipleLocator(5))
194
+
195
+ # Plot CPR chunks if they exist
196
+ if self.chunks_start_and_end_indices:
197
+ sorted_chunks = sorted(zip(self.chunks_start_and_end_indices,
198
+ self.chunks_depth,
199
+ self.chunks_rate),
200
+ key=lambda x: x[0][0])
201
+ cpr_logger.info(f"[Graph Plotter] Processing {len(sorted_chunks)} CPR chunks")
202
+
203
+ prev_last_point = None
204
+ prev_chunk_end = None
205
+
206
+ for idx, chunk in enumerate(sorted_chunks):
207
+ cpr_logger.info(f"[Graph Plotter] Rendering chunk {idx+1}/{len(sorted_chunks)}")
208
+ prev_last_point, prev_chunk_end = self._plot_single_chunk(ax, chunk, idx, prev_last_point, prev_chunk_end)
209
+
210
+ self._print_analysis_details(sorted_chunks)
211
+ else:
212
+ cpr_logger.info("[Graph Plotter] No chunk data available for plotting")
213
+ # Set reasonable default axis if only plotting errors
214
+ ax.set_ylim(0, 100) # Example default Y-axis range for position
215
+
216
+ # Always plot error regions if they exist
217
+ computed_error_regions = [(er['start_frame']/self.fps, er['end_frame']/self.fps)
218
+ for er in self.posture_warnings_regions]
219
+
220
+ # In the "Configure remaining elements" section (replace existing legend code):
221
+ handles, labels = ax.get_legend_handles_labels()
222
+
223
+ # Collect error handles from _plot_error_regions (modified to return them)
224
+ error_legend_handles = self._plot_error_regions(ax, computed_error_regions)
225
+
226
+ # Merge both sets of handles/labels
227
+ if error_legend_handles:
228
+ handles += error_legend_handles
229
+ labels += [h.get_label() for h in error_legend_handles]
230
+
231
+ # Remove duplicate labels
232
+ unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]
233
+
234
+ # Create single horizontal legend at bottom
235
+ if unique:
236
+ ax.legend(
237
+ *zip(*unique),
238
+ loc='upper center',
239
+ bbox_to_anchor=(0.5, -0.08),
240
+ ncol=len(unique),
241
+ fontsize=8,
242
+ handletextpad=0.3,
243
+ columnspacing=1.5,
244
+ framealpha=0.9,
245
+ borderpad=0.7
246
+ )
247
+ plt.tight_layout(rect=[0, 0.025, 1, 1])
248
+
249
+ plt.xlabel("Time (seconds)")
250
+ plt.ylabel("Vertical Position (px)")
251
+ plt.title("Complete CPR Analysis with Metrics", pad=20) # Added pad parameter
252
+
253
+ plt.grid(True)
254
+ cpr_logger.info(f"\n[Graph Plotter] Finalizing plot layout")
255
+
256
+ # Adjust tight_layout with additional padding
257
+ plt.tight_layout(rect=[0, 0.025, 1, 0.95]) # Reduced top from 1 to 0.95 to make space
258
+
259
+ if plot_output_path:
260
+ # Ensure directory exists
261
+ os.makedirs(os.path.dirname(plot_output_path), exist_ok=True)
262
+ plt.savefig(plot_output_path, dpi=300, bbox_inches='tight')
263
+ cpr_logger.info(f"[Graph Plotter] Plot saved to {plot_output_path}")
264
+
265
+ #plt.show()
266
+ cpr_logger.info("[Graph Plotter] Plot display complete")
267
+
268
+ def _print_analysis_details(self, sorted_chunks):
269
+ """Combined helper for printing chunks and error regions in seconds"""
270
+ cpr_logger.info(f"\n\n=== CPR Chunk Analysis ===")
271
+ display_idx = 0 # Separate counter for displayed indices
272
+
273
+ # Convert frame numbers to seconds using video FPS
274
+ fps = self.fps # Get FPS from class instance
275
+
276
+ for ((start_frame, end_frame), depth, rate) in sorted_chunks:
277
+ # Skip chunks with both values at 0
278
+ if depth == 0 and rate == 0:
279
+ continue
280
+
281
+ # Convert frames to seconds
282
+ start_sec = start_frame / fps
283
+ end_sec = end_frame / fps
284
+ duration_sec = (end_frame - start_frame + 1) / fps # +1 to include both endpoints
285
+
286
+ cpr_logger.info(f"[Graph Plotter] Chunk {display_idx+1}: "
287
+ f"Time {start_sec:.2f}s - {end_sec:.2f}s ({duration_sec:.2f}s), "
288
+ f"Depth: {depth:.1f}cm, Rate: {rate:.1f}cpm")
289
+
290
+ #! Formatted json to mobile
291
+
292
+ chunk_data = {
293
+ "start": round(start_sec, 2),
294
+ "end": round(end_sec, 2),
295
+ "depth": round(depth, 1),
296
+ "rate": round(rate, 1)
297
+ }
298
+ self._chunks_json_data.append(chunk_data)
299
+
300
+ display_idx += 1
301
+
302
+ cpr_logger.info(f"\n\n=== Error Region Analysis ===")
303
+
304
+ for i, region in enumerate(self.posture_warnings_regions): # Updated to match actual attribute name
305
+ start_frame = region['start_frame']
306
+ end_frame = region['end_frame']
307
+ errors = region['posture_warnings']
308
+
309
+ # Convert to seconds
310
+ start_sec = start_frame / fps
311
+ end_sec = end_frame / fps
312
+ error_str = ", ".join(errors) if errors else "No errors detected"
313
+
314
+ cpr_logger.info(f"[Graph Plotter] Region {i+1}: "
315
+ f"Time {start_sec:.2f}s - {end_sec:.2f}s - {error_str}")
316
+
317
+ cpr_logger.info(f"\n\n")
CPR/logging_config.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# logging_config.py
"""Shared logger configuration for the CPR analyzer package."""
import logging

# 1. Set default log level here (change this value as needed)
DEFAULT_LOG_LEVEL = logging.INFO  # Switch to logging.ERROR for errors-only by default

# 2. Configure logger with default level
cpr_logger = logging.getLogger("CPR-Analyzer")
cpr_logger.setLevel(DEFAULT_LOG_LEVEL)

# 3/4. Attach the console handler exactly once. logging.getLogger() returns the
# same logger object on every call, so if this module is re-imported/reloaded
# an unconditional addHandler() would stack duplicate handlers and every log
# line would be printed multiple times.
if not cpr_logger.handlers:
    console_handler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    console_handler.setFormatter(formatter)
    cpr_logger.addHandler(console_handler)

# 5. Prevent propagation to the root logger (avoids double printing when the
# application also configures root logging)
cpr_logger.propagate = False
CPR/metrics_calculator.py CHANGED
@@ -4,230 +4,320 @@ from scipy.signal import savgol_filter, find_peaks
4
  import matplotlib.pyplot as plt
5
  import sys
6
  import cv2
7
- import cloudinary as cld
8
- import cloudinary.uploader
9
- import tempfile
10
- import json
11
  import os
 
12
 
13
  class MetricsCalculator:
14
  """Rate and depth calculation from motion data with improved peak detection"""
15
 
16
- def __init__(self, frame_count, shoulder_width_cm):
 
17
  self.shoulder_width_cm = shoulder_width_cm
 
 
 
 
 
 
 
 
18
  self.peaks = np.array([])
19
  self.peaks_max = np.array([])
20
  self.peaks_min = np.array([])
21
- self.y_smoothed = np.array([])
22
  self.cm_px_ratio = None
23
- self.midpoints_list = np.array([])
24
- self.shoulder_distances = []
25
 
26
- # Parameters for the final report
 
 
 
 
 
 
 
 
 
27
  self.chunks_depth = []
28
  self.chunks_rate = []
29
- self.chunks_start_and_end_indices = []
30
 
31
- self.chunks_midpoints = []
32
- self.chunks_smoothed = []
33
- self.chunks_peaks = []
34
 
35
- self.frame_count = frame_count
36
 
37
- # Validation thresholds
38
- self.depth = None
39
- self.rate = None
40
 
 
41
  self.min_depth_threshold = 3.0 # cm
42
  self.max_depth_threshold = 6.0 # cm
43
 
44
  self.min_rate_threshold = 100.0 # cpm
45
  self.max_rate_threshold = 120.0 # cpm
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
- def smooth_midpoints(self, midpoints):
49
- """Apply Savitzky-Golay filter to smooth motion data"""
50
- self.midpoints_list = np.array(midpoints)
 
 
 
 
 
 
 
51
 
52
- if len(self.midpoints_list) > 5: # Ensure enough data points
53
  try:
54
- self.y_smoothed = savgol_filter(
55
- self.midpoints_list[:, 1],
56
- window_length=10,
57
  polyorder=2,
58
  mode='nearest'
59
  )
60
- return True
61
  except Exception as e:
62
- print(f"Smoothing error: {e}")
63
- self.y_smoothed = self.midpoints_list[:, 1] # Fallback to original
64
- return False
65
  else:
66
- self.y_smoothed = self.midpoints_list[:, 1] # Not enough points
67
- return False
 
 
 
 
68
 
69
- def detect_peaks(self):
70
- """Improved peak detection with adjusted prominence for min peaks"""
71
- if self.y_smoothed.size == 0:
72
- print("No smoothed values found for peak detection")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  return False
74
 
75
  try:
76
- distance = min(10, len(self.y_smoothed)) # Dynamic distance based on data length
77
 
78
  # Detect max peaks with default prominence
79
- self.peaks_max, _ = find_peaks(self.y_smoothed, distance=distance)
80
 
81
  # Detect min peaks with reduced or no prominence requirement
82
  self.peaks_min, _ = find_peaks(
83
- -self.y_smoothed,
84
  distance=distance,
85
- prominence=(0.3, None) # Adjust based on your data's characteristics
86
  )
87
 
88
  self.peaks = np.sort(np.concatenate((self.peaks_max, self.peaks_min)))
89
 
90
  return len(self.peaks) > 0
91
  except Exception as e:
92
- print(f"Peak detection error: {e}")
93
  return False
94
 
95
- def _validate_chunk(self, chunk_start_frame_index, chunk_end_frame_index):
96
- """Validate that the data length matches the expected frame range.
97
- Terminates the program with error code 1 if validation fails.
98
-
 
 
 
99
  Args:
100
- chunk_start_frame_index: Start frame index of the chunk
101
- chunk_end_frame_index: End frame index of the chunk
102
-
103
- Exits:
104
- If validation fails, prints error message and exits with code 1
105
  """
106
- try:
107
- # Calculate expected number of frames
108
- num_frames = chunk_end_frame_index - chunk_start_frame_index + 1
109
-
110
- # Validate midpoints data
111
- if len(self.midpoints_list[:, 1]) != num_frames:
112
- print(f"\nERROR: Data length mismatch in midpoints_list")
113
- print(f"Expected: {num_frames} frames ({chunk_start_frame_index}-{chunk_end_frame_index})")
114
- print(f"Actual: {len(self.midpoints_list[:, 1])} frames")
115
- sys.exit(1)
116
-
117
- # Validate smoothed data
118
- if len(self.y_smoothed) != num_frames:
119
- print(f"\nERROR: Data length mismatch in y_smoothed")
120
- print(f"Expected: {num_frames} frames ({chunk_start_frame_index}-{chunk_end_frame_index})")
121
- print(f"Actual: {len(self.y_smoothed)} frames")
122
- sys.exit(1)
123
-
124
- except Exception as e:
125
- print(f"\nCRITICAL VALIDATION ERROR: {str(e)}")
126
- sys.exit(1)
127
 
128
- def calculate_metrics(self, shoulder_distances, fps, chunk_start_frame_index, chunk_end_frame_index):
129
- """Calculate compression metrics with improved calculations"""
 
 
 
 
130
 
131
- self._validate_chunk(chunk_start_frame_index, chunk_end_frame_index)
132
-
133
- self.shoulder_distances = shoulder_distances
 
 
 
 
134
 
 
 
 
 
135
  try:
136
- # Calculate pixel to cm ratio
137
- if len(self.shoulder_distances) > 0:
138
- avg_dist = np.mean(self.shoulder_distances)
139
- self.cm_px_ratio = self.shoulder_width_cm / avg_dist
140
- else:
141
- print("No shoulder distances available")
142
- return None, None
143
-
144
- # Depth calculation using all peaks
145
  depth = None
146
  if len(self.peaks) > 1:
147
- depth = np.mean(np.abs(np.diff(self.y_smoothed[self.peaks]))) * self.cm_px_ratio
148
 
149
- # Rate calculation using only compression peaks (peaks_max)
150
  rate = None
151
  if len(self.peaks_max) > 1:
152
- rate = 1 / (np.mean(np.diff(self.peaks_max)) / fps) * 60 # Convert to CPM
153
-
 
 
 
154
  if depth is None or rate is None:
155
  depth = 0
156
  rate = 0
157
- self.peaks = np.array([]) # Reset peaks if no valid data
158
 
159
- # Store the results of this chunk for the final report if they are not None
160
  self.depth = depth
161
  self.rate = rate
 
 
 
 
 
 
162
 
163
- self.chunks_depth.append(depth)
164
- self.chunks_rate.append(rate)
165
- self.chunks_start_and_end_indices.append((chunk_start_frame_index, chunk_end_frame_index))
166
-
167
- self.chunks_midpoints.append(self.midpoints_list.copy())
168
- self.chunks_smoothed.append(self.y_smoothed.copy())
169
- self.chunks_peaks.append(self.peaks.copy())
170
-
171
 
172
- return depth, rate
173
-
174
- except Exception as e:
175
- print(f"Metric calculation error: {e}")
176
- return None, None
177
-
178
- def plot_motion_curve(self, chunk_start_frame_index, chunk_end_frame_index):
179
- """Enhanced visualization with original and smoothed data"""
180
- if self.midpoints_list.size == 0:
181
- print("No midpoint data to plot")
182
- return
183
-
184
- self._validate_chunk(chunk_start_frame_index, chunk_end_frame_index)
185
 
186
- # Create frame index array for x-axis
187
- frame_indices = np.arange(chunk_start_frame_index, chunk_end_frame_index + 1)
188
 
 
 
189
 
190
- plt.figure(figsize=(12, 6))
191
 
192
- # Plot original and smoothed data with correct frame indices
193
- plt.plot(frame_indices, self.midpoints_list[:, 1],
194
- label="Original Motion",
195
- color="red",
196
- linestyle="dashed",
197
- alpha=0.6)
198
-
199
- plt.plot(frame_indices, self.y_smoothed,
200
- label="Smoothed Motion",
201
- color="blue",
202
- linewidth=2)
203
-
204
- # Plot peaks if detected
205
- if self.peaks.size > 0:
206
- plt.plot(frame_indices[self.peaks],
207
- self.y_smoothed[self.peaks],
208
- "x",
209
- color="green",
210
- markersize=10,
211
- label="Peaks")
212
- else:
213
- print("No peaks to plot")
214
-
215
- plt.xlabel("Frame Number")
216
- plt.ylabel("Vertical Position (px)")
217
- plt.title("Compression Motion Analysis")
218
- plt.grid(True)
219
- plt.legend()
220
- plt.show()
221
-
222
- def calculate_weighted_averages(self):
223
- """Calculate weighted averages based on chunk durations
224
  """
 
 
 
 
 
 
 
225
  if not self.chunks_depth or not self.chunks_rate or not self.chunks_start_and_end_indices:
226
- print("[WARNING] No chunk data available for averaging")
227
  return None
228
 
229
  if not (len(self.chunks_depth) == len(self.chunks_rate) == len(self.chunks_start_and_end_indices)):
230
- print("[ERROR] Mismatched chunk data lists")
231
  return None
232
 
233
  total_weight = 0
@@ -246,306 +336,134 @@ class MetricsCalculator:
246
  total_weight += chunk_duration
247
 
248
  if total_weight == 0:
249
- print("[ERROR] Total chunk durations is zero")
250
- return None
251
 
252
- weighted_depth = weighted_depth_sum / total_weight
253
- weighted_rate = weighted_rate_sum / total_weight
254
-
255
- print(f"[RESULTS] Weighted average depth: {weighted_depth:.1f} cm")
256
- print(f"[RESULTS] Weighted average rate: {weighted_rate:.1f} cpm")
257
-
258
- return weighted_depth, weighted_rate
259
-
260
- def plot_motion_curve_for_all_chunks(self, posture_errors_for_all_error_region):
261
- """Plot combined analysis with metrics annotations and posture error labels"""
262
- if not self.chunks_start_and_end_indices:
263
- print("No chunk data available for plotting")
264
- return
265
-
266
- # Print chunk information before plotting
267
- print("\n=== Chunk Ranges ===")
268
- for i, (start_end, depth, rate) in enumerate(zip(self.chunks_start_and_end_indices,
269
- self.chunks_depth,
270
- self.chunks_rate)):
271
- print(f"Chunk {i+1}: Frames {start_end[0]}-{start_end[1]} | "
272
- f"Depth: {depth:.1f}cm | Rate: {rate:.1f}cpm")
273
-
274
- plt.figure(figsize=(16, 8))
275
- ax = plt.gca()
276
 
277
- # Sort chunks chronologically
278
- sorted_chunks = sorted(zip(self.chunks_start_and_end_indices,
279
- self.chunks_depth,
280
- self.chunks_rate),
281
- key=lambda x: x[0][0])
 
 
 
 
 
282
 
283
- # 1. Plot all valid chunks with metrics
284
- prev_chunk_end = None # Track previous chunk's end position
285
-
286
- for idx, ((start, end), depth, rate) in enumerate(sorted_chunks):
287
- chunk_frames = np.arange(start, end + 1)
288
- midpoints = self.chunks_midpoints[idx]
289
- smoothed = self.chunks_smoothed[idx]
290
- peaks = self.chunks_peaks[idx]
291
-
292
- # Add separator line between chunks
293
- if prev_chunk_end is not None:
294
- separator_x = prev_chunk_end + 0.5 # Midpoint between chunks
295
- ax.axvline(x=separator_x, color='orange', linestyle=':', linewidth=1.5)
296
 
297
- # Plot data
298
- ax.plot(chunk_frames, midpoints[:, 1],
299
- color="red", linestyle="dashed", alpha=0.6,
300
- label="Original Motion" if idx == 0 else "")
301
- ax.plot(chunk_frames, smoothed,
302
- color="blue", linewidth=2,
303
- label="Smoothed Motion" if idx == 0 else "")
304
 
305
- # Plot peaks
306
- if peaks.size > 0:
307
- ax.plot(chunk_frames[peaks], smoothed[peaks],
308
- "x", color="green", markersize=8,
309
- label="Peaks" if idx == 0 else "")
310
-
311
- # Annotate chunk metrics
312
- mid_frame = (start + end) // 2
313
- ax.annotate(f"Depth: {depth:.1f}cm\nRate: {rate:.1f}cpm",
314
- xy=(mid_frame, np.max(smoothed)),
315
- xytext=(0, 10), textcoords='offset points',
316
- ha='center', va='bottom', fontsize=9,
317
- bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5))
318
-
319
- # Update previous chunk end tracker
320
- prev_chunk_end = end
321
- # 2. Identify and label posture error regions
322
- error_regions = []
323
-
324
- # Before first chunk
325
- if sorted_chunks[0][0][0] > 0:
326
- error_regions.append((0, sorted_chunks[0][0][0]-1))
327
-
328
- # Between chunks
329
- for i in range(1, len(sorted_chunks)):
330
- prev_end = sorted_chunks[i-1][0][1]
331
- curr_start = sorted_chunks[i][0][0]
332
- if curr_start - prev_end > 1:
333
- error_regions.append((prev_end + 1, curr_start - 1))
334
-
335
- # After last chunk
336
- last_end = sorted_chunks[-1][0][1]
337
- if last_end < self.frame_count - 1:
338
- error_regions.append((last_end + 1, self.frame_count - 1))
339
-
340
- # Print error regions information
341
- print("\n=== Error Regions ===")
342
- for i, (start, end) in enumerate(error_regions):
343
- # Get errors for this region if available
344
- try:
345
- errors = posture_errors_for_all_error_region[i]
346
- error_str = ", ".join(errors) if errors else "No errors detected"
347
- except IndexError:
348
- error_str = "No error data"
349
-
350
- print(f"Error Region {i+1}: Frames {start}-{end} | Errors: {error_str}")
351
 
352
- # Shade and label error regions
353
- for error_region_index, region in enumerate (error_regions):
354
- ax.axvspan(region[0], region[1],
355
- color='gray', alpha=0.2,
356
- label='Posture Errors' if region == error_regions[0] else "")
357
-
358
- # Add vertical dotted lines at boundaries
359
- ax.axvline(x=region[0], color='black', linestyle=':', alpha=0.5)
360
- ax.axvline(x=region[1], color='black', linestyle=':', alpha=0.5)
361
-
362
- # Add frame number labels - properly aligned
363
- y_pos = ax.get_ylim()[0] + 0.02 * (ax.get_ylim()[1] - ax.get_ylim()[0])
364
-
365
- # Start frame label - right aligned before the line
366
- ax.text(region[0] - 1, y_pos,
367
- f"Frame {region[0]}",
368
- rotation=90, va='bottom', ha='right',
369
- fontsize=8, alpha=0.7)
370
-
371
- # End frame label - left aligned after the line
372
- ax.text(region[1] + 1, y_pos,
373
- f"Frame {region[1]}",
374
- rotation=90, va='bottom', ha='left',
375
- fontsize=8, alpha=0.7)
376
-
377
- # Add error labels if available
378
- if posture_errors_for_all_error_region:
379
- try:
380
- # Get errors for this specific error region
381
- region_errors = posture_errors_for_all_error_region[error_region_index]
382
-
383
- # Format errors text
384
- error_text = "Errors:\n" + "\n".join(region_errors) if region_errors else ""
385
-
386
- # Position text in middle of the error region
387
- mid_frame = (region[0] + region[1]) // 2
388
- ax.text(mid_frame, np.mean(ax.get_ylim()),
389
- error_text,
390
- ha='center', va='center',
391
- fontsize=9, color='red', alpha=0.8,
392
- bbox=dict(boxstyle='round,pad=0.3',
393
- fc='white', ec='red', alpha=0.7))
394
- except IndexError:
395
- print(f"No error data for region {error_region_index}")
396
-
397
- # 3. Add weighted averages
398
- if hasattr(self, 'weighted_depth') and hasattr(self, 'weighted_rate'):
399
- ax.annotate(f"Weighted Averages:\nDepth: {self.weighted_depth:.1f}cm\nRate: {self.weighted_rate:.1f}cpm",
400
- xy=(0.98, 0.98), xycoords='axes fraction',
401
- ha='right', va='top', fontsize=10,
402
- bbox=dict(boxstyle='round,pad=0.5', fc='white', ec='black'))
403
-
404
- # 4. Configure legend and layout
405
- handles, labels = ax.get_legend_handles_labels()
406
- unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]
407
- ax.legend(*zip(*unique), loc='upper right')
408
-
409
- plt.xlabel("Frame Number")
410
- plt.ylabel("Vertical Position (px)")
411
- plt.title("Complete CPR Analysis with Metrics")
412
- plt.grid(True)
413
- plt.tight_layout()
414
-
415
- os.makedirs("plots", exist_ok=True)
416
- plot_path = "plots/cpr_analysis.png"
417
- plt.savefig("plots/cpr_analysis.png", dpi=300)
418
- response = cloudinary.uploader.upload(plot_path, resource_type="image")
419
- plt.close()
420
- plt.show()
421
- return response['secure_url']
422
-
423
- def validate_calculate_metrics(self):
424
- """Validate the calculated metrics against thresholds"""
425
- if self.depth is None or self.rate is None:
426
- print("[ERROR] Depth and rate must be calculated before validation")
427
- return False
428
 
429
- depth_valid = self.min_depth_threshold <= self.depth <= self.max_depth_threshold
430
- rate_valid = self.min_rate_threshold <= self.rate <= self.max_rate_threshold
431
 
432
- if not depth_valid:
433
- print(f"[WARNING] Depth {self.depth:.1f}cm is out of range ({self.min_depth_threshold}-{self.max_depth_threshold})")
434
- if not rate_valid:
435
- print(f"[WARNING] Rate {self.rate:.1f}cpm is out of range ({self.min_rate_threshold}-{self.max_rate_threshold})")
 
436
 
437
- return depth_valid and rate_valid
438
-
439
- def get_json_chunk_data(self):
440
- """Get chunk data in JSON format for external use"""
441
- if not self.chunks_start_and_end_indices:
442
- print("No chunk data available")
443
- return None
444
 
445
- chunk_data = [
446
- {
447
- "start": round(start / 30, 2),
448
- "end": round(end / 30, 2),
449
- "depth": depth,
450
- "rate": rate
451
- }
452
- for (start, end), depth, rate in zip(
453
- self.chunks_start_and_end_indices,
454
- self.chunks_depth,
455
- self.chunks_rate
456
- )
457
- if (end - start) >= 60
458
- ]
459
 
460
- return chunk_data
461
-
462
-
463
- def annotate_video_with_chunks(self, input_video_path, posture_errors_for_all_error_region):
464
- cap = cv2.VideoCapture(input_video_path)
465
- fps = cap.get(cv2.CAP_PROP_FPS)
466
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
467
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
468
-
469
- # Validate video opened
470
- if not cap.isOpened() or fps == 0 or width == 0 or height == 0:
471
- raise ValueError("Failed to open input video or invalid video properties.")
472
-
473
- # Create named temp file for cloudinary upload (must not be open during writing on Windows)
474
- temp_video = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
475
- temp_video_path = temp_video.name
476
- temp_video.close()
477
-
478
- # ALSO save a local version for inspection
479
- #os.makedirs("temp_output", exist_ok=True)
480
- #local_output_path = os.path.join("temp_output", "annotated_output.mp4")
481
-
482
- out = cv2.VideoWriter(temp_video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
483
- #local_out = cv2.VideoWriter(local_output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
484
-
485
- frame_idx = 0
486
- current_chunk = 0
487
-
488
- # Generate error regions
489
- error_regions = []
490
- # Before first chunk
491
- if self.chunks_start_and_end_indices[0][0] > 0:
492
- error_regions.append((0, self.chunks_start_and_end_indices[0][0] - 1))
493
-
494
- # Between chunks
495
- for i in range(1, len(self.chunks_start_and_end_indices)):
496
- prev_end = self.chunks_start_and_end_indices[i - 1][1]
497
- curr_start = self.chunks_start_and_end_indices[i][0]
498
- if curr_start - prev_end > 1:
499
- error_regions.append((prev_end + 1, curr_start - 1))
500
-
501
- # After last chunk
502
- last_end = self.chunks_start_and_end_indices[-1][1]
503
- if last_end < self.frame_count - 1:
504
- error_regions.append((last_end + 1, self.frame_count - 1))
505
-
506
- # Iterate over the video frames and annotate
507
- while cap.isOpened():
508
- ret, frame = cap.read()
509
- if not ret:
510
- break
511
-
512
- # Handle chunk annotation
513
- if current_chunk < len(self.chunks_start_and_end_indices):
514
- start_idx, end_idx = self.chunks_start_and_end_indices[current_chunk]
515
- if start_idx <= frame_idx <= end_idx:
516
- rate = self.chunks_rate[current_chunk]
517
- depth = self.chunks_depth[current_chunk]
518
-
519
- text1 = f"Rate: {rate:.1f}cpm"
520
- text2 = f"Depth: {depth:.1f}cm"
521
- cv2.putText(frame, text1, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
522
- cv2.putText(frame, text2, (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
523
-
524
- if frame_idx > end_idx:
525
- current_chunk += 1
526
-
527
- # Annotate error regions
528
- for i, (start, end) in enumerate(error_regions):
529
- if start <= frame_idx <= end:
530
- region_errors = posture_errors_for_all_error_region[i]
531
-
532
- # Format errors text
533
- error_text = "Errors: ".join(region_errors) if region_errors else ""
534
- cv2.putText(frame, error_text, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
535
-
536
- out.write(frame)
537
- #local_out.write(frame)
538
- frame_idx += 1
539
-
540
- cap.release()
541
- out.release()
542
- #local_out.release()
543
-
544
- # Upload to Cloudinary
545
- response = cloudinary.uploader.upload(temp_video_path, resource_type="video")
546
-
547
- # Clean up temp file
548
- os.remove(temp_video_path)
549
-
550
- #print(f"✅ Local saved video at: {local_output_path}")
551
- return response['secure_url']
 
4
  import matplotlib.pyplot as plt
5
  import sys
6
  import cv2
 
 
 
 
7
  import os
8
+ from CPR.logging_config import cpr_logger
9
 
10
  class MetricsCalculator:
11
  """Rate and depth calculation from motion data with improved peak detection"""
12
 
13
    def __init__(self, shoulder_width_cm):
        """Initialize the metrics calculator.

        Args:
            shoulder_width_cm (float): Real-world shoulder width in centimeters,
                used to derive the cm-per-pixel ratio from detected shoulder
                distances.
        """
        # Configuration parameters
        self.shoulder_width_cm = shoulder_width_cm

        # Parameters for cleaning the smoothed midpoints (impulse-noise removal
        # via median filtering)
        self.removing_impulse_noise_window_size = 5
        self.removing_impulse_noise_threshold = 3.0

        # Parameters for one chunk
        self.y_preprocessed = np.array([])  # preprocessed vertical signal of the current chunk

        self.peaks = np.array([])      # all detected peaks (max and min, sorted)
        self.peaks_max = np.array([])  # maxima peaks
        self.peaks_min = np.array([])  # minima peaks

        self.cm_px_ratio = None  # cm-per-pixel scale factor, set during calculation

        self.depth = None  # last computed compression depth (cm)
        self.rate = None   # last computed compression rate (cpm)

        self.rate_and_depth_warnings = []  # warnings for the current chunk

        # Parameters for all chunks (accumulated as each chunk is processed)
        self.chunks_y_preprocessed = []

        self.chunks_peaks = []

        self.chunks_depth = []
        self.chunks_rate = []

        self.weighted_depth = None  # duration-weighted average depth across chunks
        self.weighted_rate = None   # duration-weighted average rate across chunks

        self.chunks_start_and_end_indices = []  # (start_frame, end_frame) per chunk

        self.chunks_rate_and_depth_warnings = []  # per-chunk warning lists

        # Parameters for validation (acceptable depth/rate ranges)
        self.min_depth_threshold = 3.0 # cm
        self.max_depth_threshold = 6.0 # cm

        self.min_rate_threshold = 100.0 # cpm
        self.max_rate_threshold = 120.0 # cpm
57
+ #^ ################# Validating #######################
58
+
59
+ def validate_midpoints_and_frames_count_in_chunk(self, y_exact, chunk_start_frame_index, chunk_end_frame_index, sampling_interval_in_frames):
60
+ """
61
+ Validate the number of midpoints and frames in a chunk
62
+
63
+ Args:
64
+ y_exact (np.ndarray): The exact y-values of the midpoints.
65
+ chunk_start_frame_index (int): The starting frame index of the chunk.
66
+ chunk_end_frame_index (int): The ending frame index of the chunk.
67
+ sampling_interval_in_frames (int): The interval at which frames are sampled.
68
+
69
+ Raises:
70
+ ValueError: If the number of midpoints does not match the expected number for the given chunk.
71
+ """
72
+ try:
73
+ # Calculate expected number of sampled frames
74
+ start = chunk_start_frame_index
75
+ end = chunk_end_frame_index
76
+ interval = sampling_interval_in_frames
77
+
78
+ # Mathematical formula to count sampled frames
79
+ expected_samples = (end // interval) - ((start - 1) // interval)
80
+
81
+ # Validate
82
+ actual_y_exact_length = len(y_exact)
83
+ if actual_y_exact_length != expected_samples:
84
+ cpr_logger.info(f"\nERROR: Mismatch in expected and actual samples")
85
+ cpr_logger.info(f"Expected: {expected_samples} samples (frames {start}-{end} @ every {interval} frames)")
86
+ cpr_logger.info(f"Actual: {actual_y_exact_length} midoints points recieived")
87
+ sys.exit(1)
88
+
89
+ except Exception as e:
90
+ cpr_logger.error(f"\nCRITICAL VALIDATION ERROR: {str(e)}")
91
+ sys.exit(1)
92
+
93
+ #^ ################# Preprocessing #######################
94
 
95
+ def _smooth_midpoints(self, midpoints):
96
+ """
97
+ Smooth the y-values of the midpoints using Savitzky-Golay filter
98
+
99
+ Args:
100
+ y_exact (np.ndarray): The exact y-values of the midpoints.
101
+
102
+ Returns:
103
+ np.ndarray: The smoothed y-values.
104
+ """
105
 
106
+ if len(midpoints) > 5: # Ensure enough data points
107
  try:
108
+ y_smooth = savgol_filter(
109
+ midpoints[:, 1],
110
+ window_length=3,
111
  polyorder=2,
112
  mode='nearest'
113
  )
114
+ return y_smooth
115
  except Exception as e:
116
+ cpr_logger.error(f"Smoothing error: {e}")
117
+ y_smooth = midpoints[:, 1] # Fallback to original
118
+ return y_smooth
119
  else:
120
+ y_smooth = midpoints[:, 1] # Not enough points
121
+ return y_smooth
122
+
123
+ def _clean_midpoints(self, y_smooth):
124
+ """
125
+ Clean the smoothed y-values to remove impulse noise using median filtering
126
 
127
+ Args:
128
+ y_smooth (np.ndarray): The smoothed y-values.
129
+
130
+ Returns:
131
+ np.ndarray: The cleaned y-values.
132
+ """
133
+
134
+ if len(y_smooth) < self.removing_impulse_noise_window_size:
135
+ return y_smooth # Not enough points for processing
136
+
137
+ y_clean = np.array(y_smooth, dtype=float) # Copy to avoid modifying original
138
+ half_window = self.removing_impulse_noise_window_size // 2
139
+
140
+ for i in range(len(y_smooth)):
141
+ # Get local window (handle boundaries)
142
+ start = max(0, i - half_window)
143
+ end = min(len(y_smooth), i + half_window + 1)
144
+ window = y_smooth[start:end]
145
+
146
+ # Calculate local median and MAD (robust statistics)
147
+ med = np.median(window)
148
+ mad = 1.4826 * np.median(np.abs(window - med)) # Median Absolute Deviation
149
+
150
+ # Detect and replace outliers
151
+ if abs(y_smooth[i] - med) > self.removing_impulse_noise_threshold * mad:
152
+ # Replace with median of immediate neighbors (better than global median)
153
+ left = y_smooth[max(0, i-1)]
154
+ right = y_smooth[min(len(y_smooth)-1, i+1)]
155
+ y_clean[i] = np.median([left, right])
156
+
157
+ return y_clean
158
+
159
+ def preprocess_midpoints(self, midpoints):
160
+ """
161
+ Preprocess the y-values of the midpoints by smoothing and cleaning
162
+
163
+ Sets:
164
+ y_preprocessed (np.ndarray): The preprocessed y-values.
165
+
166
+ Args:
167
+ y_exact (np.ndarray): The exact y-values of the midpoints.
168
+
169
+ Returns:
170
+ bool: True if preprocessing was successful, False otherwise.
171
+ """
172
+
173
+ y_smooth = self._smooth_midpoints(midpoints)
174
+ y_clean = self._clean_midpoints(y_smooth)
175
+
176
+ self.y_preprocessed = y_clean
177
+
178
+ return len(self.y_preprocessed) > 0 # Return True if preprocessing was successful
179
+
180
+ #^ ################# Processing #######################
181
+
182
+ def detect_midpoints_peaks(self):
183
+ """
184
+ Detect peaks in the preprocessed y-values using dynamic distance
185
+
186
+ Sets:
187
+ peaks (np.ndarray): The detected peaks.
188
+ peaks_max (np.ndarray): The detected max peaks.
189
+ peaks_min (np.ndarray): The detected min peaks.
190
+
191
+ Returns:
192
+ bool: True if peaks were detected, False otherwise.
193
+ """
194
+
195
+ if self.y_preprocessed.size == 0:
196
+ cpr_logger.info("No smoothed values found for peak detection")
197
  return False
198
 
199
  try:
200
+ distance = min(1, len(self.y_preprocessed)) # Dynamic distance based on data length
201
 
202
  # Detect max peaks with default prominence
203
+ self.peaks_max, _ = find_peaks(self.y_preprocessed, distance=distance)
204
 
205
  # Detect min peaks with reduced or no prominence requirement
206
  self.peaks_min, _ = find_peaks(
207
+ -self.y_preprocessed,
208
  distance=distance,
209
+ prominence=(0.5, None) # Adjust based on your data's characteristics
210
  )
211
 
212
  self.peaks = np.sort(np.concatenate((self.peaks_max, self.peaks_min)))
213
 
214
  return len(self.peaks) > 0
215
  except Exception as e:
216
+ cpr_logger.error(f"Peak detection error: {e}")
217
  return False
218
 
219
+ def calculate_cm_px_ratio(self, shoulder_distances):
220
+ """
221
+ Calculate the ratio of cm to pixels based on shoulder distances
222
+
223
+ Sets:
224
+ cm_px_ratio (float): The ratio of cm to pixels.
225
+
226
  Args:
227
+ shoulder_distances (list): List of shoulder distances in pixels.
 
 
 
 
228
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
229
 
230
+ if len(shoulder_distances) > 0:
231
+ avg_shoulder_width_px = np.mean(shoulder_distances)
232
+ self.cm_px_ratio = self.shoulder_width_cm / avg_shoulder_width_px
233
+ else:
234
+ self.cm_px_ratio = None
235
+ cpr_logger.info("No shoulder distances available for cm/px ratio calculation")
236
 
237
+ def calculate_rate_and_depth_for_chunk(self, original_fps, sampling_interval_in_frames=1):
238
+ """
239
+ Calculate the rate and depth of the motion data for a chunk.
240
+
241
+ Sets:
242
+ depth (float): The calculated depth in cm.
243
+ rate (float): The calculated rate in cpm.
244
 
245
+ Args:
246
+ original_fps (float): The original frames per second of the video.
247
+ sampling_interval_in_frames (int): Number of frames skipped between samples.
248
+ """
249
  try:
250
+
251
+ # Without Adjustment: A peak distance of 5 (downsampled frames) would incorrectly be interpreted as 5/30 = 0.167 sec (too short).
252
+ # With Adjustment: The same peak distance 5 (downsampled frames) correctly represents 5/10 = 0.5 sec.
253
+
254
+ effective_fps = original_fps / sampling_interval_in_frames # Correctly reduced FPS
255
+
256
+ # Depth calculation (unchanged)
 
 
257
  depth = None
258
  if len(self.peaks) > 1:
259
+ depth = np.mean(np.abs(np.diff(self.y_preprocessed[self.peaks]))) * self.cm_px_ratio
260
 
261
+ # Rate calculation (now uses effective_fps)
262
  rate = None
263
  if len(self.peaks_max) > 1:
264
+ # Peak indices are from the downsampled signal, so we use effective_fps
265
+ peak_intervals = np.diff(self.peaks_max) # Already in downsampled frames
266
+ rate = (1 / (np.mean(peak_intervals) / effective_fps)) * 60 # Correct CPM
267
+
268
+ # Handle cases with no valid data
269
  if depth is None or rate is None:
270
  depth = 0
271
  rate = 0
272
+ self.peaks = np.array([])
273
 
 
274
  self.depth = depth
275
  self.rate = rate
276
+ except Exception as e:
277
+ cpr_logger.error(f"Error calculating rate and depth: {e}")
278
+
279
+ def assign_chunk_data(self, chunk_start_frame_index, chunk_end_frame_index):
280
+ """
281
+ Capture chunk data for later analysis
282
 
283
+ Sets:
284
+ chunks_depth (list): List of depths for each chunk.
285
+ chunks_rate (list): List of rates for each chunk.
286
+ chunks_start_and_end_indices (list): List of start and end indices for each chunk.
287
+ chunks_y_preprocessed (list): List of preprocessed y-values for each chunk.
288
+ chunks_peaks (list): List of detected peaks for each chunk.
 
 
289
 
290
+ Args:
291
+ chunk_start_frame_index (int): The starting frame index of the chunk.
292
+ chunk_end_frame_index (int): The ending frame index of the chunk.
293
+ """
294
+ self.chunks_depth.append(self.depth)
295
+ self.chunks_rate.append(self.rate)
296
+ self.chunks_start_and_end_indices.append((chunk_start_frame_index, chunk_end_frame_index))
 
 
 
 
 
 
297
 
298
+ self.chunks_y_preprocessed.append(self.y_preprocessed.copy())
299
+ self.chunks_peaks.append(self.peaks.copy())
300
 
301
+ self.current_chunk_start = chunk_start_frame_index
302
+ self.current_chunk_end = chunk_end_frame_index
303
 
304
+ self.chunks_rate_and_depth_warnings.append(self.rate_and_depth_warnings.copy())
305
 
306
+ def calculate_rate_and_depth_for_all_chunk(self):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
307
  """
308
+ Calculate the weighted average rate and depth for all chunks
309
+
310
+ Sets:
311
+ weighted_depth (float): The weighted average depth in cm.
312
+ weighted_rate (float): The weighted average rate in cpm.
313
+ """
314
+
315
  if not self.chunks_depth or not self.chunks_rate or not self.chunks_start_and_end_indices:
316
+ cpr_logger.info("[WARNING] No chunk data available for averaging")
317
  return None
318
 
319
  if not (len(self.chunks_depth) == len(self.chunks_rate) == len(self.chunks_start_and_end_indices)):
320
+ cpr_logger.info("[ERROR] Mismatched chunk data lists")
321
  return None
322
 
323
  total_weight = 0
 
336
  total_weight += chunk_duration
337
 
338
  if total_weight == 0:
339
+ self.weighted_depth = None
340
+ self.weighted_rate = None
341
 
342
+ cpr_logger.info("[ERROR] No valid chunks for averaging")
343
+ else:
344
+ self.weighted_depth = weighted_depth_sum / total_weight
345
+ self.weighted_rate = weighted_rate_sum / total_weight
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
346
 
347
+ cpr_logger.info(f"[RESULTS] Weighted average depth: {self.weighted_depth:.1f} cm")
348
+ cpr_logger.info(f"[RESULTS] Weighted average rate: {self.weighted_rate:.1f} cpm")
349
+
350
+ #^ ################# Warnings #######################
351
+
352
+ def _get_rate_and_depth_status(self):
353
+ """Internal validation logic"""
354
+
355
+ depth_status = "normal"
356
+ rate_status = "normal"
357
 
358
+ if self.depth < self.min_depth_threshold and self.depth > 0:
359
+ depth_status = "low"
360
+ elif self.depth > self.max_depth_threshold:
361
+ depth_status = "high"
 
 
 
 
 
 
 
 
 
362
 
363
+ if self.rate < self.min_rate_threshold and self.rate > 0:
364
+ rate_status = "low"
365
+ elif self.rate > self.max_rate_threshold:
366
+ rate_status = "high"
 
 
 
367
 
368
+ return depth_status, rate_status
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
369
 
370
+ def get_rate_and_depth_warnings(self):
371
+ """Get performance warnings based on depth and rate"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
372
 
373
+ depth_status, rate_status = self._get_rate_and_depth_status()
 
374
 
375
+ warnings = []
376
+ if depth_status == "low":
377
+ warnings.append("Depth too low!")
378
+ elif depth_status == "high":
379
+ warnings.append("Depth too high!")
380
 
381
+ if rate_status == "low":
382
+ warnings.append("Rate too slow!")
383
+ elif rate_status == "high":
384
+ warnings.append("Rate too fast!")
 
 
 
385
 
386
+ self.rate_and_depth_warnings = warnings
 
 
 
 
 
 
 
 
 
 
 
 
 
387
 
388
+ return warnings
389
+
390
+ #^ ################# Handle Chunk #######################
391
+
392
+ def handle_chunk(self, midpoints, chunk_start_frame_index, chunk_end_frame_index, fps, shoulder_distances, sampling_interval_in_frames):
393
+ """
394
+ Handle a chunk of motion data by validating, preprocessing, and calculating metrics
395
+ for the chunk.
396
+
397
+ Args:
398
+ y_exact (np.ndarray): The exact y-values of the midpoints.
399
+ chunk_start_frame_index (int): The starting frame index of the chunk.
400
+ chunk_end_frame_index (int): The ending frame index of the chunk.
401
+ fps (float): The frames per second of the video.
402
+ shoulder_distances (list): List of shoulder distances in pixels.
403
+
404
+ Returns:
405
+ bool: True if the chunk was processed successfully, False otherwise.
406
+ """
407
+
408
+ # The program is terminated if the validation fails
409
+ self.validate_midpoints_and_frames_count_in_chunk(midpoints, chunk_start_frame_index, chunk_end_frame_index, sampling_interval_in_frames)
410
+
411
+ preprocessing_reult = self.preprocess_midpoints(midpoints)
412
+ if not preprocessing_reult:
413
+ cpr_logger.info("Preprocessing failed, skipping chunk")
414
+ return False
415
+
416
+ self.detect_midpoints_peaks()
417
+ if not self.detect_midpoints_peaks():
418
+ cpr_logger.info("Peak detection failed, skipping chunk")
419
+
420
+ self.peaks = np.array([])
421
+ self.peaks_max = np.array([])
422
+ self.peaks_min = np.array([])
423
+
424
+ self.depth = 0
425
+ self.rate = 0
426
+
427
+ return False
428
+
429
+ self.calculate_cm_px_ratio(shoulder_distances)
430
+ if self.cm_px_ratio is None:
431
+ cpr_logger.info("cm/px ratio calculation failed, skipping chunk")
432
+
433
+ self.depth = 0
434
+ self.rate = 0
435
+
436
+ return False
437
+
438
+ self.calculate_rate_and_depth_for_chunk(fps, sampling_interval_in_frames)
439
+ if self.depth is None or self.rate is None:
440
+ cpr_logger.info("Rate and depth calculation failed, skipping chunk")
441
+ return False
442
+ else:
443
+ cpr_logger.info(f"Chunk {chunk_start_frame_index}-{chunk_end_frame_index} - Depth: {self.depth:.1f} cm, Rate: {self.rate:.1f} cpm")
444
+
445
+ self.get_rate_and_depth_warnings()
446
+
447
+ self.assign_chunk_data(chunk_start_frame_index, chunk_end_frame_index)
448
+ cpr_logger.info(f"Chunk {chunk_start_frame_index}-{chunk_end_frame_index} processed successfully")
449
+ return True
450
+
451
+ #^ ################# Comments #######################
452
+ # Between every two consecutive mini chunks, there will be "sampling interval" frames unaccounted for.
453
+ # This is because when we reach the "reporting interval" number of frames, we terminate the first mini chunk.
454
+ # But we only start the next mini chunk when we detect the next successfully processed frame.
455
+ # Which is "sampling interval" frames later at the earliest.
456
+ # We can't just initialize the next mini chunk at the "reporting interval" frame, because we need to wait for the next successful frame.
457
+ # Because maybe the next frame is a frame with posture errors.
458
+ # For better visualization, we connect between the last point of the previous chunk and the first point of the next chunk if they are "sampling interval" frames apart.
459
+ # But that is only for visualization, all calculations are done on the original frames.
460
+
461
+ # Chunks that are too short can fail any stage of the "handle chunk" process.
462
+ # If they do, we visualize what we have and ignore the rest.
463
+ # For example, a chunk with < 2 peaks will not be able to calculate the rate.
464
+ # So we will set it to zero and display the midpoints and detected peaks.
465
+ # If there are no peaks, we will set the rate to zero and display the midpoints.
466
+
467
+ # Problems with chunks could be:
468
+ # - Less than 3 seconds.
469
+ # - Not enough peaks to calculate depth and rate
 
 
 
 
 
 
 
 
 
 
CPR/pose_estimation.py CHANGED
@@ -3,13 +3,13 @@ import cv2
3
  import numpy as np
4
  from ultralytics import YOLO
5
  from CPR.keypoints import CocoKeypoints
6
- import torch
7
 
8
  class PoseEstimator:
9
  """Human pose estimation using YOLO"""
10
 
11
  def __init__(self, model_path="yolo11n-pose.pt", min_confidence=0.2):
12
- self.model = YOLO(model_path).to("cuda:0" if torch.cuda.is_available() else "cpu")
13
  self.min_confidence = min_confidence
14
 
15
  def detect_poses(self, frame):
@@ -20,7 +20,7 @@ class PoseEstimator:
20
  return None
21
  return results[0]
22
  except Exception as e:
23
- print(f"Pose detection error: {e}")
24
  return None
25
 
26
  def get_keypoints(self, results, person_idx=0):
@@ -30,7 +30,7 @@ class PoseEstimator:
30
  return None
31
  return results.keypoints.xy[person_idx].cpu().numpy()
32
  except Exception as e:
33
- print(f"Keypoint extraction error: {e}")
34
  return None
35
 
36
  def draw_keypoints(self, frame, results):
@@ -38,5 +38,5 @@ class PoseEstimator:
38
  try:
39
  return results.plot()
40
  except Exception as e:
41
- print(f"Keypoint drawing error: {e}")
42
  return frame
 
3
  import numpy as np
4
  from ultralytics import YOLO
5
  from CPR.keypoints import CocoKeypoints
6
+ from CPR.logging_config import cpr_logger
7
 
8
  class PoseEstimator:
9
  """Human pose estimation using YOLO"""
10
 
11
  def __init__(self, model_path="yolo11n-pose.pt", min_confidence=0.2):
12
+ self.model = YOLO(model_path)
13
  self.min_confidence = min_confidence
14
 
15
  def detect_poses(self, frame):
 
20
  return None
21
  return results[0]
22
  except Exception as e:
23
+ cpr_logger.error(f"Pose detection error: {e}")
24
  return None
25
 
26
  def get_keypoints(self, results, person_idx=0):
 
30
  return None
31
  return results.keypoints.xy[person_idx].cpu().numpy()
32
  except Exception as e:
33
+ cpr_logger.error(f"Keypoint extraction error: {e}")
34
  return None
35
 
36
  def draw_keypoints(self, frame, results):
 
38
  try:
39
  return results.plot()
40
  except Exception as e:
41
+ cpr_logger.error(f"Keypoint drawing error: {e}")
42
  return frame
CPR/posture_analyzer.py CHANGED
@@ -3,12 +3,10 @@ import math
3
  import cv2
4
  import numpy as np
5
  from CPR.keypoints import CocoKeypoints
 
6
 
7
  class PostureAnalyzer:
8
  """Posture analysis and visualization with comprehensive validation"""
9
-
10
- #! The warnings depend on the average readings from the last 10 frames
11
- #! This "10" should be adjusted according to the sampling rate of the video
12
 
13
  def __init__(self, right_arm_angle_threshold, left_arm_angle_threshold, wrist_distance_threshold, history_length_to_average):
14
  self.history_length_to_average = history_length_to_average
@@ -21,15 +19,6 @@ class PostureAnalyzer:
21
  self.left_arm_angle_threshold = left_arm_angle_threshold
22
  self.wrist_distance_threshold = wrist_distance_threshold
23
 
24
- self.warning_positions = {
25
- 'right_arm_angle': (50, 50),
26
- 'left_arm_angle': (50, 100),
27
- 'one_handed': (50, 150),
28
- 'hands_not_on_chest': (50, 200)
29
- }
30
-
31
- self.posture_errors_for_all_error_region = []
32
-
33
  def _calculate_angle(self, a, b, c):
34
  """Calculate angle between three points"""
35
  try:
@@ -37,7 +26,7 @@ class PostureAnalyzer:
37
  math.atan2(a[1]-b[1], a[0]-b[0]))
38
  return ang + 360 if ang < 0 else ang
39
  except Exception as e:
40
- print(f"Angle calculation error: {e}")
41
  return 0
42
 
43
  def _check_bended_right_arm(self, keypoints):
@@ -55,12 +44,12 @@ class PostureAnalyzer:
55
  avg_right = np.mean(self.right_arm_angles[-self.history_length_to_average:] if self.right_arm_angles else 0)
56
 
57
  if avg_right > self.right_arm_angle_threshold:
58
- warnings.append("Right arm bent")
59
 
60
  return warnings
61
 
62
  except Exception as e:
63
- print(f"Right arm check error: {e}")
64
 
65
  return warnings
66
 
@@ -79,132 +68,65 @@ class PostureAnalyzer:
79
  avg_left = np.mean(self.left_arm_angles[-self.history_length_to_average:] if self.left_arm_angles else 0)
80
 
81
  if avg_left < self.left_arm_angle_threshold:
82
- warnings.append("Left arm bent")
83
 
84
  return warnings
85
 
86
  except Exception as e:
87
- print(f"Left arm check error: {e}")
88
 
89
  return warnings
90
 
91
- def _check_one_handed_cpr(self, keypoints):
92
- """Check for one-handed CPR pattern (returns warning)"""
93
- warnings = []
94
- try:
95
- # Calculate wrist distance
96
- left_wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
97
- right_wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]
98
-
99
- wrist_distance = np.linalg.norm(left_wrist - right_wrist)
100
- self.wrist_distances.append(wrist_distance)
101
-
102
- # Analyze distance with moving average
103
- avg_distance = np.mean(self.wrist_distances[-self.history_length_to_average:] if self.wrist_distances else 0)
104
-
105
- if avg_distance > self.wrist_distance_threshold:
106
- warnings.append("One-handed CPR detected!")
107
-
108
- except Exception as e:
109
- print(f"One-handed CPR check error: {e}")
110
-
111
- return warnings
112
 
113
- def _check_hands_on_chest(self, wrists_midpoint, chest_params): # (cx, cy, cw, ch)
114
- """Check if hands are on the chest (returns warning)"""
115
  warnings = []
116
  try:
117
- # Check if hands are on the chest
118
- if wrists_midpoint is None or chest_params is None:
119
- return ["Hands not on chest"]
120
 
121
- # Unpack parameters
122
- wrist_x, wrist_y = wrists_midpoint
123
  cx, cy, cw, ch = chest_params
 
 
 
 
 
 
124
 
125
- if not ((cx - cw/2 < wrist_x < cx + cw/2) and (cy - ch/2 < wrist_y < cy + ch/2)):
126
- warnings.append("Hands not on chest")
127
-
 
 
 
 
 
 
 
 
 
 
 
128
  except Exception as e:
129
- print(f"Hands on chest check error: {e}")
130
 
131
  return warnings
132
 
133
- def validate_posture(self, keypoints, wrists_midpoint, chest_params):
 
134
  """Run all posture validations (returns aggregated warnings)"""
135
  warnings = []
136
- warnings += self._check_bended_right_arm(keypoints)
137
- warnings += self._check_bended_left_arm(keypoints)
138
- warnings += self._check_one_handed_cpr(keypoints)
139
- warnings += self._check_hands_on_chest(wrists_midpoint, chest_params)
140
- return warnings
141
-
142
- def display_warnings(self, frame):
143
- """Display posture warnings with colored background rectangles
144
-
145
- Args:
146
- frame: Input image frame to draw warnings on
147
-
148
- Returns:
149
- Frame with warnings and background rectangles drawn
150
- """
151
- if not self.warnings:
152
- return frame
153
-
154
- warning_config = {
155
- "Right arm bent": {
156
- "color": (0, 0, 255), # Red
157
- "position": self.warning_positions['right_arm_angle'],
158
- "text": "Right arm bent!"
159
- },
160
- "Left arm bent": {
161
- "color": (0, 255, 255), # Yellow
162
- "position": self.warning_positions['left_arm_angle'],
163
- "text": "Left arm bent!"
164
- },
165
- "One-handed": {
166
- "color": (0, 255, 0), # Green
167
- "position": self.warning_positions['one_handed'],
168
- "text": "One-handed CPR detected!"
169
- },
170
- "Hands not on chest": {
171
- "color": (255, 0, 0), # Blue
172
- "position": self.warning_positions['hands_not_on_chest'],
173
- "text": "Hands not on chest!"
174
- }
175
- }
176
 
177
- try:
178
- for warning_text, config in warning_config.items():
179
- if any(warning_text in w for w in self.warnings):
180
- self._draw_warning_banner(
181
- frame=frame,
182
- text=config['text'],
183
- color=config['color'],
184
- position=config['position']
185
- )
186
-
187
- except Exception as e:
188
- print(f"Warning display error: {e}")
189
-
190
- return frame
191
 
192
- def _draw_warning_banner(self, frame, text, color, position):
193
- """Helper function to draw a single warning banner"""
194
- (text_width, text_height), _ = cv2.getTextSize(
195
- text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
196
 
197
- x, y = position
198
- # Calculate background rectangle coordinates
199
- x1 = x - 10
200
- y1 = y - text_height - 10
201
- x2 = x + text_width + 10
202
- y2 = y + 10
203
-
204
- # Draw background rectangle
205
- cv2.rectangle(frame, (x1, y1), (x2, y2), color, -1)
206
-
207
- # Draw warning text
208
- cv2.putText(frame, text, (x, y),
209
- cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2,
210
- cv2.LINE_AA)
 
3
  import cv2
4
  import numpy as np
5
  from CPR.keypoints import CocoKeypoints
6
+ from CPR.logging_config import cpr_logger
7
 
8
  class PostureAnalyzer:
9
  """Posture analysis and visualization with comprehensive validation"""
 
 
 
10
 
11
  def __init__(self, right_arm_angle_threshold, left_arm_angle_threshold, wrist_distance_threshold, history_length_to_average):
12
  self.history_length_to_average = history_length_to_average
 
19
  self.left_arm_angle_threshold = left_arm_angle_threshold
20
  self.wrist_distance_threshold = wrist_distance_threshold
21
 
 
 
 
 
 
 
 
 
 
22
  def _calculate_angle(self, a, b, c):
23
  """Calculate angle between three points"""
24
  try:
 
26
  math.atan2(a[1]-b[1], a[0]-b[0]))
27
  return ang + 360 if ang < 0 else ang
28
  except Exception as e:
29
+ cpr_logger.error(f"Angle calculation error: {e}")
30
  return 0
31
 
32
  def _check_bended_right_arm(self, keypoints):
 
44
  avg_right = np.mean(self.right_arm_angles[-self.history_length_to_average:] if self.right_arm_angles else 0)
45
 
46
  if avg_right > self.right_arm_angle_threshold:
47
+ warnings.append("Right arm bent!")
48
 
49
  return warnings
50
 
51
  except Exception as e:
52
+ cpr_logger.error(f"Right arm check error: {e}")
53
 
54
  return warnings
55
 
 
68
  avg_left = np.mean(self.left_arm_angles[-self.history_length_to_average:] if self.left_arm_angles else 0)
69
 
70
  if avg_left < self.left_arm_angle_threshold:
71
+ warnings.append("Left arm bent!")
72
 
73
  return warnings
74
 
75
  except Exception as e:
76
+ cpr_logger.error(f"Left arm check error: {e}")
77
 
78
  return warnings
79
 
80
+ def _check_hands_on_chest(self, keypoints, chest_params):
81
+ """Check individual hand positions and return specific warnings"""
82
+
83
+ # Get the wrist keypoints
84
+ left_wrist = keypoints[CocoKeypoints.LEFT_WRIST.value]
85
+ right_wrist = keypoints[CocoKeypoints.RIGHT_WRIST.value]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
 
 
87
  warnings = []
88
  try:
89
+ if chest_params is None:
90
+ return ["Both hands not on chest!"] # Fallback warning
 
91
 
 
 
92
  cx, cy, cw, ch = chest_params
93
+ left_in = right_in = False
94
+
95
+ # Check left hand
96
+ if left_wrist is not None:
97
+ left_in = (cx - cw/2 < left_wrist[0] < cx + cw/2) and \
98
+ (cy - ch/2 < left_wrist[1] < cy + ch/2)
99
 
100
+ # Check right hand
101
+ if right_wrist is not None:
102
+ right_in = (cx - cw/2 < right_wrist[0] < cx + cw/2) and \
103
+ (cy - ch/2 < right_wrist[1] < cy + ch/2)
104
+
105
+ # Determine warnings
106
+ if not left_in and not right_in:
107
+ warnings.append("Both hands not on chest!")
108
+ else:
109
+ if not left_in:
110
+ warnings.append("Left hand not on chest!")
111
+ if not right_in:
112
+ warnings.append("Right hand not on chest!")
113
+
114
  except Exception as e:
115
+ cpr_logger.error(f"Hands check error: {e}")
116
 
117
  return warnings
118
 
119
+
120
+ def validate_posture(self, keypoints, chest_params):
121
  """Run all posture validations (returns aggregated warnings)"""
122
  warnings = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
+ warnings += self._check_hands_on_chest(keypoints, chest_params)
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
+ if ("Right hand not on chest!" not in warnings) and ("Both hands not on chest!" not in warnings):
127
+ warnings += self._check_bended_right_arm(keypoints)
 
 
128
 
129
+ if ("Left hand not on chest!" not in warnings) and ("Both hands not on chest!" not in warnings):
130
+ warnings += self._check_bended_left_arm(keypoints)
131
+
132
+ return warnings
 
 
 
 
 
 
 
 
 
 
CPR/role_classifier.py CHANGED
@@ -2,7 +2,7 @@
2
  import cv2
3
  import numpy as np
4
  from ultralytics.utils.plotting import Annotator # Import YOLO's annotator
5
- from CPR.keypoints import CocoKeypoints
6
 
7
 
8
  class RoleClassifier:
@@ -28,7 +28,7 @@ class RoleClassifier:
28
  return 1 if height > width else 0 # 1 for vertical, 0 for horizontal
29
 
30
  except (TypeError, ValueError) as e:
31
- print(f"Verticality score calculation error: {e}")
32
  return -1
33
 
34
  def _calculate_bounding_box_center(self, bounding_box):
@@ -94,11 +94,12 @@ class RoleClassifier:
94
  if threshold:
95
  box_area = width * height
96
  if box_area > threshold * 1.2: # 20% tolerance
97
- print(f"Filtered oversized box {i} (area: {box_area:.1f} > threshold: {threshold:.1f})")
98
  continue
99
 
100
  # Calculate features
101
  verticality_score = self._calculate_verticality_score(bounding_box)
 
102
  bounding_box_center = self._calculate_bounding_box_center(bounding_box)
103
 
104
  # Store valid results
@@ -111,7 +112,7 @@ class RoleClassifier:
111
  })
112
 
113
  except Exception as e:
114
- print(f"Error processing detection {i}: {e}")
115
  continue
116
 
117
  # Step 2: Identify the patient (horizontal posture)
@@ -132,6 +133,7 @@ class RoleClassifier:
132
  potential_rescuers = [
133
  res for res in processed_results
134
  if res['verticality_score'] == 1
 
135
  and res['original_index'] != patient['original_index']
136
  ]
137
 
@@ -158,7 +160,7 @@ class RoleClassifier:
158
  keypoints = self.rescuer_processed_results["keypoints"]
159
  annotator.kpts(keypoints, shape=frame.shape[:2])
160
  except Exception as e:
161
- print(f"Error drawing rescuer: {str(e)}")
162
 
163
  # Draw patient (B) with red box and keypoints
164
  if self.patient_processed_results:
@@ -170,7 +172,7 @@ class RoleClassifier:
170
  keypoints = self.patient_processed_results["keypoints"]
171
  annotator.kpts(keypoints, shape=frame.shape[:2])
172
  except Exception as e:
173
- print(f"Error drawing patient: {str(e)}")
174
 
175
  return annotator.result()
176
 
 
2
  import cv2
3
  import numpy as np
4
  from ultralytics.utils.plotting import Annotator # Import YOLO's annotator
5
+ from CPR.logging_config import cpr_logger
6
 
7
 
8
  class RoleClassifier:
 
28
  return 1 if height > width else 0 # 1 for vertical, 0 for horizontal
29
 
30
  except (TypeError, ValueError) as e:
31
+ cpr_logger.error(f"Verticality score calculation error: {e}")
32
  return -1
33
 
34
  def _calculate_bounding_box_center(self, bounding_box):
 
94
  if threshold:
95
  box_area = width * height
96
  if box_area > threshold * 1.2: # 20% tolerance
97
+ cpr_logger.info(f"Filtered oversized box {i} (area: {box_area:.1f} > threshold: {threshold:.1f})")
98
  continue
99
 
100
  # Calculate features
101
  verticality_score = self._calculate_verticality_score(bounding_box)
102
+ #!We already have the center coordinates from the bounding box, no need to recalculate it.
103
  bounding_box_center = self._calculate_bounding_box_center(bounding_box)
104
 
105
  # Store valid results
 
112
  })
113
 
114
  except Exception as e:
115
+ cpr_logger.error(f"Error processing detection {i}: {e}")
116
  continue
117
 
118
  # Step 2: Identify the patient (horizontal posture)
 
133
  potential_rescuers = [
134
  res for res in processed_results
135
  if res['verticality_score'] == 1
136
+ #! Useless condition because the patient was horizontal
137
  and res['original_index'] != patient['original_index']
138
  ]
139
 
 
160
  keypoints = self.rescuer_processed_results["keypoints"]
161
  annotator.kpts(keypoints, shape=frame.shape[:2])
162
  except Exception as e:
163
+ cpr_logger.error(f"Error drawing rescuer: {str(e)}")
164
 
165
  # Draw patient (B) with red box and keypoints
166
  if self.patient_processed_results:
 
172
  keypoints = self.patient_processed_results["keypoints"]
173
  annotator.kpts(keypoints, shape=frame.shape[:2])
174
  except Exception as e:
175
+ cpr_logger.error(f"Error drawing patient: {str(e)}")
176
 
177
  return annotator.result()
178
 
CPR/shoulders_analyzer.py CHANGED
@@ -1,7 +1,6 @@
1
- # analyzers.py
2
- import cv2
3
  import numpy as np
4
  from CPR.keypoints import CocoKeypoints
 
5
 
6
  class ShouldersAnalyzer:
7
  """Analyzes shoulder distances and posture"""
@@ -23,7 +22,7 @@ class ShouldersAnalyzer:
23
 
24
  return distance
25
  except Exception as e:
26
- print(f"Shoulder distance error: {e}")
27
  return
28
 
29
  def reset_shoulder_distances(self):
 
 
 
1
  import numpy as np
2
  from CPR.keypoints import CocoKeypoints
3
+ from CPR.logging_config import cpr_logger
4
 
5
  class ShouldersAnalyzer:
6
  """Analyzes shoulder distances and posture"""
 
22
 
23
  return distance
24
  except Exception as e:
25
+ cpr_logger.error(f"Shoulder distance error: {e}")
26
  return
27
 
28
  def reset_shoulder_distances(self):
CPR/threaded_camera.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ from queue import Queue
3
+ import queue
4
+ import cv2
5
+ from CPR.logging_config import cpr_logger
6
+
7
class ThreadedCamera:
    """Camera reader that captures frames on a background thread into a queue.

    Decouples frame acquisition from frame processing: a daemon thread pulls
    frames from OpenCV as fast as the device produces them, and slow consumers
    cause frames to be *dropped* (with accounting) rather than stalling capture.
    """

    def __init__(self, source, requested_fps=30):
        """Open *source* and prepare the bounded frame queue.

        Args:
            source: Anything ``cv2.VideoCapture`` accepts (device index,
                file path, or stream URL).
            requested_fps: FPS to request from the camera. The device may
                ignore the request, so the actual FPS is read back afterwards.

        Raises:
            ValueError: If the capture source cannot be opened.
        """
        # The constructor of OpenCV's VideoCapture class automatically opens the camera
        self.cap = cv2.VideoCapture(source)
        if not self.cap.isOpened():
            raise ValueError(f"[VIDEO CAPTURE] Unable to open camera source: {source}")
        cpr_logger.info(f"[VIDEO CAPTURE] Camera source opened: {source}")

        # Attempt to configure the camera to the requested FPS.
        # .set() returns True if the camera acknowledged the request,
        # not if it actually achieved the FPS.
        set_success = self.cap.set(cv2.CAP_PROP_FPS, requested_fps)

        # The FPS the camera is actually using, which may differ from the request.
        actual_fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.fps = actual_fps

        cpr_logger.info(f"[VIDEO CAPTURE] Requested FPS: {requested_fps}, Set Success: {set_success}, Actual FPS: {actual_fps}")

        # The buffer should be able to hold a lag of up to this many seconds.
        number_of_seconds_to_buffer = 5
        # Some backends report 0.0 FPS; fall back to the requested rate so the
        # queue is never unbounded (Queue(maxsize=0) means "infinite").
        effective_fps = actual_fps if actual_fps > 0 else requested_fps
        queue_size = int(effective_fps * number_of_seconds_to_buffer)
        self.q = Queue(maxsize=queue_size)
        cpr_logger.info(f"[VIDEO CAPTURE] Queue size: {queue_size}")

        # Flag indicating that the reader thread should keep running.
        self.running = threading.Event()
        self.running.set()  # Initial state = running
        cpr_logger.info(f"[VIDEO CAPTURE] Camera running: {self.running.is_set()}")

        # Frame accounting for diagnostics (see release()).
        self.number_of_total_frames = 0
        self.number_of_dropped_frames = 0

        # Created lazily by start_capture(); release() must tolerate None.
        self.thread = None

    def start_capture(self):
        """Start (or restart) the background reader thread."""
        # Clear any existing frames in the queue so a restart begins fresh.
        while not self.q.empty():
            self.q.get()

        # threading.Thread() initializes a new thread; target=self._reader
        # is the method the thread will execute.
        self.thread = threading.Thread(target=self._reader)
        cpr_logger.info(f"[VIDEO CAPTURE] Thread initialized: {self.thread}")

        # Daemon threads automatically exit when the main program exits:
        # they run in the background and don't block program termination.
        self.thread.daemon = True
        cpr_logger.info(f"[VIDEO CAPTURE] Thread daemon: {self.thread.daemon}")

        # Run _reader in parallel with the main program.
        self.thread.start()

    def _reader(self):
        """Thread body: pump frames from the device into the queue.

        Puts a ``None`` sentinel and exits when the device disconnects.
        Drops the current frame (with a short timeout) when the queue is full.
        """
        while self.running.is_set():
            ret, frame = self.cap.read()
            if not ret:
                cpr_logger.info("Camera disconnected")
                self.q.put(None)  # Sentinel for clean exit
                break

            try:
                self.number_of_total_frames += 1
                self.q.put(frame, timeout=0.1)
            except queue.Full:
                cpr_logger.info("Frame dropped")
                self.number_of_dropped_frames += 1

    def read(self):
        """Block until the next frame is available; ``None`` means end of stream."""
        return self.q.get()

    def release(self):
        """Stop the reader thread and release the capture device."""
        # Routine statistics, not an error condition.
        cpr_logger.info(f"[VIDEO CAPTURE] Total frames: {self.number_of_total_frames}, Dropped frames: {self.number_of_dropped_frames}")

        self.running.clear()

        # Release the capture first to unblock any pending cap.read().
        self.cap.release()

        # Then join the thread (it may never have been started).
        if self.thread is not None:
            self.thread.join(timeout=1.0)
            if self.thread.is_alive():
                cpr_logger.info("Warning: Thread didn't terminate cleanly")

    def isOpened(self):
        """Return True while the reader is running and the device is open."""
        return self.running.is_set() and self.cap.isOpened()

    def __del__(self):
        # Guard with getattr: if __init__ raised before self.running was
        # assigned, __del__ still runs and must not raise AttributeError.
        running = getattr(self, "running", None)
        if running is not None and running.is_set():  # Only release if not already done
            self.release()
CPR/warnings_overlayer.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import os
4
+ import sys
5
+
6
+ from CPR.logging_config import cpr_logger
7
+
8
class WarningsOverlayer:
    """Overlays CPR posture/rate/depth warning banners onto a processed video.

    Re-reads the analyzer's output video, draws any warnings active for each
    frame into a vertical "drawer", saves up to two screenshots per posture
    warning, and writes a ``*_final.mp4`` alongside the input.
    """

    def __init__(self):
        # Single drawer configuration (fractions of the frame size).
        self.DRAWER_CONFIG = {
            "base_position": (0.05, 0.15),  # 5% from left, 15% from top
            "vertical_spacing": 0.06        # 6% of frame height between warnings
        }

        # Warning config (colors only, BGR).
        self.WARNING_CONFIG = {
            # Posture Warnings
            "Right arm bent!": {"color": (52, 110, 235)},
            "Left arm bent!": {"color": (52, 110, 235)},
            "Left hand not on chest!": {"color": (161, 127, 18)},
            "Right hand not on chest!": {"color": (161, 127, 18)},
            "Both hands not on chest!": {"color": (161, 127, 18)},

            # Rate/Depth Warnings
            "Depth too low!": {"color": (125, 52, 235)},
            "Depth too high!": {"color": (125, 52, 235)},
            "Rate too slow!": {"color": (235, 52, 214)},
            "Rate too fast!": {"color": (235, 52, 214)}
        }

    def add_warnings_to_processed_video(self, video_output_path, sampling_interval_frames, rate_and_depth_warnings, posture_warnings):
        """Burn warning banners into the processed video.

        Args:
            video_output_path: Path of the already-processed video to annotate.
            sampling_interval_frames: Analyzer sampling stride; warning
                start/end frame indices are divided by it to map onto the
                processed video's frame numbering.
            rate_and_depth_warnings: Entries with 'start_frame', 'end_frame'
                and a 'rate_and_depth_warnings' list.
            posture_warnings: Entries with 'start_frame', 'end_frame' and a
                'posture_warnings' list.
        """
        cpr_logger.info("\n[POST-PROCESS] Starting warning overlay")

        # Read processed video with original parameters
        cap = cv2.VideoCapture(video_output_path)
        if not cap.isOpened():
            # Genuine failure — log at error level, not info.
            cpr_logger.error("[ERROR] Failed to open processed video")
            return

        # Get original video properties
        original_fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
        processed_fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Create final writer with ORIGINAL codec and parameters
        base = os.path.splitext(video_output_path)[0]
        final_path = os.path.abspath(f"{base}_final.mp4")
        writer = cv2.VideoWriter(final_path, original_fourcc, processed_fps, (width, height))

        # Combine both warning types into a unified (start, end, warnings) list,
        # rescaled from analyzer frame indices to processed-video frame indices.
        all_warnings = []

        for entry in posture_warnings:
            if warnings := entry.get('posture_warnings'):
                start = entry['start_frame'] // sampling_interval_frames
                end = entry['end_frame'] // sampling_interval_frames
                all_warnings.append((int(start), int(end), warnings))

        for entry in rate_and_depth_warnings:
            if warnings := entry.get('rate_and_depth_warnings'):
                start = entry['start_frame'] // sampling_interval_frames
                end = entry['end_frame'] // sampling_interval_frames
                all_warnings.append((int(start), int(end), warnings))

        # Screenshot tracking: at most 2 screenshots per distinct warning text.
        os.makedirs("screenshots", exist_ok=True)
        screenshot_counts = {}

        # Rate/depth warnings never get screenshots (only posture ones do).
        skip_warnings = {
            "Depth too low!",
            "Depth too high!",
            "Rate too slow!",
            "Rate too fast!"
        }

        # Video processing loop
        frame_idx = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            active_warnings = []
            for start, end, warnings in all_warnings:
                if start <= frame_idx <= end:
                    active_warnings.extend(warnings)

            # Draw and optionally screenshot active warnings
            self._draw_warnings(frame, active_warnings)

            for warning_text in set(active_warnings):
                if warning_text in skip_warnings:
                    continue
                count = screenshot_counts.get(warning_text, 0)
                if count < 2:
                    screenshot_path = os.path.join("screenshots", f"{warning_text.replace(' ', '_')}_{frame_idx}.jpg")
                    cv2.imwrite(screenshot_path, frame)
                    screenshot_counts[warning_text] = count + 1

            writer.write(frame)
            frame_idx += 1

        cap.release()
        writer.release()
        cpr_logger.info(f"\n[POST-PROCESS] Final output saved to: {final_path}")


    def _draw_warnings(self, frame, active_warnings):
        """Draw all active warnings stacked vertically in a single drawer."""
        frame_height = frame.shape[0]
        frame_width = frame.shape[1]

        # Starting position (fractions of the frame converted to pixels).
        base_x = int(self.DRAWER_CONFIG["base_position"][0] * frame_width)
        current_y = int(self.DRAWER_CONFIG["base_position"][1] * frame_height)

        # Vertical spacing between consecutive banners.
        y_spacing = int(self.DRAWER_CONFIG["vertical_spacing"] * frame_height)

        # Draw all active warnings vertically; unknown warning texts
        # (no configured color) are silently skipped.
        for warning_text in active_warnings:
            if color := self.WARNING_CONFIG.get(warning_text, {}).get("color"):
                # Draw warning at current position
                self._draw_warning_banner(
                    frame=frame,
                    text=warning_text,
                    color=color,
                    position=(base_x, current_y))

                # Move down for next warning
                current_y += y_spacing

    def _draw_warning_banner(self, frame, text, color, position):
        """Draw one filled banner with white text at *position* (baseline)."""
        (text_width, text_height), _ = cv2.getTextSize(
            text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)

        x, y = position
        # Background rectangle (padded around the text extents).
        cv2.rectangle(frame,
                      (x - 10, y - text_height - 10),
                      (x + text_width + 10, y + 10),
                      color, -1)
        # Text
        cv2.putText(frame, text, (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
+
CPR/wrists_midpoint_analyzer.py CHANGED
@@ -1,6 +1,7 @@
1
  import cv2
2
  import numpy as np
3
  from CPR.keypoints import CocoKeypoints
 
4
 
5
  class WristsMidpointAnalyzer:
6
  """Analyzes and tracks wrist midpoints for rescuer"""
@@ -34,7 +35,7 @@ class WristsMidpointAnalyzer:
34
  return midpoint
35
 
36
  except Exception as e:
37
- print(f"Midpoint tracking error: {e}")
38
  return None
39
 
40
  def draw_midpoint(self, frame):
@@ -54,7 +55,7 @@ class WristsMidpointAnalyzer:
54
 
55
  return frame
56
  except Exception as e:
57
- print(f"Midpoint drawing error: {e}")
58
  return frame
59
 
60
  def reset_midpoint_history(self):
 
1
  import cv2
2
  import numpy as np
3
  from CPR.keypoints import CocoKeypoints
4
+ from CPR.logging_config import cpr_logger
5
 
6
  class WristsMidpointAnalyzer:
7
  """Analyzes and tracks wrist midpoints for rescuer"""
 
35
  return midpoint
36
 
37
  except Exception as e:
38
+ cpr_logger.error(f"Midpoint tracking error: {e}")
39
  return None
40
 
41
  def draw_midpoint(self, frame):
 
55
 
56
  return frame
57
  except Exception as e:
58
+ cpr_logger.error(f"Midpoint drawing error: {e}")
59
  return frame
60
 
61
  def reset_midpoint_history(self):
test.py → Yolo_Only.py RENAMED
File without changes
main.py CHANGED
@@ -23,13 +23,13 @@ from fastapi import WebSocket, WebSocketDisconnect
23
  import base64
24
  import cv2
25
  import time
26
- #from CPR.CPRAnalyzer import CPRAnalyzer
27
  import tempfile
28
  import matplotlib.pyplot as plt
29
  import json
30
  import asyncio
31
  import concurrent.futures
32
- from CPRRealTime.main import CPRAnalyzer
33
  from threading import Thread
34
  from starlette.responses import StreamingResponse
35
  import threading
@@ -38,10 +38,13 @@ from CPRRealTime.analysis_socket_server import AnalysisSocketServer # adjust if
38
  from CPRRealTime.logging_config import cpr_logger
39
  import logging
40
  import sys
 
41
 
42
 
43
  app = FastAPI()
44
 
 
 
45
  UPLOAD_DIR = "uploads"
46
  os.makedirs(UPLOAD_DIR, exist_ok=True)
47
 
@@ -79,30 +82,6 @@ cloudinary.config(
79
  def greet_json():
80
  return {"Hello": "World!"}
81
 
82
- # ✅ MongoDB document count route for Images collection
83
- @app.get("/count")
84
- def count_docs():
85
- collection = db["Images"]
86
- count = collection.count_documents({})
87
- return {"document_count": count}
88
-
89
- # ✅ Upload image to Cloudinary and save URL to MongoDB
90
- @app.post("/cloudinary/upload")
91
- async def upload_sample(file: UploadFile = File(...)):
92
- try:
93
- # Upload the file to Cloudinary
94
- result = cloudinary.uploader.upload(file.file, public_id=file.filename)
95
- uploaded_url = result["secure_url"]
96
-
97
- # Save image URL to MongoDB
98
- collection = db["Images"]
99
- doc = {"filename": file.filename, "url": uploaded_url}
100
- collection.insert_one(doc)
101
-
102
- return {"uploaded_url": uploaded_url}
103
- except Exception as e:
104
- return {"error": str(e)}
105
-
106
  @app.post("/predict_burn")
107
  async def predict_burn(file: UploadFile = File(...)):
108
  try:
@@ -202,19 +181,7 @@ async def segment_burn_endpoint(reference: UploadFile = File(...), patient: Uplo
202
  except Exception as e:
203
  return JSONResponse(content={"error": str(e)}, status_code=500)
204
 
205
- # ✅ Optimize and transform image URL
206
- @app.get("/cloudinary/transform")
207
- def transform_image():
208
- try:
209
- optimized_url, _ = cloudinary_url("shoes", fetch_format="auto", quality="auto")
210
- auto_crop_url, _ = cloudinary_url("shoes", width=500, height=500, crop="auto", gravity="auto")
211
- return {
212
- "optimized_url": optimized_url,
213
- "auto_crop_url": auto_crop_url
214
- }
215
- except Exception as e:
216
- return {"error": str(e)}
217
-
218
  @app.post("/classify-ecg")
219
  async def classify_ecg_endpoint(file: UploadFile = File(...)):
220
  model = joblib.load('voting_classifier.pkl')
@@ -272,6 +239,18 @@ async def diagnose_ecg(file: UploadFile = File(...)):
272
  return JSONResponse(content={"error": str(e)}, status_code=500)
273
 
274
 
 
 
 
 
 
 
 
 
 
 
 
 
275
  @app.post("/process_video")
276
  async def process_video(file: UploadFile = File(...)):
277
  if not file.content_type.startswith("video/"):
@@ -280,24 +259,84 @@ async def process_video(file: UploadFile = File(...)):
280
  print("File content type:", file.content_type)
281
  print("File filename:", file.filename)
282
 
283
- # Save uploaded file
 
 
 
 
 
 
 
 
 
 
 
 
 
 
284
  video_path = os.path.join(UPLOAD_DIR, file.filename)
285
  with open(video_path, "wb") as buffer:
286
  shutil.copyfileobj(file.file, buffer)
287
 
288
  print(f"\n[API] CPR Analysis Started on {video_path}")
289
 
290
- # Run analyzer
 
 
 
 
291
  start_time = time.time()
292
- analyzer = CPRAnalyzer(video_path)
293
- wholevideoURL, graphURL, warnings,chunks = analyzer.run_analysis() # Expects tuple return
 
 
 
 
294
 
295
- for w in warnings:
296
- filename = w['image_url'] # just the filename
297
- local_path = os.path.join("screenshots", filename)
298
- upload_result = cloudinary.uploader.upload(local_path, folder="posture_warnings")
299
- w['image_url'] = upload_result['secure_url']
 
 
 
 
 
 
 
 
300
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
301
 
302
  print(f"[API] CPR Analysis Completed on {video_path}")
303
  analysis_time = time.time() - start_time
@@ -306,220 +345,112 @@ async def process_video(file: UploadFile = File(...)):
306
  if wholevideoURL is None:
307
  raise HTTPException(status_code=500, detail="No chunk data was generated from the video.")
308
 
309
- # Return chunks and error regions
310
  return JSONResponse(content={
311
  "videoURL": wholevideoURL,
312
  "graphURL": graphURL,
313
  "warnings": warnings,
314
  "chunks": chunks,
315
-
316
  })
317
 
318
 
319
- @app.post("/process_image")
320
- async def process_image(file: UploadFile = File(...)):
321
- if not file.content_type.startswith("image/"):
322
- raise HTTPException(status_code=400, detail="File must be an image.")
323
-
324
- print("File content type:", file.content_type)
325
- print("File filename:", file.filename)
326
 
327
- # Save uploaded image
328
- image_path = os.path.join(UPLOAD_DIR, file.filename)
329
- with open(image_path, "wb") as buffer:
330
- shutil.copyfileobj(file.file, buffer)
331
-
332
-
333
-
334
- # Run YOLO detection on the image
335
- try:
336
- results = model(
337
- source=image_path,
338
- show=False,
339
- save=False
340
- )
341
 
342
- if not results or len(results) == 0 or results[0].keypoints is None or results[0].keypoints.xy is None:
343
- return JSONResponse(content={"message": "No keypoints detected"}, status_code=200)
 
 
 
344
 
345
- keypoints = results[0].keypoints.xy
346
- confidences = results[0].boxes.conf if results[0].boxes is not None else []
 
347
 
348
- except Exception as e:
349
- raise HTTPException(status_code=500, detail=f"YOLO processing error: {str(e)}")
350
 
351
- return JSONResponse(content={
352
- "message": "Image processed successfully",
353
- "KeypointsXY": keypoints.tolist(),
354
- "confidences": confidences.tolist()
355
- })
 
356
 
 
 
 
 
357
 
 
 
 
 
 
 
 
 
358
 
 
 
 
359
 
 
 
 
 
 
360
 
361
- @app.websocket("/ws/process_image")
362
- async def websocket_process_image(websocket: WebSocket):
363
- await websocket.accept()
364
- try:
365
- while True:
366
- # 1) Receive raw JPEG bytes from Flutter
367
- data: bytes = await websocket.receive_bytes()
368
-
369
- # 2) (Optional) Decode to BGR for any pre-display or debugging
370
- np_arr = np.frombuffer(data, np.uint8)
371
- frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
372
-
373
- # 3) Write to temp file for YOLO ingestion
374
- with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
375
- tmp.write(data)
376
- tmp_path = tmp.name
377
-
378
- # 4) Run inference without any built-in display
379
- try:
380
- results = model(source=tmp_path, show=False, save=False)
381
- except Exception as e:
382
- os.unlink(tmp_path)
383
- await websocket.send_text(json.dumps({
384
- "error": f"YOLO error: {e}"
385
- }))
386
- continue
387
-
388
- # cleanup temp file
389
- os.unlink(tmp_path)
390
-
391
- # 5) Clear any stray windows
392
- cv2.destroyAllWindows()
393
-
394
- # 6) Get the single annotated BGR image
395
- annotated: np.ndarray = results[0].plot()
396
-
397
- # 7) Show exactly one window in color
398
- cv2.imshow("Pose / Segmentation", annotated)
399
- cv2.waitKey(1) # small delay to allow window refresh
400
-
401
- # 8) Build and send JSON back to Flutter
402
- if not results or len(results) == 0 or results[0].keypoints is None:
403
- payload = {"message": "No keypoints detected"}
404
- else:
405
- keypoints = results[0].keypoints.xy.tolist()
406
- confidences = (
407
- results[0].boxes.conf.tolist()
408
- if results[0].boxes is not None
409
- else []
410
- )
411
- payload = {
412
- "message": "Image processed successfully",
413
- "KeypointsXY": keypoints,
414
- "confidences": confidences,
415
- }
416
 
417
- await websocket.send_text(json.dumps(payload))
 
418
 
419
- except WebSocketDisconnect:
420
- print("Client disconnected")
421
- finally:
422
- # Ensure the window is closed on disconnect
423
- cv2.destroyAllWindows()
 
424
 
 
 
 
425
 
 
 
 
 
 
 
 
 
 
426
 
 
 
427
 
428
- @app.websocket("/ws/process_video")
429
- async def websocket_process_video(websocket: WebSocket):
430
 
431
- await websocket.accept()
 
432
 
433
- frame_buffer = []
434
- frame_limit = 50
435
- frame_size = (640, 480) # Adjust if needed
436
- fps = 30 # Adjust if needed
437
- loop = asyncio.get_event_loop()
438
 
439
- # Progress reporting during analysis
440
- async def progress_callback(data):
441
- await websocket.send_text(json.dumps(data))
442
-
443
- def sync_callback(data):
444
- asyncio.run_coroutine_threadsafe(progress_callback(data), loop)
445
-
446
- def save_frames_to_video(frames, path):
447
- out = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'mp4v'), fps, frame_size)
448
- for frame in frames:
449
- resized = cv2.resize(frame, frame_size)
450
- out.write(resized)
451
- out.release()
452
-
453
- def run_analysis_on_buffer(frames):
454
- try:
455
- tmp_path = "temp_video.mp4"
456
- save_frames_to_video(frames, tmp_path)
457
-
458
- # Notify: video saved
459
- asyncio.run_coroutine_threadsafe(
460
- websocket.send_text(json.dumps({
461
- "status": "info",
462
- "message": "Video saved. Starting CPR analysis..."
463
- })),
464
- loop
465
- )
466
-
467
- # Run analysis
468
- analyzer = CPRAnalyzer(video_path=tmp_path)
469
- analyzer.run_analysis(progress_callback=sync_callback)
470
-
471
- except Exception as e:
472
- asyncio.run_coroutine_threadsafe(
473
- websocket.send_text(json.dumps({"error": str(e)})),
474
- loop
475
- )
476
-
477
- try:
478
- while True:
479
- data: bytes = await websocket.receive_bytes()
480
- np_arr = np.frombuffer(data, np.uint8)
481
- frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
482
- if frame is None:
483
- continue
484
-
485
- frame_buffer.append(frame)
486
- print(f"Frame added to buffer: {len(frame_buffer)}")
487
-
488
- if len(frame_buffer) == frame_limit:
489
- # Notify Flutter that we're switching to processing
490
- await websocket.send_text(json.dumps({
491
- "status": "ready",
492
- "message": "Prepare Right CPR: First 150 frames received. Starting processing."
493
- }))
494
-
495
- # Copy and clear buffer
496
- buffer_copy = frame_buffer[:]
497
- frame_buffer.clear()
498
-
499
- # Launch background processing
500
- executor = concurrent.futures.ThreadPoolExecutor()
501
- loop.run_in_executor(executor, run_analysis_on_buffer, buffer_copy)
502
- else:
503
- # Tell Flutter to send the next frame
504
- await websocket.send_text(json.dumps({
505
- "status": "continue",
506
- "message": f"Frame {len(frame_buffer)} received. Send next."
507
- }))
508
-
509
- except WebSocketDisconnect:
510
- print("Client disconnected")
511
-
512
- except Exception as e:
513
- await websocket.send_text(json.dumps({"error": str(e)}))
514
-
515
- finally:
516
- cv2.destroyAllWindows()
517
 
518
 
519
  logger = logging.getLogger("cpr_logger")
520
  clients = set()
521
  analyzer_thread = None
522
  analysis_started = False
 
523
  socket_server: AnalysisSocketServer = None # Global reference
524
 
525
 
@@ -537,44 +468,38 @@ async def forward_results_from_queue(websocket: WebSocket, warning_queue):
537
 
538
  def run_cpr_analysis(source, requested_fps, output_path):
539
  global socket_server
540
- cpr_logger.info(f"[MAIN] CPR Analysis Started")
541
-
542
- # Configuration
543
  requested_fps = 30
544
  input_video = source
545
-
546
- # Create output directory if it doesn't exist
547
  output_dir = r"D:\BackendGp\Deploy_El7a2ny_Application\CPRRealTime\outputs"
548
  os.makedirs(output_dir, exist_ok=True)
549
-
550
- # Set output paths using original name
551
- video_output_path = os.path.join(output_dir, f"output.mp4")
552
- plot_output_path = os.path.join(output_dir, f"output.png")
553
-
554
- # Log paths for verification
555
- cpr_logger.info(f"[CONFIG] Input video: {input_video}")
556
- cpr_logger.info(f"[CONFIG] Video output: {video_output_path}")
557
- cpr_logger.info(f"[CONFIG] Plot output: {plot_output_path}")
558
-
559
- # Initialize and run analyzer
560
- initialization_start_time = time.time()
561
- analyzer = CPRAnalyzer(input_video, video_output_path, plot_output_path, requested_fps)
562
- socket_server = analyzer.socket_server # <- get reference to its queue
563
 
564
- # Set plot output path in the analyzer
 
 
 
 
 
 
 
 
 
565
  analyzer.plot_output_path = plot_output_path
566
-
567
- initialization_end_time = time.time()
568
- initialization_elapsed_time = initialization_end_time - initialization_start_time
569
- cpr_logger.info(f"[TIMING] Initialization time: {initialization_elapsed_time:.2f}s")
570
-
571
  try:
572
  analyzer.run_analysis()
573
  finally:
574
- analyzer.socket_server.stop_server()
 
 
575
 
576
 
577
- @app.websocket("/ws/test")
578
  async def websocket_analysis(websocket: WebSocket):
579
  global analyzer_thread, analysis_started, socket_server
580
 
@@ -582,37 +507,59 @@ async def websocket_analysis(websocket: WebSocket):
582
  clients.add(websocket)
583
  logger.info("[WebSocket] Flutter connected")
584
 
585
- # Start the analyzer only once
586
- if not analysis_started:
587
- source = "http://192.168.137.33:8080/video" # Replace with your video source
588
- requested_fps = 30
589
- output_path = r"D:\CPR\End to End\Code Refactor\output\output.mp4"
590
-
591
- analyzer_thread = threading.Thread(
592
- target=run_cpr_analysis,
593
- args=(source, requested_fps, output_path),
594
- daemon=True
595
- )
596
- analyzer_thread.start()
597
- analysis_started = True
598
-
599
- logger.info("[WebSocket] Analysis thread started")
600
 
601
- # Wait until socket server is initialized
602
- while socket_server is None or not socket_server.warning_queue:
603
- await asyncio.sleep(0.1)
604
 
605
- # Start async task to stream warnings from queue to Flutter
606
- forward_task = asyncio.create_task(forward_results_from_queue(websocket, socket_server.warning_queue))
 
 
607
 
608
  try:
609
  while True:
610
- # Keep connection alive (optional)
611
- await asyncio.sleep(1)
612
  except WebSocketDisconnect:
613
  logger.warning("[WebSocket] Client disconnected")
614
  forward_task.cancel()
615
  finally:
616
  clients.discard(websocket)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
617
 
618
-
 
 
23
  import base64
24
  import cv2
25
  import time
26
+ from CPR.CPRAnalyzer import CPRAnalyzer as OfflineAnalyzer
27
  import tempfile
28
  import matplotlib.pyplot as plt
29
  import json
30
  import asyncio
31
  import concurrent.futures
32
+ from CPRRealTime.main import CPRAnalyzer as RealtimeAnalyzer
33
  from threading import Thread
34
  from starlette.responses import StreamingResponse
35
  import threading
 
38
  from CPRRealTime.logging_config import cpr_logger
39
  import logging
40
  import sys
41
+ import re
42
 
43
 
44
  app = FastAPI()
45
 
46
+ SCREENSHOTS_DIR = "screenshots" # Folder containing screenshots to upload
47
+ OUTPUT_DIR = "Output" # Folder containing the .mp4 video and graph .png
48
  UPLOAD_DIR = "uploads"
49
  os.makedirs(UPLOAD_DIR, exist_ok=True)
50
 
 
82
  def greet_json():
83
  return {"Hello": "World!"}
84
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  @app.post("/predict_burn")
86
  async def predict_burn(file: UploadFile = File(...)):
87
  try:
 
181
  except Exception as e:
182
  return JSONResponse(content={"error": str(e)}, status_code=500)
183
 
184
+
 
 
 
 
 
 
 
 
 
 
 
 
185
  @app.post("/classify-ecg")
186
  async def classify_ecg_endpoint(file: UploadFile = File(...)):
187
  model = joblib.load('voting_classifier.pkl')
 
239
  return JSONResponse(content={"error": str(e)}, status_code=500)
240
 
241
 
242
def clean_warning_name(filename: str) -> tuple:
    """Derive a Cloudinary public id and a human-readable description
    from a warning screenshot filename.

    Strips the extension and the trailing ``_<frame index>`` suffix, then
    also produces a description with underscores replaced by spaces.

    E.g. ``"Right_arm_bent!_123.jpg"`` -> ``("Right_arm_bent!", "Right arm bent!")``

    Args:
        filename: Screenshot filename such as ``"posture_001.png"``.

    Returns:
        ``(cleaned, description)`` — the underscore-joined base name without
        the frame index, and the same text with underscores as spaces.
    """
    name, _ = os.path.splitext(filename)
    # Remove trailing underscore + digits (the frame index appended by the overlayer)
    cleaned = re.sub(r'_\d+$', '', name)
    # Replace underscores with spaces for the user-facing description
    cleaned_desc = cleaned.replace('_', ' ')
    return cleaned, cleaned_desc
254
  @app.post("/process_video")
255
  async def process_video(file: UploadFile = File(...)):
256
  if not file.content_type.startswith("video/"):
 
259
  print("File content type:", file.content_type)
260
  print("File filename:", file.filename)
261
 
262
+ # Prepare directories
263
+ os.makedirs(UPLOAD_DIR, exist_ok=True)
264
+ os.makedirs(SCREENSHOTS_DIR, exist_ok=True)
265
+ os.makedirs(OUTPUT_DIR, exist_ok=True)
266
+
267
+ folders = ["screenshots", "uploads", "Output"]
268
+
269
+ for folder in folders:
270
+ if os.path.exists(folder):
271
+ for filename in os.listdir(folder):
272
+ file_path = os.path.join(folder, filename)
273
+ if os.path.isfile(file_path):
274
+ os.remove(file_path)
275
+
276
+ # Save uploaded video file
277
  video_path = os.path.join(UPLOAD_DIR, file.filename)
278
  with open(video_path, "wb") as buffer:
279
  shutil.copyfileobj(file.file, buffer)
280
 
281
  print(f"\n[API] CPR Analysis Started on {video_path}")
282
 
283
+ # Prepare output paths for the analyzer
284
+ video_output_path = os.path.join(OUTPUT_DIR, "Myoutput.mp4")
285
+ plot_output_path = os.path.join(OUTPUT_DIR, "Myoutput.png")
286
+
287
+ # Initialize analyzer with input video and output paths
288
  start_time = time.time()
289
+ analyzer = OfflineAnalyzer(video_path, video_output_path, plot_output_path, requested_fps=30)
290
+
291
+ # Run the analysis (choose your method)
292
+ chunks = analyzer.run_analysis_video()
293
+
294
+ warnings = [] # Start empty list
295
 
296
+ # Upload screenshots and build warnings list with descriptions and URLs
297
+ if os.path.exists(SCREENSHOTS_DIR):
298
+ for filename in os.listdir(SCREENSHOTS_DIR):
299
+ if filename.lower().endswith(('.png', '.jpg', '.jpeg')):
300
+ local_path = os.path.join(SCREENSHOTS_DIR, filename)
301
+ cleaned_name, description = clean_warning_name(filename)
302
+
303
+ upload_result = cloudinary.uploader.upload(
304
+ local_path,
305
+ folder="posture_warnings",
306
+ public_id=cleaned_name,
307
+ overwrite=True
308
+ )
309
 
310
+ # Add new warning with image_url and description
311
+ warnings.append({
312
+ "image_url": upload_result['secure_url'],
313
+ "description": description
314
+ })
315
+
316
+ video_path = "Output/Myoutput_final.mp4"
317
+
318
+ if os.path.isfile(video_path):
319
+ upload_result = cloudinary.uploader.upload_large(
320
+ video_path,
321
+ resource_type="video",
322
+ folder="output_videos",
323
+ public_id="Myoutput_final",
324
+ overwrite=True
325
+ )
326
+ wholevideoURL = upload_result['secure_url']
327
+ else:
328
+ wholevideoURL = None
329
+
330
+ # Upload graph output
331
+ graphURL = None
332
+ if os.path.isfile(plot_output_path):
333
+ upload_graph_result = cloudinary.uploader.upload(
334
+ plot_output_path,
335
+ folder="output_graphs",
336
+ public_id=os.path.splitext(os.path.basename(plot_output_path))[0],
337
+ overwrite=True
338
+ )
339
+ graphURL = upload_graph_result['secure_url']
340
 
341
  print(f"[API] CPR Analysis Completed on {video_path}")
342
  analysis_time = time.time() - start_time
 
345
  if wholevideoURL is None:
346
  raise HTTPException(status_code=500, detail="No chunk data was generated from the video.")
347
 
 
348
  return JSONResponse(content={
349
  "videoURL": wholevideoURL,
350
  "graphURL": graphURL,
351
  "warnings": warnings,
352
  "chunks": chunks,
 
353
  })
354
 
355
 
356
+ # @app.websocket("/ws/process_video")
357
+ # async def websocket_process_video(websocket: WebSocket):
 
 
 
 
 
358
 
359
+ # await websocket.accept()
 
 
 
 
 
 
 
 
 
 
 
 
 
360
 
361
+ # frame_buffer = []
362
+ # frame_limit = 50
363
+ # frame_size = (640, 480) # Adjust if needed
364
+ # fps = 30 # Adjust if needed
365
+ # loop = asyncio.get_event_loop()
366
 
367
+ # # Progress reporting during analysis
368
+ # async def progress_callback(data):
369
+ # await websocket.send_text(json.dumps(data))
370
 
371
+ # def sync_callback(data):
372
+ # asyncio.run_coroutine_threadsafe(progress_callback(data), loop)
373
 
374
+ # def save_frames_to_video(frames, path):
375
+ # out = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'mp4v'), fps, frame_size)
376
+ # for frame in frames:
377
+ # resized = cv2.resize(frame, frame_size)
378
+ # out.write(resized)
379
+ # out.release()
380
 
381
+ # def run_analysis_on_buffer(frames):
382
+ # try:
383
+ # tmp_path = "temp_video.mp4"
384
+ # save_frames_to_video(frames, tmp_path)
385
 
386
+ # # Notify: video saved
387
+ # asyncio.run_coroutine_threadsafe(
388
+ # websocket.send_text(json.dumps({
389
+ # "status": "info",
390
+ # "message": "Video saved. Starting CPR analysis..."
391
+ # })),
392
+ # loop
393
+ # )
394
 
395
+ # # Run analysis
396
+ # analyzer = CPRAnalyzer(video_path=tmp_path)
397
+ # analyzer.run_analysis(progress_callback=sync_callback)
398
 
399
+ # except Exception as e:
400
+ # asyncio.run_coroutine_threadsafe(
401
+ # websocket.send_text(json.dumps({"error": str(e)})),
402
+ # loop
403
+ # )
404
 
405
+ # try:
406
+ # while True:
407
+ # data: bytes = await websocket.receive_bytes()
408
+ # np_arr = np.frombuffer(data, np.uint8)
409
+ # frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
410
+ # if frame is None:
411
+ # continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
412
 
413
+ # frame_buffer.append(frame)
414
+ # print(f"Frame added to buffer: {len(frame_buffer)}")
415
 
416
+ # if len(frame_buffer) == frame_limit:
417
+ # # Notify Flutter that we're switching to processing
418
+ # await websocket.send_text(json.dumps({
419
+ # "status": "ready",
420
+ # "message": "Prepare Right CPR: First 50 frames received. Starting processing."
421
+ # }))
422
 
423
+ # # Copy and clear buffer
424
+ # buffer_copy = frame_buffer[:]
425
+ # frame_buffer.clear()
426
 
427
+ # # Launch background processing
428
+ # executor = concurrent.futures.ThreadPoolExecutor()
429
+ # loop.run_in_executor(executor, run_analysis_on_buffer, buffer_copy)
430
+ # else:
431
+ # # Tell Flutter to send the next frame
432
+ # await websocket.send_text(json.dumps({
433
+ # "status": "continue",
434
+ # "message": f"Frame {len(frame_buffer)} received. Send next."
435
+ # }))
436
 
437
+ # except WebSocketDisconnect:
438
+ # print("Client disconnected")
439
 
440
+ # except Exception as e:
441
+ # await websocket.send_text(json.dumps({"error": str(e)}))
442
 
443
+ # finally:
444
+ # cv2.destroyAllWindows()
445
 
 
 
 
 
 
446
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
447
 
448
 
449
# Module-level shared state for the realtime CPR analysis WebSocket endpoint.
logger = logging.getLogger("cpr_logger")  # dedicated logger for CPR pipeline messages
clients = set()          # currently-connected WebSocket clients
analyzer_thread = None   # background thread running the analyzer, started lazily
analysis_started = False # guards against starting the analyzer more than once
analyzer_lock = threading.Lock()  # serializes the start-once check across concurrent connections
# Set by run_cpr_analysis once the analyzer is constructed; read by the
# WebSocket endpoint to forward results. None until the analyzer starts.
socket_server: AnalysisSocketServer = None # Global reference
455
 
456
 
 
468
 
469
def run_cpr_analysis(source, requested_fps, output_path):
    """Run the realtime CPR analysis pipeline on *source* until it finishes.

    Publishes the analyzer's socket server through the module-level
    ``socket_server`` global so the WebSocket endpoint can stream results,
    and guarantees the server is stopped when analysis ends or fails.

    Args:
        source: Video source (file path or stream URL) passed to RealtimeAnalyzer.
        requested_fps: Requested capture rate. NOTE(review): currently forced
            to 30 below regardless of the caller's value — confirm intended.
        output_path: NOTE(review): unused; outputs are derived from the
            hard-coded ``output_dir`` below — confirm intended.
    """
    global socket_server
    logger.info("[MAIN] CPR Analysis Started")  # fix: was an f-string with no placeholders (F541)

    # Parameters are overridden/ignored here (see NOTEs in the docstring).
    requested_fps = 30
    input_video = source

    # Hard-coded, machine-specific output directory.
    output_dir = r"D:\BackendGp\Deploy_El7a2ny_Application\CPRRealTime\outputs"
    os.makedirs(output_dir, exist_ok=True)

    video_output_path = os.path.join(output_dir, "output.mp4")
    plot_output_path = os.path.join(output_dir, "output.png")

    # Lazy %-style args: message formatting is skipped when the level is disabled.
    logger.info("[CONFIG] Input video: %s", input_video)
    logger.info("[CONFIG] Video output: %s", video_output_path)
    logger.info("[CONFIG] Plot output: %s", plot_output_path)

    initialization_start_time = time.time()
    analyzer = RealtimeAnalyzer(input_video, video_output_path, plot_output_path, requested_fps)
    socket_server = analyzer.socket_server  # expose the server to the WebSocket endpoint
    analyzer.plot_output_path = plot_output_path

    elapsed_time = time.time() - initialization_start_time
    logger.info("[TIMING] Initialization time: %.2fs", elapsed_time)

    try:
        analyzer.run_analysis()
    finally:
        # Always release the socket server, even if run_analysis raised.
        if analyzer.socket_server:
            analyzer.socket_server.stop_server()
        logger.info("[MAIN] Analyzer stopped")
500
 
501
 
502
+ @app.websocket("/ws/real")
503
  async def websocket_analysis(websocket: WebSocket):
504
  global analyzer_thread, analysis_started, socket_server
505
 
 
507
  clients.add(websocket)
508
  logger.info("[WebSocket] Flutter connected")
509
 
510
+ # Ensure analyzer starts only once using a thread-safe lock
511
+ with analyzer_lock:
512
+ if not analysis_started:
513
+ source = "http://192.168.1.16:8080/video"
514
+ requested_fps = 30
515
+ output_path = r"D:\CPR\End to End\Code Refactor\output\output.mp4"
516
+
517
+ analyzer_thread = threading.Thread(
518
+ target=run_cpr_analysis,
519
+ args=(source, requested_fps, output_path),
520
+ daemon=True
521
+ )
522
+ analyzer_thread.start()
523
+ analysis_started = True
524
+ logger.info("[WebSocket] Analysis thread started")
525
 
526
+ # Wait until the socket server and queue are initialized
527
+ while socket_server is None or socket_server.warning_queue is None:
528
+ await asyncio.sleep(0.1)
529
 
530
+ # Start async task to stream data to client
531
+ forward_task = asyncio.create_task(
532
+ forward_results_from_queue(websocket, socket_server.warning_queue)
533
+ )
534
 
535
  try:
536
  while True:
537
+ await asyncio.sleep(1) # Keep alive
 
538
  except WebSocketDisconnect:
539
  logger.warning("[WebSocket] Client disconnected")
540
  forward_task.cancel()
541
  finally:
542
  clients.discard(websocket)
543
+ logger.info(f"[WebSocket] Active clients: {len(clients)}")
544
+
545
+ # Optional: stop analysis if no clients remain
546
+ if not clients and socket_server:
547
+ logger.info("[WebSocket] No clients left. Stopping analyzer.")
548
+ socket_server.stop_server()
549
+ analysis_started = False
550
+ socket_server = None
551
+
552
+
553
+ import signal
554
+
555
def shutdown_handler(signum, frame):
    """Handle a termination signal: stop the socket server (best effort), then exit hard."""
    logger.info("Received shutdown signal")
    server = socket_server  # snapshot the module-level reference
    if server:
        try:
            server.stop_server()
        except Exception as e:
            # Best-effort shutdown: log and continue to exit regardless.
            logger.warning(f"Error during socket server shutdown: {e}")
    # Hard exit: skip atexit/finalizers, which may hang on live threads.
    os._exit(0)
563
 
564
+ signal.signal(signal.SIGINT, shutdown_handler)
565
+ signal.signal(signal.SIGTERM, shutdown_handler)