bsmith3715 committed on
Commit
2d486e8
·
verified ·
1 Parent(s): 4e60f8a

Update pilates_evaluator.py

Browse files
Files changed (1) hide show
  1. pilates_evaluator.py +227 -225
pilates_evaluator.py CHANGED
@@ -1,226 +1,228 @@
1
- import cv2
2
- import numpy as np
3
- import json
4
- from datetime import datetime
5
- import matplotlib.pyplot as plt
6
- from pathlib import Path
7
- import os
8
- import urllib.request
9
-
10
- class PilatesVideoEvaluator:
11
- def __init__(self):
12
- # Initialize OpenCV pose detection
13
- self.BODY_PARTS = {
14
- "Neck": 0, "RShoulder": 1, "RElbow": 2, "RWrist": 3,
15
- "LShoulder": 4, "LElbow": 5, "LWrist": 6, "RHip": 7, "RKnee": 8,
16
- "RAnkle": 9, "LHip": 10, "LKnee": 11, "LAnkle": 12
17
- }
18
-
19
- # Download the model if it doesn't exist
20
- if not os.path.exists('pose_model.caffemodel') or not os.path.exists('pose_deploy.prototxt'):
21
- print("Downloading pose estimation model...")
22
- model_url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"
23
- proto_url = "https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt"
24
-
25
- urllib.request.urlretrieve(model_url, "pose_model.caffemodel")
26
- urllib.request.urlretrieve(proto_url, "pose_deploy.prototxt")
27
- print("Model downloaded successfully!")
28
-
29
- # Load the model
30
- self.net = cv2.dnn.readNetFromCaffe("pose_deploy.prototxt", "pose_model.caffemodel")
31
-
32
- # Evaluation metrics
33
- self.metrics = {
34
- 'total_frames': 0,
35
- 'pose_detected_frames': 0,
36
- 'movement_consistency': [],
37
- 'balance_scores': [],
38
- 'posture_alignment': [],
39
- 'video_quality_score': 0,
40
- 'exercise_duration': 0,
41
- 'detected_exercises': []
42
- }
43
-
44
- def analyze_posture(self, frame):
45
- """Analyze posture using OpenCV pose estimation"""
46
- height, width = frame.shape[:2]
47
- blob = cv2.dnn.blobFromImage(frame, 1.0/255, (368, 368), (0, 0, 0), swapRB=False, crop=False)
48
- self.net.setInput(blob)
49
- output = self.net.forward()
50
-
51
- # Process the output to get keypoints
52
- points = []
53
- for i in range(len(self.BODY_PARTS)):
54
- # Confidence map for the current keypoint
55
- probMap = output[0, i, :, :]
56
- probMap = cv2.resize(probMap, (width, height))
57
-
58
- # Find global maxima of the probMap
59
- minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
60
-
61
- if prob > 0.1: # Confidence threshold
62
- points.append((int(point[0]), int(point[1])))
63
- else:
64
- points.append(None)
65
-
66
- return points
67
-
68
- def detect_exercise_type(self, points):
69
- """Detect exercise type based on keypoint positions"""
70
- if not points or len(points) < 18:
71
- return "Unknown"
72
-
73
- # Example: Detect plank position
74
- if (points[self.BODY_PARTS["RShoulder"]] and points[self.BODY_PARTS["RElbow"]] and
75
- points[self.BODY_PARTS["LShoulder"]] and points[self.BODY_PARTS["LElbow"]]):
76
-
77
- r_shoulder = points[self.BODY_PARTS["RShoulder"]]
78
- r_elbow = points[self.BODY_PARTS["RElbow"]]
79
- l_shoulder = points[self.BODY_PARTS["LShoulder"]]
80
- l_elbow = points[self.BODY_PARTS["LElbow"]]
81
-
82
- # Check if arms are straight (plank position)
83
- r_arm_angle = self.calculate_angle(r_shoulder, r_elbow)
84
- l_arm_angle = self.calculate_angle(l_shoulder, l_elbow)
85
-
86
- if 150 < r_arm_angle < 180 and 150 < l_arm_angle < 180:
87
- return "Plank"
88
-
89
- return "Unknown"
90
-
91
- def calculate_angle(self, point1, point2):
92
- """Calculate angle between two points"""
93
- if not point1 or not point2:
94
- return 0
95
- return np.degrees(np.arctan2(point2[1] - point1[1], point2[0] - point1[0]))
96
-
97
- def process_video(self, video_path):
98
- """Process video and analyze exercises"""
99
- cap = cv2.VideoCapture(video_path)
100
- if not cap.isOpened():
101
- raise ValueError("Could not open video file")
102
-
103
- while cap.isOpened():
104
- ret, frame = cap.read()
105
- if not ret:
106
- break
107
-
108
- self.metrics['total_frames'] += 1
109
-
110
- # Analyze posture
111
- points = self.analyze_posture(frame)
112
- if points:
113
- self.metrics['pose_detected_frames'] += 1
114
-
115
- # Detect exercise type
116
- exercise_type = self.detect_exercise_type(points)
117
- if exercise_type != "Unknown":
118
- self.metrics['detected_exercises'].append(exercise_type)
119
-
120
- # Calculate metrics
121
- self.metrics['movement_consistency'].append(self.calculate_movement_consistency(points))
122
- self.metrics['balance_scores'].append(self.calculate_balance_score(points))
123
- self.metrics['posture_alignment'].append(self.calculate_posture_alignment(points))
124
-
125
- cap.release()
126
- self.calculate_final_metrics()
127
-
128
- def calculate_movement_consistency(self, points):
129
- """Calculate movement consistency score"""
130
- # Implement movement consistency calculation
131
- return 0.8 # Placeholder
132
-
133
- def calculate_balance_score(self, points):
134
- """Calculate balance score"""
135
- # Implement balance score calculation
136
- return 0.7 # Placeholder
137
-
138
- def calculate_posture_alignment(self, points):
139
- """Calculate posture alignment score"""
140
- # Implement posture alignment calculation
141
- return 0.9 # Placeholder
142
-
143
- def calculate_final_metrics(self):
144
- """Calculate final metrics"""
145
- if self.metrics['total_frames'] > 0:
146
- self.metrics['video_quality_score'] = (
147
- self.metrics['pose_detected_frames'] / self.metrics['total_frames']
148
- ) * 100
149
-
150
- def generate_report(self, output_path):
151
- """Generate evaluation report"""
152
- report = {
153
- 'timestamp': datetime.now().isoformat(),
154
- 'metrics': self.metrics,
155
- 'summary': {
156
- 'video_quality': f"{self.metrics['video_quality_score']:.2f}%",
157
- 'detected_exercises': list(set(self.metrics['detected_exercises'])),
158
- 'average_movement_consistency': np.mean(self.metrics['movement_consistency']),
159
- 'average_balance_score': np.mean(self.metrics['balance_scores']),
160
- 'average_posture_alignment': np.mean(self.metrics['posture_alignment'])
161
- }
162
- }
163
-
164
- with open(output_path, 'w') as f:
165
- json.dump(report, f, indent=4)
166
-
167
- def visualize_results(self, output_path):
168
- """Visualize evaluation results"""
169
- plt.figure(figsize=(12, 8))
170
-
171
- # Plot metrics over time
172
- plt.subplot(2, 2, 1)
173
- plt.plot(self.metrics['movement_consistency'], label='Movement Consistency')
174
- plt.title('Movement Consistency Over Time')
175
- plt.legend()
176
-
177
- plt.subplot(2, 2, 2)
178
- plt.plot(self.metrics['balance_scores'], label='Balance Score')
179
- plt.title('Balance Score Over Time')
180
- plt.legend()
181
-
182
- plt.subplot(2, 2, 3)
183
- plt.plot(self.metrics['posture_alignment'], label='Posture Alignment')
184
- plt.title('Posture Alignment Over Time')
185
- plt.legend()
186
-
187
- plt.tight_layout()
188
- plt.savefig(output_path)
189
- plt.close()
190
-
191
- def main():
192
- """Example usage of the Pilates Video Evaluator"""
193
- evaluator = PilatesVideoEvaluator()
194
-
195
- # Replace with your video path
196
- video_path = "pilates_workout.mp4"
197
- output_video_path = "analyzed_pilates_workout.mp4"
198
- report_path = "pilates_evaluation_report.json"
199
-
200
- try:
201
- # Process the video
202
- print("Starting video analysis...")
203
- evaluator.process_video(video_path)
204
-
205
- # Print report
206
- print("\n" + "="*50)
207
- print("PILATES VIDEO EVALUATION REPORT")
208
- print("="*50)
209
-
210
- print(f"Video Quality: {evaluator.metrics['video_quality_score']:.2f}%")
211
- print(f"Detected Exercises: {', '.join(evaluator.metrics['detected_exercises'])}")
212
- print(f"Average Movement Consistency: {evaluator.metrics['average_movement_consistency']:.2f}")
213
- print(f"Average Balance Score: {evaluator.metrics['average_balance_score']:.2f}")
214
- print(f"Average Posture Alignment: {evaluator.metrics['average_posture_alignment']:.2f}")
215
-
216
- # Save report and visualization
217
- evaluator.generate_report(report_path)
218
- evaluator.visualize_results(output_video_path)
219
-
220
- except Exception as e:
221
- print(f"Error processing video: {e}")
222
- print("Make sure you have the required dependencies installed:")
223
- print("pip install opencv-python numpy matplotlib")
224
-
225
- if __name__ == "__main__":
 
 
226
  main()
 
1
+ import cv2
2
+ import numpy as np
3
+ import json
4
+ from datetime import datetime
5
+ import matplotlib.pyplot as plt
6
+ from pathlib import Path
7
+ import os
8
+ import urllib.request
9
+
10
class PilatesVideoEvaluator:
    """Evaluate a Pilates workout video with OpenPose (COCO) via OpenCV's DNN module.

    On construction the OpenPose COCO caffemodel/prototxt are downloaded if
    missing and loaded with cv2.dnn. `process_video` runs per-frame pose
    estimation, classifies simple exercises, and accumulates quality metrics
    that `generate_report` / `visualize_results` export.
    """

    # Output-channel index of each keypoint in the OpenPose COCO model
    # (channel i of the network output is the confidence map for part i).
    # NOTE: the previous mapping started at "Neck": 0, but in the COCO model
    # channel 0 is the nose and channel 1 the neck, so every keypoint was
    # being read one channel off; this is the documented COCO ordering.
    BODY_PARTS = {
        "Nose": 0, "Neck": 1,
        "RShoulder": 2, "RElbow": 3, "RWrist": 4,
        "LShoulder": 5, "LElbow": 6, "LWrist": 7,
        "RHip": 8, "RKnee": 9, "RAnkle": 10,
        "LHip": 11, "LKnee": 12, "LAnkle": 13,
        "REye": 14, "LEye": 15, "REar": 16, "LEar": 17,
    }

    def __init__(self):
        # Fetch the model files once; later runs reuse the cached copies.
        if not os.path.exists('pose_model.caffemodel') or not os.path.exists('pose_deploy.prototxt'):
            print("Downloading pose estimation model...")
            model_url = ("https://github.com/CMU-Perceptual-Computing-Lab/openpose/"
                         "blob/master/models/pose/coco/pose_iter_440000.caffemodel?raw=true")
            proto_url = ("https://raw.githubusercontent.com/opencv/opencv_extra/"
                         "master/testdata/dnn/openpose_pose_coco.prototxt")

            urllib.request.urlretrieve(model_url, "pose_model.caffemodel")
            urllib.request.urlretrieve(proto_url, "pose_deploy.prototxt")

            print("Model downloaded successfully!")

        # Load the Caffe network for inference.
        self.net = cv2.dnn.readNetFromCaffe("pose_deploy.prototxt", "pose_model.caffemodel")

        # Accumulated per-video evaluation state (reset per evaluator instance).
        self.metrics = {
            'total_frames': 0,
            'pose_detected_frames': 0,
            'movement_consistency': [],   # one score per analyzed frame
            'balance_scores': [],         # one score per analyzed frame
            'posture_alignment': [],      # one score per analyzed frame
            'video_quality_score': 0,     # % of frames with a detected pose
            'exercise_duration': 0,
            'detected_exercises': []
        }

    def analyze_posture(self, frame):
        """Run pose estimation on one BGR frame.

        Returns a list with one entry per BODY_PARTS key: an (x, y) pixel
        tuple when the keypoint's confidence exceeds 0.1, else None.
        """
        height, width = frame.shape[:2]
        # OpenPose expects a 368x368 input with pixel values scaled to [0, 1].
        blob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (368, 368), (0, 0, 0),
                                     swapRB=False, crop=False)
        self.net.setInput(blob)
        output = self.net.forward()

        points = []
        for i in range(len(self.BODY_PARTS)):
            # Confidence map for keypoint i, rescaled to frame coordinates.
            prob_map = cv2.resize(output[0, i, :, :], (width, height))

            # The global maximum of the confidence map is the keypoint estimate.
            _, prob, _, point = cv2.minMaxLoc(prob_map)

            if prob > 0.1:  # Confidence threshold
                points.append((int(point[0]), int(point[1])))
            else:
                points.append(None)

        return points

    def detect_exercise_type(self, points):
        """Classify the exercise visible in one frame: "Plank" or "Unknown"."""
        # The original required len(points) >= 18 while analyze_posture only
        # produced 13 entries, so this always returned "Unknown". Tie the
        # check to the actual number of keypoints instead of a magic number.
        if not points or len(points) < len(self.BODY_PARTS):
            return "Unknown"

        # Example: detect a plank position from the shoulder/elbow geometry.
        if (points[self.BODY_PARTS["RShoulder"]] and points[self.BODY_PARTS["RElbow"]] and
                points[self.BODY_PARTS["LShoulder"]] and points[self.BODY_PARTS["LElbow"]]):

            r_shoulder = points[self.BODY_PARTS["RShoulder"]]
            r_elbow = points[self.BODY_PARTS["RElbow"]]
            l_shoulder = points[self.BODY_PARTS["LShoulder"]]
            l_elbow = points[self.BODY_PARTS["LElbow"]]

            # Check if the upper arms are close to horizontal (plank position).
            r_arm_angle = self.calculate_angle(r_shoulder, r_elbow)
            l_arm_angle = self.calculate_angle(l_shoulder, l_elbow)

            if 150 < r_arm_angle < 180 and 150 < l_arm_angle < 180:
                return "Plank"

        return "Unknown"

    def calculate_angle(self, point1, point2):
        """Return the orientation of the point1->point2 segment in degrees.

        The value is atan2-based, in (-180, 180]; 0 if either point is
        missing (None).
        """
        if not point1 or not point2:
            return 0
        return np.degrees(np.arctan2(point2[1] - point1[1], point2[0] - point1[0]))

    def process_video(self, video_path):
        """Process a video file frame by frame, accumulating self.metrics.

        Raises ValueError if the file cannot be opened.
        """
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise ValueError("Could not open video file")

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            self.metrics['total_frames'] += 1

            # Analyze posture
            points = self.analyze_posture(frame)
            if points:
                self.metrics['pose_detected_frames'] += 1

                # Detect exercise type
                exercise_type = self.detect_exercise_type(points)
                if exercise_type != "Unknown":
                    self.metrics['detected_exercises'].append(exercise_type)

                # Per-frame quality scores
                self.metrics['movement_consistency'].append(self.calculate_movement_consistency(points))
                self.metrics['balance_scores'].append(self.calculate_balance_score(points))
                self.metrics['posture_alignment'].append(self.calculate_posture_alignment(points))

        cap.release()
        self.calculate_final_metrics()

    def calculate_movement_consistency(self, points):
        """Calculate movement consistency score (not yet implemented)."""
        # TODO: implement movement consistency calculation
        return 0.8  # Placeholder

    def calculate_balance_score(self, points):
        """Calculate balance score (not yet implemented)."""
        # TODO: implement balance score calculation
        return 0.7  # Placeholder

    def calculate_posture_alignment(self, points):
        """Calculate posture alignment score (not yet implemented)."""
        # TODO: implement posture alignment calculation
        return 0.9  # Placeholder

    def calculate_final_metrics(self):
        """Derive summary metrics after a full pass over the video."""
        if self.metrics['total_frames'] > 0:
            # Quality = percentage of frames where a pose was detected.
            self.metrics['video_quality_score'] = (
                self.metrics['pose_detected_frames'] / self.metrics['total_frames']
            ) * 100

    @staticmethod
    def _safe_mean(values):
        """Mean of a score list as a built-in float; 0.0 for an empty list.

        np.float64 is not JSON serializable and np.mean([]) is NaN, so both
        are normalized here before the report is dumped.
        """
        return float(np.mean(values)) if values else 0.0

    def generate_report(self, output_path):
        """Write the accumulated metrics and a summary to output_path as JSON."""
        report = {
            'timestamp': datetime.now().isoformat(),
            'metrics': self.metrics,
            'summary': {
                'video_quality': f"{self.metrics['video_quality_score']:.2f}%",
                'detected_exercises': list(set(self.metrics['detected_exercises'])),
                # _safe_mean keeps the report JSON-serializable (plain floats);
                # raw np.mean values would make json.dump raise TypeError.
                'average_movement_consistency': self._safe_mean(self.metrics['movement_consistency']),
                'average_balance_score': self._safe_mean(self.metrics['balance_scores']),
                'average_posture_alignment': self._safe_mean(self.metrics['posture_alignment'])
            }
        }

        with open(output_path, 'w') as f:
            json.dump(report, f, indent=4)

    def visualize_results(self, output_path):
        """Plot the per-frame score series and save the figure to output_path.

        output_path should be an image file (e.g. .png) accepted by
        matplotlib's savefig.
        """
        plt.figure(figsize=(12, 8))

        # Plot metrics over time
        plt.subplot(2, 2, 1)
        plt.plot(self.metrics['movement_consistency'], label='Movement Consistency')
        plt.title('Movement Consistency Over Time')
        plt.legend()

        plt.subplot(2, 2, 2)
        plt.plot(self.metrics['balance_scores'], label='Balance Score')
        plt.title('Balance Score Over Time')
        plt.legend()

        plt.subplot(2, 2, 3)
        plt.plot(self.metrics['posture_alignment'], label='Posture Alignment')
        plt.title('Posture Alignment Over Time')
        plt.legend()

        plt.tight_layout()
        plt.savefig(output_path)
        plt.close()
192
+
193
def main():
    """Example usage of the Pilates Video Evaluator on a local video file."""
    evaluator = PilatesVideoEvaluator()

    # Replace with your video path.
    video_path = "pilates_workout.mp4"
    # matplotlib's savefig cannot write .mp4 files; the metric charts are
    # saved as a PNG (the original passed "analyzed_pilates_workout.mp4"
    # to visualize_results, which fails at savefig time).
    plot_path = "pilates_evaluation_plots.png"
    report_path = "pilates_evaluation_report.json"

    def avg(values):
        # Mean of a per-frame score list, 0.0 when empty (np.mean([]) is NaN).
        # The original read metrics['average_movement_consistency'] etc. —
        # keys that are never stored in evaluator.metrics — raising KeyError.
        return float(np.mean(values)) if values else 0.0

    try:
        # Process the video
        print("Starting video analysis...")
        evaluator.process_video(video_path)

        # Print report
        print("\n" + "=" * 50)
        print("PILATES VIDEO EVALUATION REPORT")
        print("=" * 50)

        metrics = evaluator.metrics
        print(f"Video Quality: {metrics['video_quality_score']:.2f}%")
        print(f"Detected Exercises: {', '.join(metrics['detected_exercises'])}")
        print(f"Average Movement Consistency: {avg(metrics['movement_consistency']):.2f}")
        print(f"Average Balance Score: {avg(metrics['balance_scores']):.2f}")
        print(f"Average Posture Alignment: {avg(metrics['posture_alignment']):.2f}")

        # Save report and visualization
        evaluator.generate_report(report_path)
        evaluator.visualize_results(plot_path)

    except Exception as e:
        print(f"Error processing video: {e}")
        print("Make sure you have the required dependencies installed:")
        print("pip install opencv-python numpy matplotlib")


if __name__ == "__main__":
    main()