zlf18 commited on
Commit
4af1644
·
verified ·
1 Parent(s): 89e917e

Upload 10 files

Browse files
fitness_coach/body_parts.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Body Part Groupings and Joint Metadata

Defines how the 17-joint Human3.6M skeleton maps to named body-part groups
for scoring, per-joint noise tolerances, and the parent->child bone pairs
used for angle calculations.
"""

import numpy as np

# Joint indices for 17-joint Human3.6M format
# 0: Hip, 1-3: Right leg, 4-6: Left leg, 7-10: Spine/Head, 11-13: Left arm, 14-16: Right arm
JOINT_NAMES = [
    'Hip',            # 0
    'RightHip',       # 1
    'RightKnee',      # 2
    'RightAnkle',     # 3
    'LeftHip',        # 4
    'LeftKnee',       # 5
    'LeftAnkle',      # 6
    'Spine',          # 7
    'Thorax',         # 8
    'Neck',           # 9
    'Head',           # 10
    'LeftShoulder',   # 11
    'LeftElbow',      # 12
    'LeftWrist',      # 13
    'RightShoulder',  # 14
    'RightElbow',     # 15
    'RightWrist',     # 16
]

# Body part groupings for scoring
# NOTE: groups deliberately overlap (e.g. 'core' is a subset of 'torso',
# and 'upper_body'/'lower_body' cover the limb groups), so per-group scores
# are not statistically independent of each other.
JOINT_GROUPS = {
    'right_arm': [14, 15, 16],  # Right shoulder, elbow, wrist
    'left_arm': [11, 12, 13],  # Left shoulder, elbow, wrist
    'right_leg': [1, 2, 3],  # Right hip, knee, ankle
    'left_leg': [4, 5, 6],  # Left hip, knee, ankle
    'torso': [0, 7, 8, 9, 10],  # Hip, spine, thorax, neck, head
    'core': [0, 7, 8],  # Hip, spine, thorax (for core exercises like push-ups)
    'upper_body': [7, 8, 9, 10, 11, 12, 13, 14, 15, 16],  # Everything above hip
    'lower_body': [0, 1, 2, 3, 4, 5, 6],  # Everything below and including hip
}

# Noise levels per joint type (as fraction of body scale)
# Different joints have different acceptable variation: proximal joints
# (hips/spine) move least, distal joints (wrists/hands) move most.
JOINT_NOISE_LEVELS = {
    'core': 0.02,  # Hip, spine - very tight tolerance
    'shoulders': 0.04,  # Shoulder joints
    'elbows': 0.06,  # Elbow, knee
    'wrists': 0.08,  # Wrist, ankle
    'hands': 0.10,  # Hands, feet - most variation
}

# Map each joint to its noise level category.
# Leg joints reuse the arm categories by analogy of distance from the core
# (hip~shoulder, knee~elbow, ankle~wrist).
JOINT_TO_NOISE_CATEGORY = {
    0: 'core',  # Hip
    1: 'shoulders',  # Right hip (treated as shoulder-like for movement)
    2: 'elbows',  # Right knee
    3: 'wrists',  # Right ankle
    4: 'shoulders',  # Left hip
    5: 'elbows',  # Left knee
    6: 'wrists',  # Left ankle
    7: 'core',  # Spine
    8: 'core',  # Thorax
    9: 'shoulders',  # Neck
    10: 'shoulders',  # Head
    11: 'shoulders',  # Left shoulder
    12: 'elbows',  # Left elbow
    13: 'wrists',  # Left wrist
    14: 'shoulders',  # Right shoulder
    15: 'elbows',  # Right elbow
    16: 'wrists',  # Right wrist
}

# Joint pairs for calculating angles (parent-child relationships).
# Each tuple is a bone: (parent joint index, child joint index).
JOINT_PAIRS = [
    (0, 1),  # Hip -> Right Hip
    (1, 2),  # Right Hip -> Right Knee
    (2, 3),  # Right Knee -> Right Ankle
    (0, 4),  # Hip -> Left Hip
    (4, 5),  # Left Hip -> Left Knee
    (5, 6),  # Left Knee -> Left Ankle
    (0, 7),  # Hip -> Spine
    (7, 8),  # Spine -> Thorax
    (8, 9),  # Thorax -> Neck
    (9, 10),  # Neck -> Head
    (8, 11),  # Thorax -> Left Shoulder
    (11, 12),  # Left Shoulder -> Left Elbow
    (12, 13),  # Left Elbow -> Left Wrist
    (8, 14),  # Thorax -> Right Shoulder
    (14, 15),  # Right Shoulder -> Right Elbow
    (15, 16),  # Right Elbow -> Right Wrist
]
92
+
93
+
94
def get_body_part_joints(part_name):
    """
    Look up the joint indices that belong to a named body-part group.

    Args:
        part_name: Name of body part (e.g., 'right_arm', 'core')

    Returns:
        List of joint indices

    Raises:
        ValueError: If part_name is not a known group.
    """
    try:
        return JOINT_GROUPS[part_name]
    except KeyError:
        raise ValueError(f"Unknown body part: {part_name}. Available: {list(JOINT_GROUPS.keys())}") from None
107
+
108
+
109
def get_joint_noise_level(joint_idx):
    """
    Return the acceptable noise level for one joint.

    Args:
        joint_idx: Joint index (0-16)

    Returns:
        Noise level (float) as fraction of body scale
    """
    category = JOINT_TO_NOISE_CATEGORY.get(joint_idx)
    if category is None:
        # Unknown joint index: fall back to a middle-of-the-road tolerance.
        return 0.05
    return JOINT_NOISE_LEVELS[category]
123
+
124
+
125
def get_all_body_parts():
    """
    Return the names of every defined body-part group.

    Returns:
        List of body part names
    """
    # Iterating a dict yields its keys; materialize a fresh list so callers
    # can mutate it without touching JOINT_GROUPS.
    return [name for name in JOINT_GROUPS]
133
+
134
+
135
def get_joint_name(joint_idx):
    """
    Return the human-readable name for a joint index.

    Args:
        joint_idx: Joint index (0-16)

    Returns:
        Joint name string; a synthetic "Joint_<idx>" placeholder for
        out-of-range indices (including negatives).
    """
    # Explicit range guard (not try/except) so negative indices do not
    # silently wrap around to the end of JOINT_NAMES.
    if joint_idx < 0 or joint_idx >= len(JOINT_NAMES):
        return f"Joint_{joint_idx}"
    return JOINT_NAMES[joint_idx]
148
+
149
+
150
def get_joints_for_exercise(exercise_type):
    """
    Return the body-part groups relevant to a given exercise.

    Args:
        exercise_type: Type of exercise (e.g., 'pushup', 'squat', 'plank');
            case-insensitive.

    Returns:
        List of body part names relevant to the exercise; every group for
        'all' or an unrecognized exercise.
    """
    focus_map = {
        'pushup': ['core', 'right_arm', 'left_arm', 'torso'],
        'squat': ['core', 'right_leg', 'left_leg', 'torso'],
        'plank': ['core', 'torso', 'right_arm', 'left_arm'],
        'lunge': ['core', 'right_leg', 'left_leg', 'torso'],
    }
    key = exercise_type.lower()
    if key in focus_map:
        return focus_map[key]
    # 'all' or unknown exercise: every group is relevant.
    return list(JOINT_GROUPS.keys())
169
+
170
+
171
def calculate_body_scale(poses):
    """
    Calculate body scale (hip-to-thorax distance) for normalization.

    Fixes vs. original: docstring said "hip-to-shoulder" but the code
    measures Hip (joint 0) to Thorax (joint 8); return is now a plain
    Python float as documented (was np.float64).

    Args:
        poses: Array-like of shape [frames, 17, 3] or [17, 3]

    Returns:
        Average body scale (float), averaged over frames.
    """
    poses = np.asarray(poses, dtype=float)
    if poses.ndim == 2:
        # Promote a single pose to a one-frame sequence.
        poses = poses[np.newaxis, :, :]

    # Hip (0) to Thorax (8) distance per frame.
    hip_to_thorax = np.linalg.norm(poses[:, 0, :] - poses[:, 8, :], axis=1)
    return float(np.mean(hip_to_thorax))
188
+
189
+
190
if __name__ == "__main__":
    # Quick smoke test: dump the static metadata tables for inspection.
    print("Body Part Groups:")
    for group_name, indices in JOINT_GROUPS.items():
        names = [JOINT_NAMES[idx] for idx in indices]
        print(f"  {group_name}: {indices} - {names}")

    print("\nJoint Noise Levels:")
    for idx, joint_name in enumerate(JOINT_NAMES):
        print(f"  {joint_name}: {get_joint_noise_level(idx)}")

    print("\nExercise Focus (Push-up):")
    print(f"  {get_joints_for_exercise('pushup')}")
203
+
fitness_coach/comparison.py ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Motion Comparison Module
3
+ Main module that compares user poses to reference and generates scores
4
+ """
5
+
6
+ import numpy as np
7
+ from .temporal_align import align_poses_sequences, find_phase_alignment
8
+ from .noise_scoring import score_with_statistical_bounds, score_with_noisy_reference
9
+ from .utils import normalize_body_scale, center_poses, calculate_joint_distances
10
+ from .body_parts import get_joints_for_exercise, get_body_part_joints
11
+
12
+
13
def compare_motions(user_poses, ref_poses, noisy_samples=None, exercise_type='pushup',
                    use_dtw=True, scoring_method='statistical'):
    """
    Compare user motion to reference and generate comprehensive scores.

    Pipeline: (1) temporal alignment (DTW or resampling), (2) spatial
    normalization + hip-centering, (3) scoring, (4) exercise-specific
    feedback.

    Fixes vs. original: removed dead `isinstance(..., list)` checks that
    followed the `np.array` conversions (np.array already accepts lists);
    the triplicated interpolation-fallback code is factored into one local
    helper; `alignment_score if use_dtw else None` simplified — the
    variable is already None in every non-DTW / DTW-failed path.

    Args:
        user_poses: User pose sequence [frames, 17, 3]
        ref_poses: Reference pose sequence [frames, 17, 3]
        noisy_samples: Pre-generated noisy samples [n_samples, frames, 17, 3] (optional)
        exercise_type: Type of exercise for body part focus
        use_dtw: If True, use DTW for temporal alignment (slower but more accurate)
        scoring_method: 'statistical' (faster) or 'noisy_samples' (more accurate)

    Returns:
        Dictionary with comprehensive scoring results
    """
    from .utils import interpolate_sequence

    # np.array handles lists, tuples and ndarrays uniformly.
    user_poses = np.array(user_poses)
    ref_poses = np.array(ref_poses)

    print("Comparing motions:")
    print(f"  User: {len(user_poses)} frames")
    print(f"  Reference: {len(ref_poses)} frames")

    def _resample_both():
        # Fallback alignment: resample both sequences to the longer length.
        target = max(len(user_poses), len(ref_poses))
        return (interpolate_sequence(user_poses, target),
                interpolate_sequence(ref_poses, target))

    # Step 1: Temporal alignment
    alignment_score = None
    if use_dtw:
        print("\n[1/4] Aligning sequences with DTW...")
        try:
            user_aligned, ref_aligned, alignment_score = find_phase_alignment(user_poses, ref_poses)
            print(f"  Alignment score: {alignment_score:.4f}")
        except Exception as e:
            # Best-effort: DTW can fail on degenerate sequences; fall back
            # to plain resampling rather than aborting the comparison.
            print(f"  DTW failed, using interpolation: {e}")
            user_aligned, ref_aligned = _resample_both()
    else:
        print("\n[1/4] Aligning sequences with interpolation...")
        user_aligned, ref_aligned = _resample_both()

    # Step 2: Spatial normalization (user's scale is the shared reference
    # so both skeletons are compared at the same size).
    print("\n[2/4] Normalizing poses...")
    user_norm, user_scale = normalize_body_scale(user_aligned)
    ref_norm, ref_scale = normalize_body_scale(ref_aligned, reference_scale=user_scale)

    # Center both poses at the hip to remove global translation.
    user_centered = center_poses(user_norm)
    ref_centered = center_poses(ref_norm)

    # Step 3: Calculate scores
    print(f"\n[3/4] Calculating scores ({scoring_method} method)...")

    if scoring_method == 'noisy_samples' and noisy_samples is not None:
        # Push the noisy samples through the same alignment/normalization/
        # centering pipeline so they are directly comparable.
        target_length = len(user_centered)
        noisy_aligned = np.array([
            interpolate_sequence(sample, target_length)
            for sample in noisy_samples
        ])
        noisy_norm = np.array([
            normalize_body_scale(sample, reference_scale=ref_scale)[0]
            for sample in noisy_aligned
        ])
        noisy_centered = np.array([center_poses(sample) for sample in noisy_norm])

        scores = score_with_noisy_reference(
            user_centered,
            ref_centered,
            noisy_samples=noisy_centered
        )
    else:
        # Statistical-bounds method (faster).
        scores = score_with_statistical_bounds(user_centered, ref_centered)

    # Step 4: Exercise-specific analysis
    print("\n[4/4] Generating exercise-specific feedback...")
    relevant_parts = get_joints_for_exercise(exercise_type)

    # Filter scores down to the body parts this exercise cares about.
    relevant_scores = {
        part: scores['body_part_scores'][part]
        for part in relevant_parts
        if part in scores['body_part_scores']
    }

    # Average for relevant parts; fall back to the overall score when no
    # relevant part produced a score.
    relevant_avg = np.mean(list(relevant_scores.values())) if relevant_scores else scores['overall_score']

    feedback = generate_feedback(scores, relevant_scores, exercise_type)

    results = {
        'overall_score': float(scores['overall_score']),
        'relevant_score': float(relevant_avg),  # Score for exercise-specific body parts
        'body_part_scores': scores['body_part_scores'],
        'relevant_body_part_scores': relevant_scores,
        'frame_scores': scores.get('frame_scores', []),
        'per_joint_scores': scores.get('per_joint_scores', []),
        'feedback': feedback,
        'exercise_type': exercise_type,
        'num_frames_user': len(user_poses),
        'num_frames_ref': len(ref_poses),
        'num_frames_aligned': len(user_centered),
        'details': {
            'reference_poses': ref_centered,
            'user_poses': user_poses,
            'aligned_user_poses': user_centered,
            'body_part_details': scores.get('body_part_details', {}),
            # None unless DTW ran and succeeded.
            'alignment_score': alignment_score,
        }
    }

    print(f"\n✓ Comparison complete!")
    print(f"  Overall score: {results['overall_score']:.2f}")
    print(f"  Relevant score: {results['relevant_score']:.2f}")

    return results
142
+
143
+
144
def generate_feedback(scores, relevant_scores, exercise_type):
    """
    Build a list of human-readable coaching tips from scoring results.

    Args:
        scores: Full scoring dictionary (must contain 'overall_score')
        relevant_scores: Mapping of body-part name -> score for the
            exercise-relevant parts
        exercise_type: Type of exercise (case-insensitive)

    Returns:
        List of feedback strings, ordered from general to specific
    """
    tips = []

    # Overall tier message: the first threshold the score clears wins.
    overall = scores['overall_score']
    tiers = (
        (90, "Excellent form! Keep up the great work."),
        (75, "Good form overall. Minor adjustments can improve your technique."),
        (60, "Decent form, but there's room for improvement."),
    )
    for threshold, message in tiers:
        if overall >= threshold:
            tips.append(message)
            break
    else:
        tips.append("Focus on improving your form. Consider reviewing the reference video.")

    exercise = exercise_type.lower()
    if exercise == 'pushup':
        # Core engagement check.
        core = relevant_scores.get('core')
        if core is not None and core < 70:
            tips.append("Keep your core engaged and back straight throughout the movement.")

        # Arm quality and symmetry checks.
        right, left = relevant_scores.get('right_arm', 0), relevant_scores.get('left_arm', 0)
        if np.mean([right, left]) < 70:
            tips.append("Focus on maintaining consistent arm positioning. Both arms should move symmetrically.")
        elif abs(right - left) > 15:
            tips.append("Your arms are moving asymmetrically. Try to keep both sides balanced.")

    elif exercise == 'squat':
        # Leg quality and symmetry checks.
        right, left = relevant_scores.get('right_leg', 0), relevant_scores.get('left_leg', 0)
        if np.mean([right, left]) < 70:
            tips.append("Focus on proper leg positioning and depth in your squats.")
        elif abs(right - left) > 15:
            tips.append("Your legs are moving asymmetrically. Focus on balanced movement.")

    # Call out the single weakest relevant body part if it is clearly lagging.
    if relevant_scores:
        worst_name, worst_score = min(relevant_scores.items(), key=lambda item: item[1])
        if worst_score < 65:
            tips.append(f"Pay special attention to your {worst_name.replace('_', ' ')} - it needs the most improvement.")

    return tips
201
+
202
+
203
def score_exercise(user_video_path, reference_id='pushup', references_dir='references',
                   use_dtw=True, scoring_method='statistical', force_reprocess=False):
    """
    Run the complete pipeline: process a user video and score it against a
    stored reference.

    Args:
        user_video_path: Path to user video
        reference_id: Exercise type / reference ID
        references_dir: Directory containing references
        use_dtw: Use DTW for alignment
        scoring_method: Scoring method to use
        force_reprocess: Force reprocessing even if cached data exists

    Returns:
        Scoring results dictionary (see compare_motions)
    """
    import shutil
    from pathlib import Path

    from .reference_processor import load_reference
    from .user_processor import process_user_video

    banner = "=" * 60
    print(banner)
    print("EXERCISE SCORING PIPELINE")
    print(banner)

    # --- Reference data ---
    print(f"\nLoading reference: {reference_id}")
    reference = load_reference(reference_id, references_dir=references_dir)
    ref_poses = reference['poses_3d']
    meta = reference['metadata']

    print(f"  Reference frames: {len(ref_poses)}")
    print(f"  Exercise type: {meta['exercise_type']}")

    # --- Optional cache invalidation ---
    if force_reprocess:
        video_path = Path(user_video_path)
        cached = Path('user_videos_cache') / video_path.stem
        if cached.exists():
            print(f"\n⚠ Clearing cache for {video_path.name}")
            shutil.rmtree(cached)

    # --- User poses (served from cache when available) ---
    print(f"\nProcessing user video: {user_video_path}")
    user_poses = process_user_video(user_video_path, cleanup=False)['poses_3d']

    print(f"  User frames: {len(user_poses)}")

    # --- Score ---
    print("\nComparing motions...")
    return compare_motions(
        user_poses,
        ref_poses,
        noisy_samples=reference.get('noisy_samples'),
        exercise_type=meta['exercise_type'],
        use_dtw=use_dtw,
        scoring_method=scoring_method,
    )
264
+
265
+
266
def _build_arg_parser():
    """Define the CLI for the comparison script."""
    import argparse

    parser = argparse.ArgumentParser(description='Compare user video to reference')
    parser.add_argument('--user-video', type=str, required=True, help='Path to user video')
    parser.add_argument('--reference', type=str, default='pushup', help='Reference ID')
    parser.add_argument('--references-dir', type=str, default='references', help='References directory')
    parser.add_argument('--no-dtw', action='store_true', help='Disable DTW alignment')
    parser.add_argument('--method', type=str, default='statistical', choices=['statistical', 'noisy_samples'],
                        help='Scoring method')
    parser.add_argument('--force-reprocess', action='store_true', help='Force reprocessing (ignore cache)')
    parser.add_argument('--json', action='store_true', help='Output results as JSON for API consumption')
    parser.add_argument('--output', type=str, help='Save JSON output to file')
    parser.add_argument('--generate-video', action='store_true', help='Generate side-by-side comparison video')
    parser.add_argument('--video-output', type=str, help='Path for comparison video (default: comparison_<user_video>.mp4)')
    parser.add_argument('--video-fps', type=int, default=30, help='FPS for comparison video')
    return parser


def _build_api_response(args, results):
    """Shape scoring results into a JSON-serializable dict for API/LLM use."""
    from pathlib import Path

    # Fix vs. original: read alignment_score once and test `is not None`,
    # so a legitimate DTW score of 0.0 is reported instead of becoming null.
    alignment_score = results['details'].get('alignment_score')
    alignment_quality = float(round(alignment_score, 4)) if alignment_score is not None else None

    return {
        "status": "success",
        "exercise": {
            "type": results['exercise_type'],
            "reference": args.reference,
            "user_video": str(Path(args.user_video).name)
        },
        "scores": {
            "overall": float(round(results['overall_score'], 2)),
            "relevant": float(round(results['relevant_score'], 2)),
            "body_parts": {
                part: float(round(score, 2))
                for part, score in results['relevant_body_part_scores'].items()
            }
        },
        "metrics": {
            "frames": {
                "user": int(results['num_frames_user']),
                "reference": int(results['num_frames_ref']),
                "aligned": int(results['num_frames_aligned'])
            },
            "alignment_quality": alignment_quality,
            "body_part_details": {
                part: {
                    "position_error_avg": float(round(metrics.get('position_error', 0), 4)),
                    "position_error_max": float(round(metrics.get('max_position_error', 0), 4)),
                    "tolerance_threshold": float(round(metrics.get('tolerance_threshold', 0), 4)),
                    "in_tolerance_percentage": float(round(metrics.get('in_tolerance_percentage', 0), 1))
                }
                for part, metrics in results['details'].get('body_part_details', {}).items()
                if part in results['relevant_body_part_scores']
            }
        },
        "feedback": results['feedback'],
        "llm_context": {
            "description": f"User performed {results['exercise_type']} exercise",
            "scoring_method": args.method,
            "interpretation": {
                "score_range": "0-100, where 100 is perfect form matching the reference",
                "position_error": "Lower is better. Measures average distance from reference pose in normalized units",
                "in_tolerance": "Percentage of time user's form was within acceptable bounds"
            }
        }
    }


def _emit_json(args, results):
    """Print the JSON API response, or save it to --output when given."""
    import json

    json_output = json.dumps(_build_api_response(args, results), indent=2)
    if args.output:
        with open(args.output, 'w') as f:
            f.write(json_output)
        print(f"✓ Results saved to {args.output}")
    else:
        print(json_output)


def _print_human_results(results):
    """Print scores, feedback and debug metrics in a human-readable layout."""
    print("\n" + "="*60)
    print("SCORING RESULTS")
    print("="*60)
    print(f"\nOverall Score: {results['overall_score']:.2f}/100")
    print(f"Relevant Score: {results['relevant_score']:.2f}/100")
    print(f"\nBody Part Scores:")
    for part, score in results['relevant_body_part_scores'].items():
        print(f"  {part.replace('_', ' ').title()}: {score:.2f}/100")
    print(f"\nFeedback:")
    for i, fb in enumerate(results['feedback'], 1):
        print(f"  {i}. {fb}")

    # Debug information
    print("\n" + "="*60)
    print("DEBUG INFORMATION")
    print("="*60)
    details = results.get('details', {})
    print(f"\nFrame Counts:")
    print(f"  Reference frames: {len(details.get('reference_poses', []))}")
    print(f"  User frames (original): {len(details.get('user_poses', []))}")
    print(f"  User frames (aligned): {len(details.get('aligned_user_poses', []))}")

    if details.get('alignment_score') is not None:
        print(f"\nAlignment:")
        print(f"  DTW alignment score: {details['alignment_score']:.4f}")

    print(f"\nDetailed Body Part Metrics:")
    for part, metrics in details.get('body_part_details', {}).items():
        if part in results['relevant_body_part_scores']:
            print(f"\n{part.replace('_', ' ').title()}:")
            print(f"  Position Error (avg): {metrics.get('position_error', 0):.4f}")
            print(f"  Position Error (max): {metrics.get('max_position_error', 0):.4f}")
            print(f"  Tolerance Threshold: {metrics.get('tolerance_threshold', 0):.4f}")
            print(f"  In-tolerance %: {metrics.get('in_tolerance_percentage', 0):.1f}%")


def _generate_comparison_video(args):
    """Render a side-by-side comparison video from cached pose3D images."""
    from pathlib import Path

    print("\n" + "="*60)
    print("GENERATING COMPARISON VIDEO")
    print("="*60)

    try:
        from .video_from_images import create_comparison_video_from_images
        from .user_processor import process_user_video
        from .reference_processor import load_reference

        # Determine output path.
        if args.video_output:
            video_output = args.video_output
        else:
            user_video_stem = Path(args.user_video).stem
            video_output = f"comparison_{user_video_stem}.mp4"

        # User images: user_videos_cache/{video_name}/pose3D
        user_video_name = Path(args.user_video).stem
        user_image_dir = Path('user_videos_cache') / user_video_name / 'pose3D'

        # Reference images: references/{exercise}/temp_processing/pose3D
        ref_data = load_reference(args.reference, references_dir=args.references_dir)
        ref_dir = Path(ref_data['ref_dir'])
        reference_image_dir = ref_dir / 'temp_processing' / 'pose3D'

        # Regenerate the user images if the cache is missing.
        if not user_image_dir.exists():
            print(f"⚠ Warning: User pose3D images not found at {user_image_dir}")
            print("  Attempting to process user video to generate images...")
            process_user_video(args.user_video, cleanup=False)
            user_image_dir = Path('user_videos_cache') / user_video_name / 'pose3D'

        if not reference_image_dir.exists():
            # Try alternative location
            reference_image_dir = ref_dir / 'pose3D'
            if not reference_image_dir.exists():
                raise FileNotFoundError(
                    f"Reference pose3D images not found. Tried:\n"
                    f"  {ref_dir / 'temp_processing' / 'pose3D'}\n"
                    f"  {ref_dir / 'pose3D'}"
                )

        print(f"  User images: {user_image_dir}")
        print(f"  Reference images: {reference_image_dir}")

        # Create the video from existing images.
        create_comparison_video_from_images(
            user_image_dir=str(user_image_dir),
            reference_image_dir=str(reference_image_dir),
            output_path=video_output,
            user_video_name="Your Form",
            reference_name="Correct Form",
            fps=args.video_fps
        )

    except ImportError as e:
        print(f"✗ Error: Missing dependency for video generation")
        print(f"  {e}")
        print("\nPlease ensure matplotlib and ffmpeg are installed:")
        print("  pip install matplotlib")
        print("  And install FFmpeg from: https://ffmpeg.org/download.html")
    except Exception as e:
        # Video generation is best-effort: report and keep the scores.
        print(f"✗ Error generating comparison video: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    args = _build_arg_parser().parse_args()

    try:
        results = score_exercise(
            args.user_video,
            reference_id=args.reference,
            references_dir=args.references_dir,
            use_dtw=not args.no_dtw,
            scoring_method=args.method,
            force_reprocess=args.force_reprocess
        )

        if args.json:
            _emit_json(args, results)
        else:
            _print_human_results(results)

        if args.generate_video:
            _generate_comparison_video(args)

    except Exception as e:
        # Top-level boundary: report the failure with a traceback.
        print(f"\nERROR: {e}")
        import traceback
        traceback.print_exc()
466
+
fitness_coach/noise_scoring.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Noise-Based Scoring System
3
+ Generates noisy reference samples and scores user poses against them
4
+ """
5
+
6
+ import numpy as np
7
+ from .body_parts import (
8
+ get_joint_noise_level,
9
+ calculate_body_scale,
10
+ JOINT_GROUPS
11
+ )
12
+
13
+
14
def create_noisy_samples(ref_poses, n_samples=100, noise_std=None, per_joint_noise=True):
    """
    Create noisy reference samples for scoring.

    Noise is i.i.d. Gaussian per coordinate, scaled by the reference body
    scale so the tolerance is proportional to subject size.

    Fixes vs. original: the per-sample/per-frame/per-joint Python loops are
    replaced by a single vectorized `np.random.normal` draw (same
    distribution, orders of magnitude faster); the `noise_std` parameter is
    no longer mutated inside the loop.

    Args:
        ref_poses: Reference poses [frames, 17, 3]
        n_samples: Number of noisy samples to generate
        noise_std: Overall noise standard deviation (as fraction of body scale).
            Only used when per_joint_noise is False; defaults to 0.05.
        per_joint_noise: If True, use different noise levels per joint

    Returns:
        noisy_samples: Array of shape [n_samples, frames, 17, 3]
    """
    ref_poses = np.asarray(ref_poses, dtype=float)
    body_scale = calculate_body_scale(ref_poses)

    if per_joint_noise:
        # Per-joint standard deviations, broadcast over samples/frames/coords.
        stds = np.array([get_joint_noise_level(j) for j in range(17)]) * body_scale
        stds = stds.reshape(1, 1, 17, 1)
    else:
        # Uniform noise; default 5% of body scale.
        stds = (0.05 if noise_std is None else noise_std) * body_scale

    # One draw covering all samples, frames, joints and coordinates.
    shape = (n_samples,) + ref_poses.shape  # [n_samples, frames, 17, 3]
    noise = np.random.normal(loc=0.0, scale=1.0, size=shape) * stds

    return ref_poses[np.newaxis, ...] + noise
58
+
59
+
60
def calculate_statistical_bounds(ref_poses, noise_std=0.015, confidence=0.95):
    """
    Calculate statistical bounds (mean ± z*std) for reference poses by
    Monte-Carlo sampling noisy variants of the reference.

    Fixes vs. original: an unused `body_scale` local is removed, and the
    z-score is no longer hard-coded to 2.576 for every confidence other
    than 0.95 — the legacy constants are kept for 0.95/0.99 and any other
    level is derived from the normal quantile function.

    Args:
        ref_poses: Reference poses [frames, 17, 3]
        noise_std: Noise standard deviation (as fraction of body scale, default 1.5%)
        confidence: Two-sided confidence level in (0, 1), e.g. 0.95

    Returns:
        mean_poses: Mean poses [frames, 17, 3]
        lower_bound: Lower bound [frames, 17, 3]
        upper_bound: Upper bound [frames, 17, 3]
        tolerance: Half-width of the band per coordinate [frames, 17, 3]
    """
    from statistics import NormalDist

    ref_poses = np.array(ref_poses)

    # Generate many noisy samples and measure their empirical spread.
    n_samples = 1000
    noisy_samples = create_noisy_samples(
        ref_poses,
        n_samples=n_samples,
        noise_std=noise_std,
        per_joint_noise=False
    )

    mean_poses = np.mean(noisy_samples, axis=0)
    std_poses = np.std(noisy_samples, axis=0)

    # z-score for the requested two-sided confidence level; keep the
    # original rounded constants for the two common cases.
    if confidence == 0.95:
        z_score = 1.96
    elif confidence == 0.99:
        z_score = 2.576
    else:
        z_score = NormalDist().inv_cdf(0.5 + confidence / 2.0)

    # Tolerance is the distance from the mean to either bound.
    tolerance = z_score * std_poses
    lower_bound = mean_poses - tolerance
    upper_bound = mean_poses + tolerance

    return mean_poses, lower_bound, upper_bound, tolerance
102
+
103
+
104
def score_with_noisy_reference(user_poses, ref_poses, noisy_samples=None, n_samples=100):
    """
    Score user poses against noisy reference samples.

    Each joint's score is the percentage of noisy samples that land farther
    from the reference than the user did (higher = better form).

    Fixes vs. original: the per-frame/per-joint Python loops are replaced
    by equivalent vectorized broadcasting (identical results, much faster),
    and the duplicated `.utils` imports are merged.

    Args:
        user_poses: User pose sequence [frames, 17, 3]
        ref_poses: Reference pose sequence [frames, 17, 3]
        noisy_samples: Pre-generated noisy samples [n_samples, frames, 17, 3];
            generated on the fly when None
        n_samples: Number of samples to generate if noisy_samples is None

    Returns:
        scores: Dictionary with overall, per-body-part, per-frame and
        per-joint scores
    """
    from .utils import interpolate_sequence, normalize_body_scale

    user_poses = np.array(user_poses)
    ref_poses = np.array(ref_poses)

    # Generate noisy samples if not provided.
    if noisy_samples is None:
        noisy_samples = create_noisy_samples(ref_poses, n_samples=n_samples)

    # Temporal alignment (simple resampling here; DTW lives in comparison.py).
    target_length = max(len(user_poses), len(ref_poses))
    user_aligned = interpolate_sequence(user_poses, target_length)
    ref_aligned = interpolate_sequence(ref_poses, target_length)
    noisy_aligned = np.array([
        interpolate_sequence(sample, target_length)
        for sample in noisy_samples
    ])

    # Normalize by body scale (noisy samples use the reference's scale).
    user_norm, _ = normalize_body_scale(user_aligned)
    ref_norm, ref_scale = normalize_body_scale(ref_aligned)
    noisy_norm = np.array([
        normalize_body_scale(sample, reference_scale=ref_scale)[0]
        for sample in noisy_aligned
    ])

    # Vectorized distances:
    #   user_to_ref:  [frames, 17]     distance of each user joint to reference
    #   noisy_to_ref: [n_samples, frames, 17]  same for every noisy sample
    user_to_ref = np.linalg.norm(user_norm - ref_norm, axis=2)
    noisy_to_ref = np.linalg.norm(noisy_norm - ref_norm[np.newaxis], axis=3)

    # Score = % of noisy samples that are farther from the reference than
    # the user (i.e., user is within the acceptable range).
    frame_scores = (noisy_to_ref > user_to_ref[np.newaxis]).mean(axis=0) * 100  # [frames, 17]

    # Aggregate by body part.
    body_part_scores = {
        part_name: float(np.mean(frame_scores[:, joint_indices]))
        for part_name, joint_indices in JOINT_GROUPS.items()
    }

    return {
        'overall_score': float(np.mean(frame_scores)),
        'body_part_scores': body_part_scores,
        'frame_scores': frame_scores.tolist(),
        'per_joint_scores': np.mean(frame_scores, axis=0).tolist()
    }
192
+
193
+
194
def score_with_statistical_bounds(user_poses, ref_poses, noise_std=0.015):
    """
    Score using statistical bounds (faster than noisy samples)

    The score for a joint is the percentage of frames where the user's
    normalized joint position falls within the tolerance envelope derived
    from the reference sequence.

    Args:
        user_poses: User pose sequence [frames, 17, 3]
        ref_poses: Reference pose sequence [frames, 17, 3]
        noise_std: Noise standard deviation (as fraction of body scale)
            Default 0.015 = 1.5% tolerance, good for form checking

    Returns:
        scores: Dictionary with 'overall_score', 'body_part_scores',
        'body_part_details' (error/tolerance stats per part),
        'frame_scores' and 'per_joint_scores'
    """
    user_poses = np.array(user_poses)
    ref_poses = np.array(ref_poses)

    # Calculate bounds (already accounts for body scale)
    mean_poses, lower_bound, upper_bound, tolerance = calculate_statistical_bounds(
        ref_poses, noise_std=noise_std
    )

    # Align temporally
    from .utils import interpolate_sequence
    target_length = max(len(user_poses), len(ref_poses))
    user_aligned = interpolate_sequence(user_poses, target_length)
    mean_aligned = interpolate_sequence(mean_poses, target_length)

    # Normalize poses (but not tolerance - it's already in the right scale)
    from .utils import normalize_body_scale
    user_norm, user_scale = normalize_body_scale(user_aligned)
    mean_norm, _ = normalize_body_scale(mean_aligned)

    # Scale the tolerance by the same factor used for normalization
    # This keeps it proportional to the noise_std parameter
    # NOTE(review): the tolerance is divided by the USER's body scale while
    # the bounds were computed from the reference - confirm this matches the
    # contract of calculate_statistical_bounds.
    body_scale = calculate_body_scale(user_aligned)
    tolerance_scaled = tolerance * (1.0 / body_scale)
    tolerance_aligned = interpolate_sequence(tolerance_scaled, target_length)

    # Check if user poses are within tolerance
    distances = np.linalg.norm(user_norm - mean_norm, axis=2)  # [frames, 17]
    tolerance_per_joint = np.linalg.norm(tolerance_aligned, axis=2)  # [frames, 17]

    # Score: percentage of time within tolerance
    within_tolerance = distances < tolerance_per_joint
    joint_scores = np.mean(within_tolerance, axis=0) * 100  # [17]
    frame_scores = np.mean(within_tolerance, axis=1) * 100  # [frames]

    # Aggregate by body part with detailed metrics
    body_part_scores = {}
    body_part_details = {}

    for part_name, joint_indices in JOINT_GROUPS.items():
        # Score
        body_part_scores[part_name] = float(np.mean(joint_scores[joint_indices]))

        # Detailed metrics for this body part
        part_distances = distances[:, joint_indices]  # [frames, num_joints_in_part]
        part_tolerance = tolerance_per_joint[:, joint_indices]
        part_within = within_tolerance[:, joint_indices]

        body_part_details[part_name] = {
            'position_error': float(np.mean(part_distances)),
            'max_position_error': float(np.max(part_distances)),
            'in_tolerance_percentage': float(np.mean(part_within) * 100),
            'tolerance_threshold': float(np.mean(part_tolerance)),
        }

    overall_score = float(np.mean(frame_scores))

    return {
        'overall_score': overall_score,
        'body_part_scores': body_part_scores,
        'body_part_details': body_part_details,
        'frame_scores': frame_scores.tolist(),
        'per_joint_scores': joint_scores.tolist()
    }
270
+
271
+
272
if __name__ == "__main__":
    # Smoke-test the noise-based scoring pipeline with random pose data.
    print("Testing noise-based scoring...")

    # Create test data: a random reference and a slightly perturbed "user"
    ref_poses = np.random.randn(50, 17, 3)
    user_poses = ref_poses + np.random.normal(0, 0.1, ref_poses.shape)  # Slightly different

    # Test noisy sample generation
    noisy_samples = create_noisy_samples(ref_poses, n_samples=50)
    print(f"Generated {len(noisy_samples)} noisy samples")
    print(f"Noisy samples shape: {noisy_samples.shape}")

    # Test statistical bounds
    mean, lower, upper, tolerance = calculate_statistical_bounds(ref_poses)
    print(f"Statistical bounds calculated: mean shape {mean.shape}")

    # Test scoring
    scores = score_with_statistical_bounds(user_poses, ref_poses)
    print(f"\nScoring results:")
    print(f"  Overall score: {scores['overall_score']:.2f}")
    print(f"  Body part scores: {scores['body_part_scores']}")

    print("\nNoise scoring tests passed!")
296
+
fitness_coach/reference_processor.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Reference Video Processor
3
+ Processes reference videos once and saves noisy samples for scoring
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import numpy as np
9
+ import json
10
+ from pathlib import Path
11
+
12
+ # Add parent directory and demo directory to path
13
+ project_root = Path(__file__).parent.parent
14
+ sys.path.insert(0, str(project_root))
15
+ sys.path.insert(0, str(project_root / 'demo'))
16
+
17
+ from fitness_coach.noise_scoring import create_noisy_samples, calculate_statistical_bounds
18
+ from fitness_coach.body_parts import calculate_body_scale, get_joints_for_exercise
19
+
20
+
21
def process_reference_video(video_path, exercise_type='pushup', output_dir=None, n_samples=100):
    """
    Process a reference video and generate noisy samples for scoring

    Pipeline: 2D pose extraction -> 3D lifting -> noisy sample generation ->
    statistical bounds -> save everything under the output directory.

    Args:
        video_path: Path to reference video file
        exercise_type: Type of exercise (e.g., 'pushup', 'squat')
        output_dir: Directory to save processed data (default: references/{exercise_type}/)
        n_samples: Number of noisy samples to generate

    Returns:
        Dictionary with paths to saved files and metadata

    Raises:
        FileNotFoundError: if the video or the produced 3D keypoints are missing.
    """
    # Change to project root for imports to work correctly
    original_cwd = os.getcwd()
    os.chdir(project_root)

    try:
        # Import after changing directory
        from demo.vis import get_pose2D, get_pose3D
    finally:
        os.chdir(original_cwd)

    video_path = Path(video_path)
    if not video_path.exists():
        raise FileNotFoundError(f"Video not found: {video_path}")

    # Set up output directory
    if output_dir is None:
        output_dir = Path('references') / exercise_type
    else:
        output_dir = Path(output_dir)

    output_dir.mkdir(parents=True, exist_ok=True)

    print(f"Processing reference video: {video_path.name}")
    print(f"Exercise type: {exercise_type}")
    print(f"Output directory: {output_dir}")

    # Create temporary output directory for processing
    temp_output = output_dir / 'temp_processing'
    temp_output.mkdir(exist_ok=True)

    # Format output directory string (get_pose3D expects trailing slash)
    # Use absolute path to avoid issues when changing directories
    temp_output_abs = temp_output.resolve()
    output_dir_str = str(temp_output_abs).replace('\\', '/')
    if not output_dir_str.endswith('/'):
        output_dir_str += '/'

    video_path_abs = video_path.resolve()

    # Change to project root for processing
    os.chdir(project_root)

    # Save original argv and temporarily clear it to avoid argparse conflicts
    # (demo.vis parses sys.argv on import/use)
    original_argv = sys.argv.copy()
    sys.argv = [sys.argv[0]]  # Keep only script name

    try:
        # Step 1: Extract 2D poses
        print("\n[1/4] Extracting 2D poses...")
        try:
            # get_pose2D expects output_dir with trailing slash
            # It adds 'input_2D/' to it (line 95 in vis.py)
            get_pose2D(str(video_path_abs), output_dir_str)
        except Exception as e:
            print(f"Error in 2D pose extraction: {e}")
            raise

        # Step 2: Extract 3D poses
        print("\n[2/4] Extracting 3D poses...")
        try:
            # get_pose3D also expects output_dir with trailing slash
            # It looks for output_dir + 'input_2D/keypoints.npz' (line 190 in vis.py)
            get_pose3D(str(video_path_abs), output_dir_str)
        except Exception as e:
            print(f"Error in 3D pose extraction: {e}")
            raise
    finally:
        sys.argv = original_argv  # Restore original argv
        os.chdir(original_cwd)

    # Step 3: Load 3D poses
    # get_pose3D saves to output_dir + 'keypoints_3D.npz' (line 279 in vis.py)
    keypoints_3d_path = temp_output_abs / 'keypoints_3D.npz'

    if not keypoints_3d_path.exists():
        # Try alternative locations in case path handling differs
        alt_paths = [
            temp_output_abs / 'keypoints_3D.npz',
            temp_output_abs.parent / 'keypoints_3D.npz',
        ]
        for alt_path in alt_paths:
            if alt_path.exists():
                keypoints_3d_path = alt_path
                break
        else:
            # List what files actually exist to help debug
            print(f"\nDebug: Looking for keypoints_3D.npz")
            print(f"Expected location: {keypoints_3d_path}")
            print(f"Files in temp_processing:")
            if temp_output_abs.exists():
                for item in temp_output_abs.rglob('*'):
                    if item.is_file():
                        print(f"  {item}")
            raise FileNotFoundError(f"3D keypoints not found: {keypoints_3d_path}")

    keypoints_3d = np.load(str(keypoints_3d_path), allow_pickle=True)['reconstruction']
    print(f"Loaded {len(keypoints_3d)} frames of 3D poses")

    # Convert to numpy array if needed
    if isinstance(keypoints_3d, list):
        keypoints_3d = np.array(keypoints_3d)

    # Step 4: Generate noisy samples
    print(f"\n[3/4] Generating {n_samples} noisy samples...")
    noisy_samples = create_noisy_samples(keypoints_3d, n_samples=n_samples, per_joint_noise=True)
    print(f"Generated noisy samples shape: {noisy_samples.shape}")

    # Step 5: Calculate metadata
    print("\n[4/4] Calculating metadata...")
    body_scale = calculate_body_scale(keypoints_3d)
    relevant_body_parts = get_joints_for_exercise(exercise_type)

    # Calculate statistical bounds
    # NOTE(review): noise_std=0.05 here differs from the 0.015 default used
    # at scoring time - confirm this is intentional.
    mean_poses, lower_bound, upper_bound, tolerance = calculate_statistical_bounds(
        keypoints_3d, noise_std=0.05
    )

    metadata = {
        'exercise_type': exercise_type,
        'video_path': str(video_path),
        'video_name': video_path.stem,
        'num_frames': len(keypoints_3d),
        'body_scale': float(body_scale),
        'relevant_body_parts': relevant_body_parts,
        'n_samples': n_samples,
        # File mtime stands in for a processing timestamp
        'timestamp': str(Path(video_path).stat().st_mtime) if video_path.exists() else None
    }

    # Step 6: Save everything
    print("\nSaving processed data...")

    # Save 3D poses
    poses_3d_path = output_dir / 'keypoints_3D.npz'
    np.savez_compressed(str(poses_3d_path), reconstruction=keypoints_3d)
    print(f"  Saved 3D poses: {poses_3d_path}")

    # Save noisy samples
    noisy_samples_path = output_dir / 'noisy_samples.npz'
    np.savez_compressed(str(noisy_samples_path), samples=noisy_samples)
    print(f"  Saved noisy samples: {noisy_samples_path}")

    # Save statistical bounds
    bounds_path = output_dir / 'statistical_bounds.npz'
    np.savez_compressed(
        str(bounds_path),
        mean=mean_poses,
        lower_bound=lower_bound,
        upper_bound=upper_bound,
        tolerance=tolerance
    )
    print(f"  Saved statistical bounds: {bounds_path}")

    # Save metadata
    metadata_path = output_dir / 'metadata.json'
    with open(metadata_path, 'w') as f:
        json.dump(metadata, f, indent=2)
    print(f"  Saved metadata: {metadata_path}")

    # Clean up temporary files (optional - keep 2D poses for debugging)
    # import shutil
    # shutil.rmtree(temp_output, ignore_errors=True)

    print(f"\n✓ Reference video processed successfully!")
    print(f"  Output directory: {output_dir}")

    return {
        'output_dir': str(output_dir),
        'poses_3d_path': str(poses_3d_path),
        'noisy_samples_path': str(noisy_samples_path),
        'bounds_path': str(bounds_path),
        'metadata_path': str(metadata_path),
        'metadata': metadata
    }
207
+
208
+
209
def load_reference(exercise_type, references_dir='references'):
    """
    Load a previously processed reference from disk.

    Args:
        exercise_type: Type of exercise (e.g., 'pushup')
        references_dir: Directory containing references

    Returns:
        Dictionary with 'poses_3d', 'noisy_samples' (or None), 'bounds'
        (or None), 'metadata' and 'ref_dir'.

    Raises:
        FileNotFoundError: if the reference directory, metadata, or 3D
        poses are missing.
    """
    ref_dir = Path(references_dir) / exercise_type
    if not ref_dir.exists():
        raise FileNotFoundError(f"Reference not found: {ref_dir}")

    # Metadata is mandatory
    metadata_file = ref_dir / 'metadata.json'
    if not metadata_file.exists():
        raise FileNotFoundError(f"Metadata not found: {metadata_file}")
    with open(metadata_file, 'r') as fh:
        metadata = json.load(fh)

    # 3D poses are mandatory
    poses_file = ref_dir / 'keypoints_3D.npz'
    if not poses_file.exists():
        raise FileNotFoundError(f"3D poses not found: {poses_file}")
    poses_3d = np.load(str(poses_file), allow_pickle=True)['reconstruction']
    if isinstance(poses_3d, list):
        poses_3d = np.array(poses_3d)

    # Noisy samples are optional
    samples_file = ref_dir / 'noisy_samples.npz'
    noisy_samples = (
        np.load(str(samples_file), allow_pickle=True)['samples']
        if samples_file.exists()
        else None
    )

    # Statistical bounds are optional
    bounds = None
    bounds_file = ref_dir / 'statistical_bounds.npz'
    if bounds_file.exists():
        bounds_data = np.load(str(bounds_file), allow_pickle=True)
        bounds = {
            key: bounds_data[key]
            for key in ('mean', 'lower_bound', 'upper_bound', 'tolerance')
        }

    return {
        'poses_3d': poses_3d,
        'noisy_samples': noisy_samples,
        'bounds': bounds,
        'metadata': metadata,
        'ref_dir': str(ref_dir),
    }
267
+
268
+
269
if __name__ == "__main__":
    import argparse

    # CLI entry point: process one reference video into scoring data.
    parser = argparse.ArgumentParser(description='Process reference video for scoring')
    parser.add_argument('--video', type=str, required=True, help='Path to reference video')
    parser.add_argument('--exercise', type=str, default='pushup', help='Exercise type')
    parser.add_argument('--output', type=str, default=None, help='Output directory')
    parser.add_argument('--samples', type=int, default=100, help='Number of noisy samples')

    args = parser.parse_args()

    try:
        result = process_reference_video(
            args.video,
            exercise_type=args.exercise,
            output_dir=args.output,
            n_samples=args.samples
        )
        print("\n" + "="*50)
        print("SUCCESS!")
        print("="*50)
        print(f"Reference saved to: {result['output_dir']}")
    except Exception as e:
        # Print the full traceback and exit non-zero so callers can detect failure
        print(f"\nERROR: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
296
+
fitness_coach/temporal_align.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Temporal Alignment using Dynamic Time Warping (DTW)
3
+ Aligns sequences of different lengths for comparison
4
+ """
5
+
6
+ import numpy as np
7
+ try:
8
+ from fastdtw import fastdtw
9
+ from scipy.spatial.distance import euclidean
10
+ HAS_FASTDTW = True
11
+ except ImportError:
12
+ HAS_FASTDTW = False
13
+ print("Warning: fastdtw not installed. Using simple interpolation instead.")
14
+ print("Install with: pip install fastdtw")
15
+
16
+
17
def align_sequences_dtw(seq1, seq2, distance_func=None):
    """
    Align two sequences using Dynamic Time Warping

    Falls back to simple linear resampling (no warping) when fastdtw is not
    installed; in that case the returned path is None.

    Args:
        seq1: First sequence [frames, ...]
        seq2: Second sequence [frames, ...]
        distance_func: Distance function (default: euclidean)

    Returns:
        aligned_seq1, aligned_seq2: Aligned sequences of same length
        path: DTW alignment path (list of (i, j) index pairs), or None when
            the interpolation fallback was used
    """
    if not HAS_FASTDTW:
        # Fallback: simple interpolation to same length
        target_length = max(len(seq1), len(seq2))
        from .utils import interpolate_sequence
        if len(seq1.shape) == 3:  # [frames, joints, coords]
            aligned_seq1 = interpolate_sequence(seq1, target_length)
            aligned_seq2 = interpolate_sequence(seq2, target_length)
        else:
            # Flatten for interpolation, then restore the trailing shape
            original_shape1 = seq1.shape
            original_shape2 = seq2.shape
            seq1_flat = seq1.reshape(len(seq1), -1)
            seq2_flat = seq2.reshape(len(seq2), -1)
            aligned_seq1_flat = interpolate_sequence(seq1_flat, target_length)
            aligned_seq2_flat = interpolate_sequence(seq2_flat, target_length)
            aligned_seq1 = aligned_seq1_flat.reshape((target_length,) + original_shape1[1:])
            aligned_seq2 = aligned_seq2_flat.reshape((target_length,) + original_shape2[1:])
        return aligned_seq1, aligned_seq2, None

    # Flatten sequences for DTW (fastdtw compares one feature vector per frame)
    seq1_flat = seq1.reshape(len(seq1), -1)
    seq2_flat = seq2.reshape(len(seq2), -1)

    # Use provided distance function or default
    if distance_func is None:
        distance_func = euclidean

    # Compute DTW
    distance, path = fastdtw(seq1_flat, seq2_flat, dist=distance_func)

    # Create aligned sequences using the path; frames may repeat where one
    # sequence dwells while the other moves
    aligned_seq1_indices = [p[0] for p in path]
    aligned_seq2_indices = [p[1] for p in path]

    aligned_seq1 = seq1[aligned_seq1_indices]
    aligned_seq2 = seq2[aligned_seq2_indices]

    return aligned_seq1, aligned_seq2, path
68
+
69
+
70
def align_poses_sequences(poses1, poses2):
    """
    Temporally align two pose sequences (DTW, or interpolation fallback).

    Args:
        poses1: First pose sequence [frames, 17, 3]
        poses2: Second pose sequence [frames, 17, 3]

    Returns:
        aligned_poses1, aligned_poses2: Equal-length aligned sequences
    """
    seq_a = np.array(poses1)
    seq_b = np.array(poses2)

    # Delegate to the DTW aligner and drop the path
    aligned_a, aligned_b, _path = align_sequences_dtw(seq_a, seq_b)
    return aligned_a, aligned_b
88
+
89
+
90
def find_phase_alignment(user_poses, ref_poses):
    """
    Find optimal phase alignment between user and reference sequences.

    Uses DTW to handle different speeds and timing.

    Args:
        user_poses: User pose sequence [frames, 17, 3]
        ref_poses: Reference pose sequence [frames, 17, 3]

    Returns:
        aligned_user, aligned_ref: Phase-aligned sequences
        alignment_score: Quality of alignment (lower is better)
    """
    user_arr = np.array(user_poses)
    ref_arr = np.array(ref_poses)

    aligned_user, aligned_ref, path = align_sequences_dtw(user_arr, ref_arr)

    if path is not None and HAS_FASTDTW:
        # Average frame-to-frame distance along the DTW path
        alignment_score = np.mean(
            [np.linalg.norm(user_arr[i] - ref_arr[j]) for i, j in path]
        )
    else:
        # Fallback: mean per-joint distance between the aligned sequences
        alignment_score = np.mean(np.linalg.norm(aligned_user - aligned_ref, axis=2))

    return aligned_user, aligned_ref, alignment_score
122
+
123
+
124
def resample_to_common_length(poses1, poses2, target_length=None):
    """
    Resample both sequences to a common length.

    Args:
        poses1: First pose sequence [frames, 17, 3]
        poses2: Second pose sequence [frames, 17, 3]
        target_length: Target length (default: average of both)

    Returns:
        resampled_poses1, resampled_poses2: Resampled sequences
    """
    from fitness_coach.utils import interpolate_sequence

    seq_a = np.array(poses1)
    seq_b = np.array(poses2)

    # Default to the (floored) mean of the two lengths
    if target_length is None:
        target_length = (len(seq_a) + len(seq_b)) // 2

    return (
        interpolate_sequence(seq_a, target_length),
        interpolate_sequence(seq_b, target_length),
    )
148
+
149
+
150
if __name__ == "__main__":
    # Smoke-test DTW alignment with random sequences of different lengths.
    print("Testing temporal alignment...")

    # Create test sequences of different lengths
    seq1 = np.random.randn(50, 17, 3)
    seq2 = np.random.randn(75, 17, 3)

    print(f"Original lengths: {len(seq1)} vs {len(seq2)}")

    # Test alignment
    aligned_seq1, aligned_seq2, path = align_sequences_dtw(seq1, seq2)
    print(f"Aligned lengths: {len(aligned_seq1)} vs {len(aligned_seq2)}")

    # path is None when fastdtw is unavailable and interpolation was used
    if path is not None:
        print(f"DTW path length: {len(path)}")
    else:
        print("Using interpolation fallback")

    # Test phase alignment
    aligned_user, aligned_ref, score = find_phase_alignment(seq1, seq2)
    print(f"Alignment score: {score:.4f}")

    print("Temporal alignment tests passed!")
174
+
fitness_coach/test_modules.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Test script for fitness_coach modules
3
+ """
4
+
5
+ import numpy as np
6
+ import sys
7
+ import os
8
+
9
+ # Add parent directory to path
10
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
11
+
12
+ from fitness_coach.body_parts import (
13
+ get_body_part_joints,
14
+ get_joint_noise_level,
15
+ get_joints_for_exercise,
16
+ calculate_body_scale
17
+ )
18
+ from fitness_coach.utils import (
19
+ normalize_body_scale,
20
+ center_poses,
21
+ calculate_joint_distances,
22
+ interpolate_sequence
23
+ )
24
+ from fitness_coach.temporal_align import (
25
+ align_sequences_dtw,
26
+ align_poses_sequences
27
+ )
28
+ from fitness_coach.noise_scoring import (
29
+ create_noisy_samples,
30
+ score_with_statistical_bounds
31
+ )
32
+
33
+
34
def test_body_parts():
    """Test body parts module"""
    print("=" * 50)
    print("Testing body_parts module...")
    print("=" * 50)

    # Body part -> joint index mapping
    arm_joints = get_body_part_joints('right_arm')
    print(f"Right arm joints: {arm_joints}")
    assert arm_joints == [14, 15, 16], "Right arm joints incorrect"

    # Per-joint noise level lookup
    hip_noise = get_joint_noise_level(0)
    print(f"Hip noise level: {hip_noise}")
    assert hip_noise == 0.02, "Hip noise level incorrect"

    # Exercise -> relevant body parts
    focus_parts = get_joints_for_exercise('pushup')
    print(f"Push-up body parts: {focus_parts}")
    assert 'core' in focus_parts, "Push-up should include core"

    # Body scale from random poses must be positive
    sample_poses = np.random.randn(10, 17, 3)
    scale = calculate_body_scale(sample_poses)
    print(f"Body scale: {scale:.4f}")
    assert scale > 0, "Body scale should be positive"

    print("[OK] body_parts module tests passed!\n")
62
+
63
+
64
def test_utils():
    """Test utils module"""
    print("=" * 50)
    print("Testing utils module...")
    print("=" * 50)

    sample_poses = np.random.randn(10, 17, 3) * 10

    # Body-scale normalization keeps the array shape
    normalized, scale = normalize_body_scale(sample_poses)
    print(f"Normalization: scale = {scale:.4f}")
    assert normalized.shape == sample_poses.shape, "Normalized shape should match"

    # Centering puts the hip joint at the origin
    centered = center_poses(sample_poses)
    hip_pos = centered[0, 0]
    print(f"Centering: hip position = {hip_pos}")
    assert np.allclose(hip_pos, [0, 0, 0]), "Hip should be at origin"

    # Per-joint distances between two poses: one value per joint
    dists = calculate_joint_distances(sample_poses[0], sample_poses[1])
    print(f"Joint distances: mean = {np.mean(dists):.4f}")
    assert len(dists) == 17, "Should have 17 joint distances"

    # Temporal interpolation to a longer sequence
    short_seq = np.random.randn(5, 17, 3)
    long_seq = interpolate_sequence(short_seq, 10)
    print(f"Interpolation: {len(short_seq)} -> {len(long_seq)} frames")
    assert len(long_seq) == 10, "Interpolated length should be 10"

    print("[OK] utils module tests passed!\n")
96
+
97
+
98
def test_temporal_align():
    """Test temporal alignment module"""
    print("=" * 50)
    print("Testing temporal_align module...")
    print("=" * 50)

    # Sequences of different lengths
    short_seq = np.random.randn(30, 17, 3)
    long_seq = np.random.randn(50, 17, 3)
    print(f"Original lengths: {len(short_seq)} vs {len(long_seq)}")

    # DTW (or interpolation fallback) must yield equal-length outputs
    out_a, out_b, path = align_sequences_dtw(short_seq, long_seq)
    print(f"Aligned lengths: {len(out_a)} vs {len(out_b)}")
    assert len(out_a) == len(out_b), "Aligned sequences should have same length"

    # Same guarantee through the pose-level wrapper
    poses_a, poses_b = align_poses_sequences(short_seq, long_seq)
    print(f"Pose alignment: {len(poses_a)} vs {len(poses_b)}")
    assert len(poses_a) == len(poses_b), "Aligned poses should have same length"

    print("[OK] temporal_align module tests passed!\n")
121
+
122
+
123
def test_noise_scoring():
    """Test noise scoring module"""
    print("=" * 50)
    print("Testing noise_scoring module...")
    print("=" * 50)

    # A random reference and a lightly perturbed "user" performance
    reference = np.random.randn(20, 17, 3)
    user = reference + np.random.normal(0, 0.05, reference.shape)

    # Noisy sample generation: [n_samples, frames, 17, 3]
    noisy_samples = np.asarray(create_noisy_samples(reference, n_samples=20))
    print(f"Generated {len(noisy_samples)} noisy samples")
    assert noisy_samples.shape == (20, 20, 17, 3), "Noisy samples shape incorrect"

    # Statistical-bounds scoring must return a 0-100 overall score
    scores = score_with_statistical_bounds(user, reference)
    print(f"Overall score: {scores['overall_score']:.2f}")
    print(f"Body part scores: {list(scores['body_part_scores'].keys())}")

    assert 'overall_score' in scores, "Should have overall_score"
    assert 'body_part_scores' in scores, "Should have body_part_scores"
    assert 0 <= scores['overall_score'] <= 100, "Score should be 0-100"

    print("[OK] noise_scoring module tests passed!\n")
148
+
149
+
150
def main():
    """Run all module test suites; exit with status 1 on the first failure."""
    print("\n" + "=" * 50)
    print("FITNESS COACH MODULE TESTS")
    print("=" * 50 + "\n")

    try:
        test_body_parts()
        test_utils()
        test_temporal_align()
        test_noise_scoring()

        print("=" * 50)
        print("ALL TESTS PASSED! [OK]")
        print("=" * 50)

    except Exception as e:
        # Report the failure with a traceback and signal failure to the shell
        print(f"\n[FAILED] TEST FAILED: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
171
+
172
+
173
+ if __name__ == "__main__":
174
+ main()
175
+
fitness_coach/user_processor.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ User Video Processor
3
+ Processes user videos and extracts 3D poses for scoring
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import numpy as np
9
+ from pathlib import Path
10
+
11
+ # Add parent directory and demo directory to path
12
+ project_root = Path(__file__).parent.parent
13
+ sys.path.insert(0, str(project_root))
14
+ sys.path.insert(0, str(project_root / 'demo'))
15
+
16
+
17
def process_user_video(video_path, output_dir=None, cleanup=True):
    """
    Process a user video and extract 3D poses

    Results are cached per video name: if keypoints_3D.npz already exists in
    the output directory, the cached poses are returned without reprocessing.

    Args:
        video_path: Path to user video file
        output_dir: Directory to save processed data
            (default: user_videos_cache/<video name>/)
        cleanup: If True, remove intermediate files after processing

    Returns:
        Dictionary with:
            'poses_3d' / 'keypoints_3d': [frames, 17, 3] array (aliases)
            'keypoints_3d_path': path to the saved npz file (str)
            'video_path': the input video path
            'output_dir': output directory (str)
            'num_frames': number of frames

    Raises:
        FileNotFoundError: if the video or the produced keypoints are missing.
    """
    # Change to project root so demo.vis imports resolve, then restore
    original_cwd = os.getcwd()
    os.chdir(project_root)

    try:
        # Import after changing directory
        from demo.vis import get_pose2D, get_pose3D
    finally:
        os.chdir(original_cwd)

    video_path = Path(video_path)
    if not video_path.exists():
        raise FileNotFoundError(f"Video not found: {video_path}")

    # Set up output directory with caching
    if output_dir is None:
        output_dir = Path('user_videos_cache') / video_path.stem
    else:
        output_dir = Path(output_dir)

    output_dir.mkdir(parents=True, exist_ok=True)

    def _make_result(keypoints_3d, keypoints_3d_path):
        # Single return schema shared by the cache-hit and fresh paths.
        # (The previous version returned different keys from each branch,
        # so callers reading result['keypoints_3d_path'] crashed on cache
        # hits - including this module's own __main__.)
        return {
            'poses_3d': keypoints_3d,
            'keypoints_3d': keypoints_3d,  # Alias for compatibility
            'keypoints_3d_path': str(keypoints_3d_path),
            'video_path': video_path,
            'output_dir': str(output_dir),
            'num_frames': len(keypoints_3d),
        }

    # Check if already processed (cache hit)
    keypoints_3d_path = output_dir / 'keypoints_3D.npz'
    if keypoints_3d_path.exists():
        print(f"✓ Using cached processing for: {video_path.name}")
        print(f"  Cache location: {output_dir}")
        keypoints_3d = np.load(str(keypoints_3d_path), allow_pickle=True)['reconstruction']
        # Normalize to an array (mirrors the fresh-processing path)
        if isinstance(keypoints_3d, list):
            keypoints_3d = np.array(keypoints_3d)
        print(f"  Loaded {len(keypoints_3d)} frames from cache\n")
        return _make_result(keypoints_3d, keypoints_3d_path)

    print(f"Processing user video: {video_path.name}")
    print(f"Output directory: {output_dir}")

    # Format output directory string (both functions expect trailing slash)
    # Use absolute path to avoid issues when changing directories
    output_dir_abs = output_dir.resolve()
    output_dir_str = str(output_dir_abs).replace('\\', '/')
    if not output_dir_str.endswith('/'):
        output_dir_str += '/'

    video_path_abs = video_path.resolve()

    # Change to project root for processing
    os.chdir(project_root)

    # Save original argv and temporarily clear it to avoid argparse conflicts
    original_argv = sys.argv.copy()
    sys.argv = [sys.argv[0]]  # Keep only script name

    try:
        # Step 1: Extract 2D poses
        print("\n[1/2] Extracting 2D poses...")
        try:
            # get_pose2D adds 'input_2D/' to output_dir (line 95 in vis.py)
            get_pose2D(str(video_path_abs), output_dir_str)
        except Exception as e:
            print(f"Error in 2D pose extraction: {e}")
            raise

        # Step 2: Extract 3D poses
        print("\n[2/2] Extracting 3D poses...")
        try:
            # get_pose3D looks for output_dir + 'input_2D/keypoints.npz' (line 190 in vis.py)
            get_pose3D(str(video_path_abs), output_dir_str)
        except Exception as e:
            print(f"Error in 3D pose extraction: {e}")
            raise
    finally:
        sys.argv = original_argv  # Restore original argv
        os.chdir(original_cwd)

    # Step 3: Load 3D poses
    # get_pose3D saves to output_dir + 'keypoints_3D.npz' (line 279 in vis.py)
    keypoints_3d_path = output_dir_abs / 'keypoints_3D.npz'
    if not keypoints_3d_path.exists():
        raise FileNotFoundError(f"3D keypoints not found: {keypoints_3d_path}")

    keypoints_3d = np.load(str(keypoints_3d_path), allow_pickle=True)['reconstruction']
    print(f"Loaded {len(keypoints_3d)} frames of 3D poses")

    # Convert to numpy array if needed
    if isinstance(keypoints_3d, list):
        keypoints_3d = np.array(keypoints_3d)

    result = _make_result(keypoints_3d, keypoints_3d_path)

    # Cleanup intermediate files if requested
    if cleanup:
        # Keep only the 3D keypoints (and input_2D for debugging)
        import shutil
        for item in output_dir.iterdir():
            if item.is_dir() and item.name != 'input_2D':
                shutil.rmtree(item, ignore_errors=True)
            elif item.is_file() and item.name != 'keypoints_3D.npz':
                item.unlink(missing_ok=True)

    print(f"\n✓ User video processed successfully!")
    print(f"  Frames: {len(keypoints_3d)}")
    print(f"  Output: {output_dir}")

    return result
143
+
144
+
145
def load_user_poses(keypoints_path):
    """
    Load previously extracted user poses from disk.

    Args:
        keypoints_path: Path to keypoints_3D.npz file

    Returns:
        poses_3d: Array of shape [frames, 17, 3]

    Raises:
        FileNotFoundError: if the file does not exist.
    """
    npz_file = Path(keypoints_path)
    if not npz_file.exists():
        raise FileNotFoundError(f"Keypoints file not found: {npz_file}")

    poses_3d = np.load(str(npz_file), allow_pickle=True)['reconstruction']

    # Normalize list payloads to a numpy array
    return np.array(poses_3d) if isinstance(poses_3d, list) else poses_3d
166
+
167
+
168
if __name__ == "__main__":
    import argparse

    # CLI entry point: extract 3D poses from one user video.
    parser = argparse.ArgumentParser(description='Process user video for scoring')
    parser.add_argument('--video', type=str, required=True, help='Path to user video')
    parser.add_argument('--output', type=str, default=None, help='Output directory')
    parser.add_argument('--keep-files', action='store_true', help='Keep intermediate files')

    args = parser.parse_args()

    try:
        result = process_user_video(
            args.video,
            output_dir=args.output,
            cleanup=not args.keep_files
        )
        print("\n" + "="*50)
        print("SUCCESS!")
        print("="*50)
        print(f"3D poses extracted: {result['num_frames']} frames")
        # NOTE(review): on a cache hit process_user_video does not return
        # 'keypoints_3d_path', so this line raises KeyError - confirm/fix.
        print(f"Saved to: {result['keypoints_3d_path']}")
    except Exception as e:
        # Print the full traceback and exit non-zero so callers can detect failure
        print(f"\nERROR: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
194
+
fitness_coach/utils.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility Functions for Pose Processing
3
+ Helper functions for normalization, distance calculations, and interpolation
4
+ """
5
+
6
+ import numpy as np
7
+
8
+
9
def normalize_body_scale(poses, reference_scale=None):
    """
    Normalize poses by body scale (hip-to-thorax distance).

    Args:
        poses: Array of shape [frames, 17, 3] or [17, 3]
        reference_scale: Optional reference scale to normalize to

    Returns:
        Tuple of (normalized poses in the input's shape, measured body
        scale — 1.0 when the skeleton is degenerate and left untouched).
    """
    arr = np.array(poses)
    input_shape = arr.shape

    # Work with an explicit frame axis internally.
    frames = arr[np.newaxis, :, :] if arr.ndim == 2 else arr

    # Body scale: mean per-frame distance from hip (0) to thorax (8).
    scale = np.mean(np.linalg.norm(frames[:, 0, :] - frames[:, 8, :], axis=1))

    if scale == 0:
        # Degenerate pose (all joints coincide): return unchanged.
        return frames.reshape(input_shape), 1.0

    if reference_scale is not None:
        factor = reference_scale / scale
    else:
        factor = 1.0 / scale

    return (frames * factor).reshape(input_shape), scale
42
+
43
+
44
def center_poses(poses, joint_idx=0):
    """
    Translate poses so a chosen joint sits at the origin.

    Args:
        poses: Array of shape [frames, 17, 3] or [17, 3]
        joint_idx: Index of the joint used as the origin (default 0 = hip)

    Returns:
        Centered poses in the same shape as the input.
    """
    arr = np.array(poses)
    input_shape = arr.shape
    frames = arr[np.newaxis, :, :] if arr.ndim == 2 else arr

    # Broadcast-subtract the reference joint from every joint, per frame.
    shifted = frames - frames[:, joint_idx:joint_idx + 1, :]
    return shifted.reshape(input_shape)
65
+
66
+
67
def calculate_joint_distances(pose1, pose2):
    """
    Euclidean distance between corresponding joints of two poses.

    Args:
        pose1: Array of shape [17, 3] or [frames, 17, 3]
        pose2: Array of shape [17, 3] or [frames, 17, 3]

    Returns:
        Per-joint distances: [17] for two single frames, [frames, 17]
        otherwise.
    """
    a = np.array(pose1)
    b = np.array(pose2)

    if a.ndim == 2 and b.ndim == 2:
        # Both inputs are single frames.
        return np.linalg.norm(a - b, axis=1)

    # Promote any single frame so both arrays carry a frame axis.
    if a.ndim == 2:
        a = a[np.newaxis, :, :]
    if b.ndim == 2:
        b = b[np.newaxis, :, :]

    return np.linalg.norm(a - b, axis=2)
94
+
95
+
96
def calculate_joint_angles(poses, joint_pairs):
    """
    Angle of each parent->child bone vector against the vertical axis.

    Args:
        poses: Array of shape [frames, 17, 3] or [17, 3]
        joint_pairs: List of (parent, child) joint index tuples

    Returns:
        Angles in radians: [frames, n_pairs], or [n_pairs] for one pose.
    """
    arr = np.array(poses)
    input_shape = arr.shape
    frames = arr[np.newaxis, :, :] if arr.ndim == 2 else arr

    # One vertical reference vector per frame (unit Y axis).
    up = np.tile(np.array([0, 1, 0]), (frames.shape[0], 1))
    up_norms = np.linalg.norm(up, axis=1)

    per_pair = []
    for parent, child in joint_pairs:
        bone = frames[:, child, :] - frames[:, parent, :]

        # cos(theta) = (bone . up) / (|bone| |up|); epsilon guards /0,
        # clip guards arccos against float overshoot.
        cosines = np.clip(
            np.sum(bone * up, axis=1)
            / (np.linalg.norm(bone, axis=1) * up_norms + 1e-8),
            -1, 1,
        )
        per_pair.append(np.arccos(cosines))

    result = np.array(per_pair).T  # [frames, n_pairs]

    return result[0] if len(input_shape) == 2 else result
139
+
140
+
141
def interpolate_sequence(poses, target_length):
    """
    Resample a pose sequence to a target frame count (linear interpolation).

    Args:
        poses: Array of shape [frames, 17, 3]
        target_length: Desired number of frames

    Returns:
        Resampled poses: [target_length, 17, 3]. The input is returned
        unchanged when it already has target_length frames.
    """
    src = np.array(poses)
    n_src = src.shape[0]

    if n_src == target_length:
        return src

    # Sample positions expressed on the original frame-index axis.
    src_idx = np.linspace(0, n_src - 1, n_src)
    dst_idx = np.linspace(0, n_src - 1, target_length)

    out = np.zeros((target_length, src.shape[1], src.shape[2]))

    # np.interp is 1-D only, so resample each joint/coordinate channel.
    for j in range(src.shape[1]):
        for c in range(src.shape[2]):
            out[:, j, c] = np.interp(dst_idx, src_idx, src[:, j, c])

    return out
174
+
175
+
176
def smooth_poses(poses, window_size=5):
    """
    Smooth a pose sequence with a moving average over time.

    Args:
        poses: Array of shape [frames, 17, 3]
        window_size: Number of frames averaged per output frame

    Returns:
        Smoothed poses; the input is returned untouched when it is
        shorter than the window.
    """
    seq = np.array(poses)
    if len(seq) < window_size:
        return seq

    # Edge-pad so the averaging window stays full at both sequence ends.
    half = window_size // 2
    padded = np.pad(seq, ((half, half), (0, 0), (0, 0)), mode='edge')

    out = np.zeros_like(seq)
    for t in range(len(seq)):
        out[t] = padded[t:t + window_size].mean(axis=0)

    return out
201
+
202
+
203
def align_poses_spatially(poses1, poses2):
    """
    Align two pose sequences spatially (rotation and translation).

    Both sequences are centered at the hip (joint 0); each frame of
    *poses2* is then rotated to best match the corresponding frame of
    *poses1* in the least-squares sense (Kabsch / Procrustes).

    Args:
        poses1: Reference poses [frames, 17, 3]
        poses2: Poses to align [frames, 17, 3]

    Returns:
        Centered, rotated copy of poses2.
    """
    # BUGFIX: dropped the unused `scipy.spatial.transform.Rotation` import.
    p1 = np.array(poses1, dtype=float)
    p2 = np.array(poses2, dtype=float)

    # Center both sequences at the hip joint (index 0).
    p1 = p1 - p1[:, 0:1, :]
    p2 = p2 - p2[:, 0:1, :]

    aligned = np.zeros_like(p2)

    for t in range(len(p1)):
        # Kabsch: optimal rotation from the SVD of the cross-covariance.
        H = p2[t].T @ p1[t]
        U, _, Vt = np.linalg.svd(H)
        R = Vt.T @ U.T

        # BUGFIX: guard against reflections — a raw SVD solution can have
        # det(R) = -1 (a mirroring, not a rotation). Flip the sign of the
        # last singular direction to force a proper rotation.
        if np.linalg.det(R) < 0:
            Vt[-1, :] *= -1
            R = Vt.T @ U.T

        aligned[t] = p2[t] @ R.T

    return aligned
240
+
241
+
242
if __name__ == "__main__":
    # Smoke-test the utility functions on random data.
    print("Testing utility functions...")

    sample = np.random.randn(10, 17, 3)

    normalized, scale = normalize_body_scale(sample)
    print(f"Normalization: original scale ~{scale:.2f}")

    centered = center_poses(sample)
    print(f"Centering: hip position = {centered[0, 0]}")

    dists = calculate_joint_distances(sample[0], sample[1])
    print(f"Joint distances: mean = {np.mean(dists):.2f}")

    interpolated = interpolate_sequence(sample, 20)
    print(f"Interpolation: {sample.shape[0]} -> {interpolated.shape[0]} frames")

    print("All tests passed!")
266
+
fitness_coach/video_comparison.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Generate side-by-side comparison videos of user vs reference 3D poses.
3
+ Uses the same visualization as the original pose3D images.
4
+ """
5
+ import numpy as np
6
+ import matplotlib.pyplot as plt
7
+ from matplotlib.animation import FuncAnimation, FFMpegWriter
8
+ import matplotlib
9
+ matplotlib.use('Agg') # Use non-interactive backend
10
+ from pathlib import Path
11
+ import argparse
12
+ import sys
13
+ import os
14
+
15
+ # Import the original show3Dpose function from demo/vis.py
16
+ # Add demo directory to path
17
+ project_root = Path(__file__).parent.parent
18
+ demo_path = str(project_root / 'demo')
19
+ if demo_path not in sys.path:
20
+ sys.path.insert(0, demo_path)
21
+
22
+ from vis import show3Dpose
23
+
24
+
25
def load_3d_poses(pose_file):
    """Load a 3D pose array from an npz archive.

    Prefers the 'reconstruction' key, then 'poses_3d', and finally falls
    back to the first array stored in the file.
    """
    archive = np.load(pose_file, allow_pickle=True)

    for key in ('reconstruction', 'poses_3d'):
        if key in archive:
            return archive[key]

    # Unknown layout: take whatever array was stored first.
    return archive[list(archive.keys())[0]]
37
+
38
+
39
def plot_pose_3d(ax, pose, title):
    """Render one 3D pose onto *ax* and label it with *title*."""
    ax.clear()

    # Reuse the exact renderer that produced the original pose3D images.
    show3Dpose(pose, ax)

    ax.set_title(title, fontsize=12, fontweight='bold', pad=10)
48
+
49
+
50
def create_comparison_video(user_poses, reference_poses, output_path,
                            user_video_name="User", reference_name="Reference",
                            fps=30, elev=15, azim=70):
    """
    Create a side-by-side comparison video.

    Args:
        user_poses: User 3D poses (N_frames, 17, 3)
        reference_poses: Reference 3D poses (N_frames, 17, 3)
        output_path: Path to save output video
        user_video_name: Display name for user
        reference_name: Display name for reference
        fps: Frames per second for output video
        elev: Elevation angle for 3D view (degrees)
        azim: Azimuth angle for 3D view (degrees)

    Raises:
        Exception: Re-raised when neither MP4 (FFmpeg) nor the GIF
        fallback could be written.
    """
    print(f"\nCreating comparison video...")
    print(f"  User frames: {len(user_poses)}")
    print(f"  Reference frames: {len(reference_poses)}")

    # Trim both sequences to the shorter one so frames stay in lockstep.
    n_frames = min(len(user_poses), len(reference_poses))
    user_poses = user_poses[:n_frames]
    reference_poses = reference_poses[:n_frames]

    # Create figure with two subplots
    fig = plt.figure(figsize=(16, 8))
    ax1 = fig.add_subplot(121, projection='3d')
    ax2 = fig.add_subplot(122, projection='3d')

    fig.suptitle('Exercise Form Comparison', fontsize=16, fontweight='bold')

    def update(frame):
        """Draw both skeletons for one animation frame."""
        plot_pose_3d(ax1, reference_poses[frame],
                     f'{reference_name}\nFrame {frame+1}/{n_frames}')
        plot_pose_3d(ax2, user_poses[frame],
                     f'{user_video_name}\nFrame {frame+1}/{n_frames}')

        # BUGFIX: elev/azim were accepted but never applied to the axes.
        ax1.view_init(elev=elev, azim=azim)
        ax2.view_init(elev=elev, azim=azim)

        if frame % 30 == 0:
            print(f"  Progress: {frame}/{n_frames} frames ({100*frame//n_frames}%)")

        return ax1, ax2

    anim = FuncAnimation(fig, update, frames=n_frames,
                         interval=1000/fps, blit=False)

    # Save video - try MP4 first, fall back to GIF if FFmpeg not available
    print(f"  Saving video to: {output_path}")

    try:
        # MP4 requires an FFmpeg binary on PATH.
        writer = FFMpegWriter(fps=fps, bitrate=5000, codec='libx264')
        anim.save(str(output_path), writer=writer, dpi=100)
        print(f"✓ Video saved successfully!")
        print(f"  Output: {output_path}")
        print(f"  Duration: {n_frames/fps:.2f} seconds")
        print(f"  Format: MP4")
    except (FileNotFoundError, OSError) as e:
        # FFmpeg not found, try GIF instead
        print(f"  ⚠ FFmpeg not found, saving as GIF instead...")
        gif_path = str(output_path).replace('.mp4', '.gif')

        try:
            # Pillow writer ships with matplotlib; no external binary needed.
            from matplotlib.animation import PillowWriter
            writer = PillowWriter(fps=fps)
            anim.save(gif_path, writer=writer, dpi=100)
            print(f"✓ GIF saved successfully!")
            print(f"  Output: {gif_path}")
            print(f"  Duration: {n_frames/fps:.2f} seconds")
            print(f"  Format: GIF")
            print(f"\n  Note: For MP4 format, install FFmpeg:")
            print(f"    conda install -c conda-forge ffmpeg")
            print(f"    or: winget install ffmpeg")
        except Exception as gif_error:
            print(f"✗ Error saving GIF: {gif_error}")
            print(f"\nOriginal MP4 error: {e}")
            print("\nTo enable MP4 output, install FFmpeg:")
            print("  conda install -c conda-forge ffmpeg")
            print("  or: winget install ffmpeg")
            raise
    except Exception as e:
        print(f"✗ Error saving video: {e}")
        raise
    finally:
        plt.close(fig)
139
+
140
+
141
def main():
    """CLI entry point: parse arguments, load both pose files, render video."""
    arg_parser = argparse.ArgumentParser(
        description='Generate side-by-side comparison video of 3D poses'
    )
    arg_parser.add_argument('--user-poses', required=True,
                            help='Path to user 3D poses npz file')
    arg_parser.add_argument('--reference-poses', required=True,
                            help='Path to reference 3D poses npz file')
    arg_parser.add_argument('--output', default='comparison_output.mp4',
                            help='Output video path')
    arg_parser.add_argument('--user-name', default='Your Form',
                            help='Display name for user')
    arg_parser.add_argument('--reference-name', default='Correct Form',
                            help='Display name for reference')
    arg_parser.add_argument('--fps', type=int, default=30,
                            help='Frames per second')
    arg_parser.add_argument('--elev', type=float, default=15,
                            help='Elevation angle for 3D view (degrees)')
    arg_parser.add_argument('--azim', type=float, default=70,
                            help='Azimuth angle for 3D view (degrees)')
    args = arg_parser.parse_args()

    banner = "=" * 60
    print(banner)
    print("3D POSE COMPARISON VIDEO GENERATOR")
    print(banner)

    # Load both pose sequences from disk.
    print(f"\nLoading user poses from: {args.user_poses}")
    user_poses = load_3d_poses(args.user_poses)
    print(f"  Loaded {len(user_poses)} frames")

    print(f"\nLoading reference poses from: {args.reference_poses}")
    reference_poses = load_3d_poses(args.reference_poses)
    print(f"  Loaded {len(reference_poses)} frames")

    # Render the side-by-side comparison.
    create_comparison_video(
        user_poses=user_poses,
        reference_poses=reference_poses,
        output_path=args.output,
        user_video_name=args.user_name,
        reference_name=args.reference_name,
        fps=args.fps,
        elev=args.elev,
        azim=args.azim
    )

    print("\n" + banner)
    print("Done!")
    print(banner)


if __name__ == '__main__':
    main()
196
+
fitness_coach/video_from_images.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Generate side-by-side comparison videos from existing pose3D images.
3
+ Much simpler - just combines the existing PNG images!
4
+ """
5
+ import numpy as np
6
+ from PIL import Image
7
+ import glob
8
+ from pathlib import Path
9
+ import argparse
10
+ from matplotlib.animation import FuncAnimation, FFMpegWriter, PillowWriter
11
+ import matplotlib.pyplot as plt
12
+ import matplotlib
13
+ matplotlib.use('Agg')
14
+
15
+
16
def load_image_sequence(image_dir):
    """Return the sorted list of pose3D PNG paths inside *image_dir*.

    Raises:
        FileNotFoundError: If the directory is missing or contains no
        '*_3D.png' images.
    """
    folder = Path(image_dir)
    if not folder.exists():
        raise FileNotFoundError(f"Image directory not found: {folder}")

    # Frames are named like 0000_3D.png, so a lexical sort is frame order.
    matches = sorted(glob.glob(str(folder / '*_3D.png')))
    if not matches:
        raise FileNotFoundError(f"No pose3D images found in {folder}")

    print(f"  Found {len(matches)} images in {folder}")
    return matches
30
+
31
+
32
def create_comparison_video_from_images(user_image_dir, reference_image_dir, output_path,
                                        user_video_name="Your Form", reference_name="Correct Form",
                                        fps=30):
    """
    Create side-by-side video from existing pose3D images.

    Args:
        user_image_dir: Directory containing user pose3D images
        reference_image_dir: Directory containing reference pose3D images
        output_path: Path to save output video
        user_video_name: Display name for user
        reference_name: Display name for reference
        fps: Frames per second

    Raises:
        Exception: Re-raised when neither MP4 (FFmpeg) nor the GIF
        fallback could be written.
    """
    print(f"\nCreating comparison video from existing images...")

    # Load image sequences
    print(f"\nLoading user images from: {user_image_dir}")
    user_images = load_image_sequence(user_image_dir)

    print(f"\nLoading reference images from: {reference_image_dir}")
    reference_images = load_image_sequence(reference_image_dir)

    # Trim to the shorter sequence so both sides advance in lockstep.
    n_frames = min(len(user_images), len(reference_images))
    user_images = user_images[:n_frames]
    reference_images = reference_images[:n_frames]

    print(f"\n  Using {n_frames} frames for comparison")

    # CLEANUP: removed dead code that opened the first image of each
    # sequence to compute max width/height — the values were never used
    # and the Image.open handles were never closed.

    # Create figure for side-by-side display
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))
    ax1.axis('off')
    ax2.axis('off')

    fig.suptitle('Exercise Form Comparison', fontsize=16, fontweight='bold')
    ax1.set_title(f'{reference_name}', fontsize=14, fontweight='bold', pad=10)
    ax2.set_title(f'{user_video_name}', fontsize=14, fontweight='bold', pad=10)

    def update(frame):
        """Load and display one frame on each axis."""
        ref_img = Image.open(reference_images[frame])
        user_img = Image.open(user_images[frame])

        ax1.clear()
        ax1.imshow(ref_img)
        ax1.axis('off')
        ax1.set_title(f'{reference_name}\nFrame {frame+1}/{n_frames}',
                      fontsize=12, fontweight='bold', pad=10)

        ax2.clear()
        ax2.imshow(user_img)
        ax2.axis('off')
        ax2.set_title(f'{user_video_name}\nFrame {frame+1}/{n_frames}',
                      fontsize=12, fontweight='bold', pad=10)

        if frame % 30 == 0:
            print(f"  Progress: {frame}/{n_frames} frames ({100*frame//n_frames}%)")

        return ax1, ax2

    anim = FuncAnimation(fig, update, frames=n_frames,
                         interval=1000/fps, blit=False)

    # Save video - try MP4 first, fall back to GIF if FFmpeg not available
    print(f"\n  Saving video to: {output_path}")

    try:
        # MP4 requires an FFmpeg binary on PATH.
        writer = FFMpegWriter(fps=fps, bitrate=5000, codec='libx264')
        anim.save(str(output_path), writer=writer, dpi=100)
        print(f"✓ Video saved successfully!")
        print(f"  Output: {output_path}")
        print(f"  Duration: {n_frames/fps:.2f} seconds")
        print(f"  Format: MP4")
    except (FileNotFoundError, OSError) as e:
        # FFmpeg not found, try GIF instead
        print(f"  ⚠ FFmpeg not found, saving as GIF instead...")
        gif_path = str(output_path).replace('.mp4', '.gif')

        try:
            writer = PillowWriter(fps=fps)
            anim.save(gif_path, writer=writer, dpi=100)
            print(f"✓ GIF saved successfully!")
            print(f"  Output: {gif_path}")
            print(f"  Duration: {n_frames/fps:.2f} seconds")
            print(f"  Format: GIF")
            print(f"\n  Note: For MP4 format, install FFmpeg:")
            print(f"    conda install -c conda-forge ffmpeg")
        except Exception as gif_error:
            print(f"✗ Error saving GIF: {gif_error}")
            raise
    except Exception as e:
        print(f"✗ Error saving video: {e}")
        raise
    finally:
        plt.close(fig)
140
+
141
+
142
def main():
    """CLI entry point: parse arguments and build the video from PNG frames."""
    cli = argparse.ArgumentParser(
        description='Generate side-by-side comparison video from existing pose3D images'
    )
    cli.add_argument('--user-images', required=True,
                     help='Directory containing user pose3D images (e.g., user_videos_cache/user/pose3D)')
    cli.add_argument('--reference-images', required=True,
                     help='Directory containing reference pose3D images')
    cli.add_argument('--output', default='comparison_from_images.mp4',
                     help='Output video path')
    cli.add_argument('--user-name', default='Your Form',
                     help='Display name for user')
    cli.add_argument('--reference-name', default='Correct Form',
                     help='Display name for reference')
    cli.add_argument('--fps', type=int, default=30,
                     help='Frames per second')
    args = cli.parse_args()

    banner = "=" * 60
    print(banner)
    print("3D POSE COMPARISON VIDEO FROM IMAGES")
    print(banner)

    create_comparison_video_from_images(
        user_image_dir=args.user_images,
        reference_image_dir=args.reference_images,
        output_path=args.output,
        user_video_name=args.user_name,
        reference_name=args.reference_name,
        fps=args.fps
    )

    print("\n" + banner)
    print("Done!")
    print(banner)


if __name__ == '__main__':
    main()
181
+