Spaces:
Sleeping
Sleeping
Sean Carnahan committed on
Commit ·
56926a1
1
Parent(s): e7825c1
Force MoveNet to run on CPU to avoid GPU JIT errors on HF Spaces
Browse files
app.py
CHANGED
|
@@ -156,7 +156,7 @@ def predict_pose_cnn(img_path):
|
|
| 156 |
finally:
|
| 157 |
cleanup_memory()
|
| 158 |
|
| 159 |
-
@app.route('/static/uploads/<path:filename>')
|
| 160 |
def serve_video(filename):
|
| 161 |
response = send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=False)
|
| 162 |
# Ensure correct content type, especially for Safari/iOS if issues arise
|
|
@@ -185,18 +185,11 @@ def process_video_movenet(video_path):
|
|
| 185 |
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 186 |
print(f"[DEBUG] Video properties - FPS: {fps}, Width: {width}, Height: {height}, Total Frames: {total_frames}")
|
| 187 |
|
| 188 |
-
#
|
| 189 |
-
print("[DEBUG]
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
movenet_model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
|
| 194 |
-
movenet = movenet_model.signatures['serving_default']
|
| 195 |
-
else:
|
| 196 |
-
print("[DEBUG] No GPU found, using CPU for MoveNet")
|
| 197 |
-
with tf.device('/CPU:0'):
|
| 198 |
-
movenet_model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
|
| 199 |
-
movenet = movenet_model.signatures['serving_default']
|
| 200 |
|
| 201 |
# Create output video writer
|
| 202 |
output_filename = f'output_movenet_lightning.mp4'
|
|
@@ -210,14 +203,18 @@ def process_video_movenet(video_path):
|
|
| 210 |
|
| 211 |
frame_count = 0
|
| 212 |
processed_frames = 0
|
|
|
|
| 213 |
|
| 214 |
while cap.isOpened():
|
| 215 |
ret, frame = cap.read()
|
| 216 |
-
|
|
|
|
|
|
|
| 217 |
break
|
|
|
|
|
|
|
|
|
|
| 218 |
frame_count += 1
|
| 219 |
-
if frame_count % 10 != 0: # Process every 10th frame
|
| 220 |
-
continue
|
| 221 |
try:
|
| 222 |
# Ensure frame size matches VideoWriter
|
| 223 |
if frame.shape[1] != width or frame.shape[0] != height:
|
|
@@ -227,15 +224,10 @@ def process_video_movenet(video_path):
|
|
| 227 |
img = frame.copy()
|
| 228 |
img = tf.image.resize_with_pad(tf.expand_dims(img, axis=0), 192, 192)
|
| 229 |
img = tf.cast(img, dtype=tf.int32)
|
| 230 |
-
#
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
keypoints = results['output_0'].numpy()
|
| 235 |
-
else:
|
| 236 |
-
with tf.device('/CPU:0'):
|
| 237 |
-
results = movenet(img)
|
| 238 |
-
keypoints = results['output_0'].numpy()
|
| 239 |
# Process keypoints and draw on frame
|
| 240 |
y, x, c = frame.shape
|
| 241 |
shaped = np.squeeze(keypoints)
|
|
@@ -260,7 +252,9 @@ def process_video_movenet(video_path):
|
|
| 260 |
print(f"[DEBUG] Output video file size: {file_size} bytes")
|
| 261 |
if processed_frames == 0 or file_size < 1000:
|
| 262 |
raise ValueError(f"Output video file is empty or too small: {output_path}")
|
| 263 |
-
|
|
|
|
|
|
|
| 264 |
except Exception as e:
|
| 265 |
print(f"[ERROR] Error in process_video_movenet: {str(e)}")
|
| 266 |
traceback.print_exc()
|
|
@@ -400,7 +394,9 @@ def process_video_mediapipe(video_path):
|
|
| 400 |
if frame_count == 0:
|
| 401 |
raise ValueError("No frames were processed from the video by MediaPipe")
|
| 402 |
logger.info(f"MediaPipe video processing completed. Processed {frame_count} frames. Output: {output_path}")
|
| 403 |
-
|
|
|
|
|
|
|
| 404 |
except Exception as e:
|
| 405 |
logger.error(f'Error in process_video_mediapipe: {e}')
|
| 406 |
traceback.print_exc()
|
|
|
|
| 156 |
finally:
|
| 157 |
cleanup_memory()
|
| 158 |
|
| 159 |
+
@app.route('/static/uploads/<path:filename>', endpoint='serve_video')
|
| 160 |
def serve_video(filename):
|
| 161 |
response = send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=False)
|
| 162 |
# Ensure correct content type, especially for Safari/iOS if issues arise
|
|
|
|
| 185 |
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 186 |
print(f"[DEBUG] Video properties - FPS: {fps}, Width: {width}, Height: {height}, Total Frames: {total_frames}")
|
| 187 |
|
| 188 |
+
# Force MoveNet to CPU to avoid GPU JIT error
|
| 189 |
+
print("[DEBUG] Forcing CPU for MoveNet (due to GPU JIT error)")
|
| 190 |
+
with tf.device('/CPU:0'):
|
| 191 |
+
movenet_model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
|
| 192 |
+
movenet = movenet_model.signatures['serving_default']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 193 |
|
| 194 |
# Create output video writer
|
| 195 |
output_filename = f'output_movenet_lightning.mp4'
|
|
|
|
| 203 |
|
| 204 |
frame_count = 0
|
| 205 |
processed_frames = 0
|
| 206 |
+
first_frame_shape = None
|
| 207 |
|
| 208 |
while cap.isOpened():
|
| 209 |
ret, frame = cap.read()
|
| 210 |
+
print(f"[DEBUG] Frame {frame_count+1}: ret={ret}, frame is None: {frame is None}")
|
| 211 |
+
if not ret or frame is None:
|
| 212 |
+
print(f"[DEBUG] Stopping at frame {frame_count+1}: ret={ret}, frame is None: {frame is None}")
|
| 213 |
break
|
| 214 |
+
if first_frame_shape is None:
|
| 215 |
+
first_frame_shape = frame.shape
|
| 216 |
+
print(f"[DEBUG] First frame shape: {first_frame_shape}")
|
| 217 |
frame_count += 1
|
|
|
|
|
|
|
| 218 |
try:
|
| 219 |
# Ensure frame size matches VideoWriter
|
| 220 |
if frame.shape[1] != width or frame.shape[0] != height:
|
|
|
|
| 224 |
img = frame.copy()
|
| 225 |
img = tf.image.resize_with_pad(tf.expand_dims(img, axis=0), 192, 192)
|
| 226 |
img = tf.cast(img, dtype=tf.int32)
|
| 227 |
+
# Always run inference on CPU
|
| 228 |
+
with tf.device('/CPU:0'):
|
| 229 |
+
results = movenet(img)
|
| 230 |
+
keypoints = results['output_0'].numpy()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 231 |
# Process keypoints and draw on frame
|
| 232 |
y, x, c = frame.shape
|
| 233 |
shaped = np.squeeze(keypoints)
|
|
|
|
| 252 |
print(f"[DEBUG] Output video file size: {file_size} bytes")
|
| 253 |
if processed_frames == 0 or file_size < 1000:
|
| 254 |
raise ValueError(f"Output video file is empty or too small: {output_path}")
|
| 255 |
+
video_url = url_for('serve_video', filename=output_filename, _external=False)
|
| 256 |
+
print(f"[DEBUG] Returning video URL: {video_url}")
|
| 257 |
+
return video_url
|
| 258 |
except Exception as e:
|
| 259 |
print(f"[ERROR] Error in process_video_movenet: {str(e)}")
|
| 260 |
traceback.print_exc()
|
|
|
|
| 394 |
if frame_count == 0:
|
| 395 |
raise ValueError("No frames were processed from the video by MediaPipe")
|
| 396 |
logger.info(f"MediaPipe video processing completed. Processed {frame_count} frames. Output: {output_path}")
|
| 397 |
+
video_url = url_for('serve_video', filename=output_filename, _external=False)
|
| 398 |
+
print(f"[DEBUG] Returning video URL: {video_url}")
|
| 399 |
+
return video_url
|
| 400 |
except Exception as e:
|
| 401 |
logger.error(f'Error in process_video_mediapipe: {e}')
|
| 402 |
traceback.print_exc()
|