# Hugging Face Space: human pose estimation on uploaded videos (MediaPipe + OpenCV + Gradio).
| import cv2 | |
| import mediapipe as mp | |
| import numpy as np | |
| import gradio as gr | |
# MediaPipe pose-estimation solution and its landmark-drawing helpers.
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
# Single shared Pose instance in video mode (static_image_mode=False), reused
# for every frame so landmark tracking carries over between consecutive frames.
pose = mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5)
def calculate_angle(a, b, c):
    """Return the angle at vertex ``b`` of triangle a-b-c, in degrees.

    Args:
        a, b, c: Point coordinates as sequences of floats (e.g. ``[x, y]``).

    Returns:
        The angle in degrees in [0, 180]. Returns 0.0 when either segment
        has zero length (coincident points), where the angle is undefined,
        instead of dividing by zero and producing NaN.
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    c = np.asarray(c, dtype=float)
    ba = a - b
    bc = c - b
    denom = np.linalg.norm(ba) * np.linalg.norm(bc)
    if denom == 0.0:
        # Degenerate input (overlapping landmarks): no meaningful angle.
        return 0.0
    # Clip guards against |cos| exceeding 1 by floating-point rounding,
    # which would make np.arccos return NaN.
    cosine_angle = np.clip(np.dot(ba, bc) / denom, -1.0, 1.0)
    return float(np.degrees(np.arccos(cosine_angle)))
def detect_pose_video(video_path):
    """Annotate a video with MediaPipe pose landmarks and the left-elbow angle.

    Each frame is run through the shared ``pose`` estimator; detected skeletons
    are drawn onto the frame along with the left shoulder-elbow-wrist angle,
    and the annotated frames are written to a new MP4 file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated output video, or ``None`` if the input could not
        be opened or contained no readable frames.
    """
    cap = cv2.VideoCapture(video_path)
    # Preserve the source frame rate so the output plays at the original speed;
    # fall back to 20 fps when the container does not report one.
    fps = cap.get(cv2.CAP_PROP_FPS) or 20.0
    output_frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # MediaPipe expects RGB; OpenCV decodes BGR.
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = pose.process(frame_rgb)
        if results.pose_landmarks:
            mp_drawing.draw_landmarks(
                frame,
                results.pose_landmarks,
                mp_pose.POSE_CONNECTIONS,
                mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
                mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2),
            )
            # Example joint metric: left elbow angle (shoulder-elbow-wrist),
            # computed in pixel coordinates (normalized landmarks * frame size).
            h, w, _ = frame.shape
            landmarks = results.pose_landmarks.landmark
            shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x * w,
                        landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y * h]
            elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x * w,
                     landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y * h]
            wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x * w,
                     landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y * h]
            angle = calculate_angle(shoulder, elbow, wrist)
            cv2.putText(frame, f"Left Elbow: {int(angle)} deg", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
        output_frames.append(frame)
    cap.release()
    # Guard against an unreadable/empty video: the original code referenced the
    # loop variable `frame` after the loop, which is undefined when no frame
    # was ever read (UnboundLocalError).
    if not output_frames:
        return None
    # Re-encode the annotated frames, sized from the first decoded frame.
    out_path = "annotated_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    height, width = output_frames[0].shape[:2]
    out = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    for annotated in output_frames:
        out.write(annotated)
    out.release()
    return out_path
# Gradio interface: one video in, one annotated video out, processed by
# detect_pose_video.
demo = gr.Interface(
    fn=detect_pose_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Video(label="Annotated Video"),
    title="Human Pose Estimation on Video",
    description="Upload a video and see pose landmarks & joint angles detected in real-time."
)

# Launch the web app only when run as a script (Spaces runs this entry point).
if __name__ == "__main__":
    demo.launch()