Spaces:
Sleeping
Sleeping
File size: 6,760 Bytes
bebf8ca 0c9d791 bebf8ca 26aa229 096798d 0983a8d bebf8ca 0983a8d 096798d bebf8ca 12df91d bebf8ca 12df91d bebf8ca 26aa229 5a26cf2 26aa229 cbd62d1 5a26cf2 26aa229 5a26cf2 26aa229 49a721f 26aa229 5a26cf2 26aa229 751ce7d 26aa229 5a26cf2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 |
import gradio as gr
import cv2
import mediapipe as mp
import numpy as np
# Initialize mediapipe pose class
mp_pose = mp.solutions.pose  # MediaPipe pose-estimation solution (landmark enums + Pose class)
pose = None  # Pose estimator; (re)created inside detect_and_classify_pose on each request
mp_drawing = mp.solutions.drawing_utils  # Helpers for drawing detected landmarks onto images
# Angle at the middle joint formed by three pose landmarks
def calculate_angle(a, b, c):
    """Return the angle in degrees (0-180) at vertex *b*, formed by the
    segments b->a and b->c.

    Each argument is a landmark-like object exposing ``x`` and ``y``
    attributes (normalized image coordinates).
    """
    # Direction of each arm of the joint, measured from the positive x-axis.
    direction_ba = np.arctan2(a.y - b.y, a.x - b.x)
    direction_bc = np.arctan2(c.y - b.y, c.x - b.x)
    degrees = abs(np.degrees(direction_bc - direction_ba))
    # Fold reflex angles back into the 0-180 range.
    return 360.0 - degrees if degrees > 180.0 else degrees
# Define a function to classify yoga poses
def classify_pose(landmarks, output_image):
    """Classify the yoga pose shown by *landmarks* using rule-based angle
    and position checks, and write the label onto *output_image*.

    Args:
        landmarks: sequence of MediaPipe pose landmarks (indexable by
            ``mp_pose.PoseLandmark.<NAME>.value``), each with normalized
            ``x``/``y`` attributes.
        output_image: BGR/RGB numpy image the label is drawn onto in place.

    Returns:
        (output_image, label) where label is the detected pose name or
        'Unknown Pose'.

    Note: the checks below run in order and each matching check overwrites
    the label, so later checks take precedence.
    """
    label = 'Unknown Pose'
    lm_idx = mp_pose.PoseLandmark

    def lm(member):
        # Fetch a landmark by its PoseLandmark enum member.
        return landmarks[member.value]

    # Joint angles (degrees, 0-180) used by the rule-based checks below.
    left_elbow_angle = calculate_angle(
        lm(lm_idx.LEFT_SHOULDER), lm(lm_idx.LEFT_ELBOW), lm(lm_idx.LEFT_WRIST))
    right_elbow_angle = calculate_angle(
        lm(lm_idx.RIGHT_SHOULDER), lm(lm_idx.RIGHT_ELBOW), lm(lm_idx.RIGHT_WRIST))
    left_shoulder_angle = calculate_angle(
        lm(lm_idx.LEFT_ELBOW), lm(lm_idx.LEFT_SHOULDER), lm(lm_idx.LEFT_HIP))
    right_shoulder_angle = calculate_angle(
        lm(lm_idx.RIGHT_HIP), lm(lm_idx.RIGHT_SHOULDER), lm(lm_idx.RIGHT_ELBOW))
    left_knee_angle = calculate_angle(
        lm(lm_idx.LEFT_HIP), lm(lm_idx.LEFT_KNEE), lm(lm_idx.LEFT_ANKLE))
    right_knee_angle = calculate_angle(
        lm(lm_idx.RIGHT_HIP), lm(lm_idx.RIGHT_KNEE), lm(lm_idx.RIGHT_ANKLE))

    # Check for Five-Pointed Star Pose: wrists level with hips, feet and
    # hands spread wide apart (thresholds in normalized image coordinates).
    if abs(lm(lm_idx.LEFT_WRIST).y - lm(lm_idx.LEFT_HIP).y) < 0.1 and \
            abs(lm(lm_idx.RIGHT_WRIST).y - lm(lm_idx.RIGHT_HIP).y) < 0.1 and \
            abs(lm(lm_idx.LEFT_ANKLE).x - lm(lm_idx.RIGHT_ANKLE).x) > 0.2 and \
            abs(lm(lm_idx.LEFT_WRIST).x - lm(lm_idx.RIGHT_WRIST).x) > 0.2:
        label = "Five-Pointed Star Pose"

    # Check for Warrior II pose: arms straight and horizontal, one leg
    # straight while the other is bent.
    if 165 < left_elbow_angle < 195 and 165 < right_elbow_angle < 195 and \
            80 < left_shoulder_angle < 110 and 80 < right_shoulder_angle < 110:
        if (165 < left_knee_angle < 195 or 165 < right_knee_angle < 195) and \
                (90 < left_knee_angle < 120 or 90 < right_knee_angle < 120):
            label = 'Warrior II Pose'

    # Check for T pose: arms straight and horizontal, both legs straight.
    if 165 < left_elbow_angle < 195 and 165 < right_elbow_angle < 195 and \
            80 < left_shoulder_angle < 110 and 80 < right_shoulder_angle < 110 and \
            160 < left_knee_angle < 195 and 160 < right_knee_angle < 195:
        label = 'T Pose'

    # Check for Tree Pose: one leg straight, the other sharply bent.
    # BUGFIX: the original tested 315 < left_knee_angle < 335, which is
    # unreachable because calculate_angle folds every angle into 0-180;
    # a left-leg bend is detected with the same 25-45 window as the right.
    if (165 < left_knee_angle < 195 or 165 < right_knee_angle < 195) and \
            (25 < left_knee_angle < 45 or 25 < right_knee_angle < 45):
        label = 'Tree Pose'

    # Check for Upward Salute Pose: wrists vertically aligned over the
    # hips, raised above level shoulders.
    if abs(lm(lm_idx.LEFT_WRIST).x - lm(lm_idx.LEFT_HIP).x) < 0.1 and \
            abs(lm(lm_idx.RIGHT_WRIST).x - lm(lm_idx.RIGHT_HIP).x) < 0.1 and \
            lm(lm_idx.LEFT_WRIST).y < lm(lm_idx.LEFT_SHOULDER).y and \
            lm(lm_idx.RIGHT_WRIST).y < lm(lm_idx.RIGHT_SHOULDER).y and \
            abs(lm(lm_idx.LEFT_SHOULDER).y - lm(lm_idx.RIGHT_SHOULDER).y) < 0.05:
        label = "Upward Salute Pose"

    # Check for Hands Under Feet Pose: wrists below the knees and
    # horizontally aligned with the ankles (y grows downward in image space).
    if lm(lm_idx.LEFT_WRIST).y > lm(lm_idx.LEFT_KNEE).y and \
            lm(lm_idx.RIGHT_WRIST).y > lm(lm_idx.RIGHT_KNEE).y and \
            abs(lm(lm_idx.LEFT_WRIST).x - lm(lm_idx.LEFT_ANKLE).x) < 0.05 and \
            abs(lm(lm_idx.RIGHT_WRIST).x - lm(lm_idx.RIGHT_ANKLE).x) < 0.05:
        label = "Hands Under Feet Pose"

    # Check for Plank Pose: shoulders and knees both near-straight.
    if 160 < left_shoulder_angle < 200 and 160 < right_shoulder_angle < 200 and \
            160 < left_knee_angle < 200 and 160 < right_knee_angle < 200:
        label = "Plank Pose"

    # Write the label on the output image
    cv2.putText(output_image, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
    return output_image, label
def detect_and_classify_pose(input_image, complexity, confidence, background_color):
    """Detect pose landmarks in an uploaded RGB image, classify the yoga
    pose, and replace the background with a solid color when a person is
    segmented.

    Args:
        input_image: RGB numpy image from the Gradio Image component.
        complexity: MediaPipe model complexity (slider value; cast to int).
        confidence: minimum detection confidence (0.1-1.0).
        background_color: 'White', 'Green' or 'Black'.

    Returns:
        (annotated image, pose label string).
    """
    global pose
    # static_image_mode=True: each call is an independent uploaded image,
    # not a video stream.  int() because Gradio sliders deliver floats and
    # model_complexity must be 0, 1 or 2.  Segmentation is enabled so the
    # background can be replaced without destroying the person pixels.
    pose = mp_pose.Pose(static_image_mode=True,
                        min_detection_confidence=confidence,
                        model_complexity=int(complexity),
                        enable_segmentation=True)
    # Gradio supplies RGB, which is exactly what MediaPipe expects, so no
    # BGR<->RGB conversion is needed (the original round-trip fed
    # channel-swapped data to the detector).
    results = pose.process(input_image)
    output_image = input_image.copy()
    # BUGFIX: the original overwrote the ENTIRE image with the background
    # color BEFORE pose.process ran ('White' being the default, detection
    # always failed).  Composite the background AFTER detection using the
    # person segmentation mask instead.
    bg_colors = {'White': [255, 255, 255], 'Green': [0, 255, 0], 'Black': [0, 0, 0]}
    seg_mask = getattr(results, 'segmentation_mask', None)
    if seg_mask is not None and background_color in bg_colors:
        background = np.full(output_image.shape, bg_colors[background_color], dtype=np.uint8)
        person = np.stack((seg_mask,) * 3, axis=-1) > 0.5  # boolean per-pixel person mask
        output_image = np.where(person, output_image, background)
    pose_classification = "No pose detected"
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(output_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        output_image, pose_classification = classify_pose(results.pose_landmarks.landmark, output_image)
    return output_image, pose_classification
# Build and launch the Gradio UI around the detector.
iface = gr.Interface(
    fn=detect_and_classify_pose,
    inputs=[
        gr.Image(type="numpy", label="Upload an Image"),
        # step=1 so the slider emits the integer complexities (0, 1, 2)
        # that MediaPipe's model_complexity parameter requires.
        gr.Slider(minimum=0, maximum=2, step=1, value=1, label="Model Complexity"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.5, label="Detection Confidence"),
        gr.Radio(choices=['White', 'Green', 'Black'], value='White', label="Background Color")
    ],
    outputs=["image", "text"],
    # The app classifies uploaded still images; the original text claimed a
    # live camera feed, which the interface does not provide.
    title="Yoga Pose Detection and Classification",
    description="This app detects and classifies yoga poses from an uploaded image using MediaPipe.",
)
iface.launch()
|