Piarasingh85 committed on
Commit
5a26cf2
·
verified ·
1 Parent(s): 0c9d791

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -1
app.py CHANGED
@@ -91,4 +91,52 @@ def classify_pose(landmarks, output_image, display=False):
91
  label = "Upward Salute Pose"
92
 
93
  # Check for Hands Under Feet Pose
94
- if landmarks[mp_pose.Pose
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
  label = "Upward Salute Pose"
92
 
93
  # Check for Hands Under Feet Pose
94
+ if landmarks[mp_pose.Pose.Landmark.LEFT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y and
95
+ landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y and abs(landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x) < 0.05 and abs(landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x) < 0.05:
96
+ label = "Hands Under Feet Pose"
97
+
98
+ # Check for Plank Pose
99
+ if left_shoulder_angle > 160 and left_shoulder_angle < 200 and \
100
+ right_shoulder_angle > 160 and right_shoulder_angle < 200 and \
101
+ left_knee_angle > 160 and left_knee_angle < 200 and \
102
+ right_knee_angle > 160 and right_knee_angle < 200:
103
+ label = "Plank Pose"
104
+
105
+ # Write the label on the output image
106
+ cv2.putText(output_image, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
107
+
108
+ return output_image, label
109
+
110
def detect_and_classify_pose(input_image, complexity, confidence, background_color):
    """Detect a pose in *input_image*, optionally recolor the background, and classify it.

    Args:
        input_image: numpy image array as delivered by the Gradio Image input.
        complexity: MediaPipe model complexity (0, 1 or 2); Gradio sliders may
            deliver this as a float, so it is coerced to int below.
        confidence: minimum detection confidence in (0, 1].
        background_color: 'White', 'Green' or 'Black' to replace the background,
            or any other value to leave the image untouched.

    Returns:
        (annotated_image, classification_label) tuple.
    """
    import numpy as np

    global pose
    # NOTE(review): static_image_mode=False assumes a video stream; for a single
    # uploaded image MediaPipe recommends static_image_mode=True — confirm intent.
    pose = mp_pose.Pose(
        static_image_mode=False,
        min_detection_confidence=confidence,
        model_complexity=int(complexity),  # mediapipe rejects non-int complexity
        enable_segmentation=True,  # needed to recolor only the background
    )

    input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

    # BUG FIX: the original filled the ENTIRE frame with the chosen background
    # color *before* calling pose.process(), erasing the person and making
    # detection impossible. Detect first, then recolor only the background
    # pixels using the segmentation mask.
    results = pose.process(input_image)

    fill = {
        'White': [255, 255, 255],
        'Green': [0, 255, 0],
        'Black': [0, 0, 0],
    }.get(background_color)
    if fill is not None and results.segmentation_mask is not None:
        # Mask > 0.5 marks person pixels; keep those, replace the rest.
        person = results.segmentation_mask[..., None] > 0.5
        input_image = np.where(person, input_image,
                               np.array(fill, dtype=input_image.dtype))

    pose_classification = "No pose detected"
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(input_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        input_image, pose_classification = classify_pose(results.pose_landmarks.landmark, input_image)
    return input_image, pose_classification
128
+
129
# Gradio UI: image in, annotated image + pose label out.
# NOTE(review): gr.inputs.* is the legacy Gradio 2.x API (removed in Gradio 4);
# migrate to gr.Image / gr.Slider / gr.Radio when upgrading the dependency.
iface = gr.Interface(
    fn=detect_and_classify_pose,
    inputs=[
        gr.inputs.Image(type="numpy", label="Upload an Image"),
        # BUG FIX: without step=1 the slider emits floats, but MediaPipe's
        # model_complexity only accepts the integers 0, 1 or 2.
        gr.inputs.Slider(minimum=0, maximum=2, step=1, default=1, label="Model Complexity"),
        gr.inputs.Slider(minimum=0.1, maximum=1.0, default=0.5, label="Detection Confidence"),
        gr.inputs.Radio(choices=['White', 'Green', 'Black'], default='White', label="Background Color"),
    ],
    outputs=["image", "text"],
    title="Live Yoga Pose Detection and Classification",
    description="This app detects and classifies yoga poses from the live camera feed using MediaPipe.",
)

iface.launch()