Piarasingh85 committed on
Commit
26aa229
·
verified ·
1 Parent(s): cbd62d1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -38
app.py CHANGED
@@ -23,7 +23,7 @@ def calculate_angle(a, b, c):
23
  return angle
24
 
25
  # Define a function to classify yoga poses
26
- def classify_pose(landmarks, output_image, display=False):
27
  label = 'Unknown Pose'
28
 
29
  # Calculate the required angles
@@ -91,51 +91,53 @@ def classify_pose(landmarks, output_image, display=False):
91
  label = "Upward Salute Pose"
92
 
93
  # Check for Hands Under Feet Pose
94
- if landmarks[mp_pose.Pose.Landmark.LEFT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y and landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y and abs(landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x) < 0.05 and abs(landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x) < 0.05:
 
 
 
95
  label = "Hands Under Feet Pose"
96
 
97
- # Check for Plank Pose
98
- if left_shoulder_angle > 160 and left_shoulder_angle < 200 and \
99
- right_shoulder_angle > 160 and right_shoulder_angle < 200 and \
100
- left_knee_angle > 160 and left_knee_angle < 200 and \
101
- right_knee_angle > 160 and right_knee_angle < 200:
102
  label = "Plank Pose"
103
 
104
- # Write the label on the output image
105
- cv2.putText(output_image, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
106
-
107
- return output_image, label
108
 
109
  def detect_and_classify_pose(input_image, complexity, confidence, background_color):
110
- global pose
111
- pose = mp_pose.Pose(static_image_mode=False, min_detection_confidence=confidence, model_complexity=complexity)
112
-
113
- input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
114
- if background_color == 'White':
115
- input_image[:] = [255, 255, 255]
116
- elif background_color == 'Green':
117
- input_image[:] = [0, 255, 0]
118
- elif background_color == 'Black':
119
- input_image[:] = [0, 0, 0]
120
-
121
- results = pose.process(input_image)
122
- pose_classification = "No pose detected"
123
- if results.pose_landmarks:
124
- mp_drawing.draw_landmarks(input_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
125
- input_image, pose_classification = classify_pose(results.pose_landmarks.landmark, input_image)
126
- return input_image, pose_classification
 
127
 
128
  iface = gr.Interface(
129
- fn=detect_and_classify_pose,
130
- inputs=[
131
- gr.inputs.Image(type="numpy", label="Upload an Image"),
132
- gr.inputs.Slider(minimum=0, maximum=2, default=1, label="Model Complexity"),
133
- gr.inputs.Slider(minimum=0.1, maximum=1.0, default=0.5, label="Detection Confidence"),
134
- gr.inputs.Radio(choices=['White', 'Green', 'Black'], default='White', label="Background Color")
135
- ],
136
- outputs=["image", "text"],
137
- title="Live Yoga Pose Detection and Classification",
138
- description="This app detects and classifies yoga poses from the live camera feed using MediaPipe.",
139
  )
140
 
141
  iface.launch()
 
23
  return angle
24
 
25
  # Define a function to classify yoga poses
26
+ def classify_pose(landmarks, output_image):
27
  label = 'Unknown Pose'
28
 
29
  # Calculate the required angles
 
91
  label = "Upward Salute Pose"
92
 
93
  # Check for Hands Under Feet Pose
94
+ if landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y and \
95
+ landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y and \
96
+ abs(landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x) < 0.05 and \
97
+ abs(landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x) < 0.05:
98
  label = "Hands Under Feet Pose"
99
 
100
+ # Check for Plank Pose
101
+ if 160 < left_shoulder_angle < 200 and 160 < right_shoulder_angle < 200 and \
102
+ 160 < left_knee_angle < 200 and 160 < right_knee_angle < 200:
 
 
103
  label = "Plank Pose"
104
 
105
+ # Write the label on the output image
106
+ cv2.putText(output_image, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
107
+
108
+ return output_image, label
109
 
110
def detect_and_classify_pose(input_image, complexity, confidence, background_color):
    """Detect pose landmarks in an uploaded image and classify the yoga pose.

    Args:
        input_image: image as a numpy array (BGR, as delivered by Gradio/OpenCV).
        complexity: MediaPipe model complexity (0, 1, or 2).
        confidence: minimum detection confidence, in [0.1, 1.0].
        background_color: 'White', 'Green', or 'Black' — the fill painted
            behind the drawn skeleton in the output image.

    Returns:
        (output_image, pose_classification): the image with landmarks drawn
        over the chosen background, and the predicted pose label
        (or "No pose detected" when MediaPipe finds no landmarks).
    """
    # NOTE(review): rebuilding a Pose object on every call is expensive;
    # kept as-is (with the original `global pose`) to preserve the interface.
    global pose
    pose = mp_pose.Pose(static_image_mode=False,
                        min_detection_confidence=confidence,
                        model_complexity=complexity)

    input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

    # BUG FIX: run pose detection on the REAL image before repainting it.
    # The original code overwrote every pixel with the background colour
    # first, so MediaPipe always processed a solid-colour frame and could
    # never detect a pose.
    results = pose.process(input_image)

    # Now that landmarks have been extracted, replace the photo with the
    # requested solid background so only the skeleton remains visible.
    if background_color == 'White':
        input_image[:] = [255, 255, 255]
    elif background_color == 'Green':
        input_image[:] = [0, 255, 0]
    elif background_color == 'Black':
        input_image[:] = [0, 0, 0]

    pose_classification = "No pose detected"
    if results.pose_landmarks:
        # Draw the detected skeleton onto the (now solid-colour) canvas,
        # then classify the pose from the raw landmark list.
        mp_drawing.draw_landmarks(input_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        input_image, pose_classification = classify_pose(results.pose_landmarks.landmark, input_image)

    return input_image, pose_classification
129
 
130
# Build the Gradio UI: one uploaded image plus the MediaPipe tuning controls,
# returning the annotated image and the predicted pose label.
iface = gr.Interface(
    fn=detect_and_classify_pose,
    inputs=[
        gr.inputs.Image(type="numpy", label="Upload an Image"),
        gr.inputs.Slider(minimum=0, maximum=2, default=1, label="Model Complexity"),
        gr.inputs.Slider(minimum=0.1, maximum=1.0, default=0.5, label="Detection Confidence"),
        gr.inputs.Radio(choices=['White', 'Green', 'Black'], default='White', label="Background Color")
    ],
    outputs=["image", "text"],
    # FIX: the previous title/description claimed a "live camera feed",
    # but the app classifies a single uploaded image (see the Image input
    # above) — the copy was misleading.
    title="Yoga Pose Detection and Classification",
    description="This app detects and classifies yoga poses from an uploaded image using MediaPipe.",
)

iface.launch()