Runtime error
Update app.py
app.py CHANGED
@@ -8,7 +8,6 @@ mp_hands = mp.solutions.hands
 mp_drawing = mp.solutions.drawing_utils
 hands = mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.7)
 
-# Gesture classifier
 def classify_gesture(landmarks):
     if not landmarks:
         return "Unknown"
@@ -21,75 +20,36 @@ def classify_gesture(landmarks):
     wrist = landmarks[0]
 
     def dist(a, b):
-        return ((a.x - b.x)**2 + (a.y - b.y)**2)
+        return ((a.x - b.x)**2 + (a.y - b.y)**2)**0.5
 
-    if thumb_tip.y < index_tip.y
+    if thumb_tip.y < index_tip.y:
         return "A (Thumb Up)"
-
-    fingers_extended = (
-        index_tip.y < wrist.y and
-        middle_tip.y < wrist.y and
-        ring_tip.y < wrist.y and
-        pinky_tip.y < wrist.y
-    )
-    thumb_side = thumb_tip.x < wrist.x or thumb_tip.x > wrist.x
-    if fingers_extended and thumb_side:
+    if index_tip.y < wrist.y and middle_tip.y < wrist.y and ring_tip.y < wrist.y and pinky_tip.y < wrist.y:
         return "B (Flat Hand)"
-
-    curve = dist(index_tip, thumb_tip)
-    palm = dist(wrist, index_tip)
-    if 0.15 < curve < 0.3 and palm > 0.25:
-        return "C (Curved Hand)"
-
-    if dist(thumb_tip, index_tip) < 0.05:
-        return "OK"
-
-    if (index_tip.y < wrist.y and middle_tip.y < wrist.y and
-        ring_tip.y > wrist.y and pinky_tip.y > wrist.y):
-        return "V / Peace"
-
-    if (index_tip.y > wrist.y and
-        middle_tip.y > wrist.y and
-        ring_tip.y > wrist.y and
-        pinky_tip.y > wrist.y):
-        return "Fist"
-
     return "Unknown"
 
-# Process webcam frame
 def process_frame(frame):
     frame = cv2.flip(frame, 1)
-    h, w, _ = frame.shape
     rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
     result = hands.process(rgb)
-
     gesture = "No hand detected"
 
     if result.multi_hand_landmarks:
         for hand_landmarks in result.multi_hand_landmarks:
-            mp_drawing.draw_landmarks(
-                frame, hand_landmarks, mp_hands.HAND_CONNECTIONS
-            )
+            mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            gesture = classify_gesture(hand_landmarks.landmark)
 
-    cv2.putText(frame, f"Sign: {gesture}",
-                (10, 40), cv2.FONT_HERSHEY_SIMPLEX,
-                1, (0, 255, 0), 2)
-
-    cv2.putText(frame, "Made by Simar",
-                (10, h - 10), cv2.FONT_HERSHEY_SIMPLEX,
-                0.8, (255, 0, 255), 2)
-
+    cv2.putText(frame, f"Sign: {gesture}", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
     return frame
 
-# Gradio
+# Gradio 5.x interface: Use Video input for live webcam
 demo = gr.Interface(
     fn=process_frame,
-    inputs=gr.
-    outputs=gr.Image(),
+    inputs=gr.Video(source="webcam"),  # webcam live input
+    outputs=gr.Image(type="numpy"),
     live=True,
     title="Sign Language Recognition",
-    description="
+    description="Live sign recognition using MediaPipe + OpenCV"
 )
 
 demo.launch()
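
After this commit, classify_gesture keeps only the "A (Thumb Up)" and "B (Flat Hand)" branches, and dist becomes a true Euclidean distance (though nothing calls it any more, since the C/OK/V/Fist branches that used it were removed). The fingertip variables it compares are bound in unchanged context lines between the two hunks, which the diff does not display. A minimal sketch of the full function as it presumably reads after the commit, with those bindings filled in from MediaPipe's documented 21-point hand-landmark indices; the filled-in lines are assumptions, not part of the diff:

# Hedged reconstruction of classify_gesture as it should read after this commit.
# The fingertip/wrist bindings are assumptions drawn from MediaPipe's 21-landmark
# hand model (they sit in context lines the hunks skip over).
def classify_gesture(landmarks):
    if not landmarks:
        return "Unknown"

    thumb_tip = landmarks[4]    # THUMB_TIP
    index_tip = landmarks[8]    # INDEX_FINGER_TIP
    middle_tip = landmarks[12]  # MIDDLE_FINGER_TIP
    ring_tip = landmarks[16]    # RING_FINGER_TIP
    pinky_tip = landmarks[20]   # PINKY_TIP
    wrist = landmarks[0]        # WRIST

    def dist(a, b):
        # Euclidean distance in normalized image coordinates (currently unused)
        return ((a.x - b.x) ** 2 + (a.y - b.y) ** 2) ** 0.5

    # Landmark y grows downward, so "above" means a smaller y value.
    if thumb_tip.y < index_tip.y:
        return "A (Thumb Up)"
    if (index_tip.y < wrist.y and middle_tip.y < wrist.y and
            ring_tip.y < wrist.y and pinky_tip.y < wrist.y):
        return "B (Flat Hand)"
    return "Unknown"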
|
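
The "Runtime error" badge at the top suggests the interface still fails after this commit. Two things stand out, offered here as hypotheses rather than a confirmed diagnosis: in Gradio 4.x/5.x the per-component source= keyword was replaced by sources=[...], so gr.Video(source="webcam") is not an accepted argument; and a Video input hands the callback a file path rather than per-frame numpy arrays, which is what process_frame expects. The usual live-webcam pattern in recent Gradio is a streaming Image input. A sketch under those assumptions, not the committed code:

# Hedged alternative wiring for recent Gradio releases (4.x/5.x), assuming the rest
# of app.py (imports, classify_gesture, process_frame) stays as committed.
import gradio as gr

demo = gr.Interface(
    fn=process_frame,  # called with one numpy frame per webcam update
    inputs=gr.Image(sources=["webcam"], streaming=True, type="numpy"),
    outputs=gr.Image(type="numpy"),
    live=True,
    title="Sign Language Recognition",
    description="Live sign recognition using MediaPipe + OpenCV",
)

demo.launch()

One further caveat worth rechecking: Gradio delivers webcam frames as RGB arrays, while the cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) call in process_frame was written for OpenCV's BGR frames, so the channel conversion may end up swapping colors the wrong way before the frame reaches MediaPipe.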