"""Sign-language hand-sign demo: MediaPipe Hands detection behind a Gradio UI.

A single-hand MediaPipe detector is created once at import time and reused
for every frame submitted through the Gradio image interface.
"""

import cv2
import gradio as gr
import mediapipe as mp
import numpy as np

# One-time MediaPipe setup: a single-hand detector reused across frames.
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
hands = mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.7)


def classify_gesture(landmarks):
    """Classify a hand pose from MediaPipe landmarks with a tiny heuristic.

    Parameters
    ----------
    landmarks:
        Sequence of normalized MediaPipe hand landmarks (x/y in [0, 1],
        y grows downward in image coordinates), or an empty/falsy value.

    Returns
    -------
    str
        ``"A (Thumb Up)"`` when the thumb tip sits above the index tip,
        otherwise ``"Unknown"``.
    """
    if not landmarks:
        return "Unknown"
    thumb_tip = landmarks[4]   # mp_hands.HandLandmark.THUMB_TIP
    index_tip = landmarks[8]   # mp_hands.HandLandmark.INDEX_FINGER_TIP
    # Smaller y = higher in the image, so thumb above index => thumb up.
    if thumb_tip.y < index_tip.y:
        return "A (Thumb Up)"
    return "Unknown"


def process_frame(frame):
    """Annotate one RGB frame with hand landmarks and the detected sign.

    Parameters
    ----------
    frame:
        RGB ``numpy`` array from ``gr.Image(type="numpy")``, or ``None``
        when the user submits without an image.

    Returns
    -------
    numpy.ndarray | None
        The mirrored frame with landmarks and a caption drawn on it, or
        ``None`` when no input image was provided.
    """
    if frame is None:  # nothing submitted yet — avoid cv2.flip(None) crash
        return None
    frame = cv2.flip(frame, 1)  # mirror for a natural selfie view
    # Gradio delivers RGB and MediaPipe expects RGB, so no color conversion
    # is needed. The previous BGR2RGB call double-swapped the channels and
    # fed MediaPipe a BGR image, hurting detection.
    result = hands.process(frame)
    gesture = "No hand detected"
    if result.multi_hand_landmarks:
        for hand_landmarks in result.multi_hand_landmarks:
            mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            gesture = classify_gesture(hand_landmarks.landmark)
    cv2.putText(frame, f"Sign: {gesture}", (10, 40),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return frame


# Gradio interface (compatible with old Gradio versions on Spaces).
# NOTE(review): `tool="editor"` was removed in Gradio 4.x — keep only if the
# Space pins an older (3.x) Gradio release; otherwise drop the argument.
demo = gr.Interface(
    fn=process_frame,
    inputs=gr.Image(type="numpy", tool="editor"),  # webcam snapshot button works here
    outputs=gr.Image(type="numpy"),
    title="Sign Language Recognition",
    description="Take a webcam snapshot or upload an image to detect hand signs",
)

if __name__ == "__main__":
    demo.launch()