"""Streamlit app: real-time hand-landmark detection via MediaPipe over WebRTC."""

import os

import av
import cv2
import mediapipe as mp
import streamlit as st
from streamlit_webrtc import VideoTransformerBase, webrtc_streamer

# MediaPipe hand-tracking solution and its landmark-drawing helpers.
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils

# Path to the gesture recognizer model.
# BUG FIX: the original called st.error + st.stop when this file was missing,
# but the model is never loaded or used anywhere in this script — detection
# below relies solely on mp.solutions.hands, which ships its own model.
# Downgraded to a non-fatal warning so the working feature still runs.
model_path = "model/gesture_recognizer.task"
if not os.path.exists(model_path):
    st.warning(
        f"Model file not found at {model_path}. "
        "Gesture recognition is unavailable; hand-landmark detection still works."
    )


class VideoProcessor(VideoTransformerBase):
    """Per-frame video processor that overlays MediaPipe hand landmarks.

    NOTE(review): VideoTransformerBase is deprecated in streamlit-webrtc in
    favor of VideoProcessorBase; it is kept here so the class's base type is
    unchanged — confirm the installed streamlit-webrtc version before switching.
    """

    def __init__(self):
        # model_complexity=0 selects the lightweight model for real-time use;
        # detection/tracking thresholds are the MediaPipe defaults (0.5).
        self.hands = mp_hands.Hands(
            max_num_hands=2,
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        )

    def recv(self, frame):
        """Annotate one incoming av.VideoFrame with hand landmarks.

        Args:
            frame: av.VideoFrame delivered by streamlit-webrtc.

        Returns:
            av.VideoFrame: the mirrored BGR frame with landmarks drawn on it.
        """
        img = frame.to_ndarray(format="bgr24")
        img = cv2.flip(img, 1)  # Flip for a mirror effect
        # MediaPipe expects RGB input; OpenCV frames arrive as BGR.
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Process the frame for hand landmarks.
        result = self.hands.process(img_rgb)

        # Draw landmarks (if any hands were found) onto the BGR image in place.
        if result.multi_hand_landmarks:
            for hand_landmarks in result.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    img, hand_landmarks, mp_hands.HAND_CONNECTIONS
                )

        return av.VideoFrame.from_ndarray(img, format="bgr24")


# --- Streamlit UI ----------------------------------------------------------
st.title("Gesture & Hand Landmark Detection 🚀")
st.write("This app uses MediaPipe and Streamlit to detect hand landmarks in real-time.")

# WebRTC streamer for live video; the public Google STUN server lets the
# peer connection traverse NAT without a dedicated TURN deployment.
webrtc_streamer(
    key="gesture-detection",
    video_processor_factory=VideoProcessor,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
)

# Footer — currently an empty HTML placeholder, kept for future styling.
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)