"""Real-time hand-landmark detection in the browser.

Streams webcam video through streamlit-webrtc, runs each frame through
MediaPipe Hands, and draws the detected landmarks before sending the
frame back to the client.
"""

import av
import cv2
import mediapipe as mp
import streamlit as st
from streamlit_webrtc import VideoProcessorBase, webrtc_streamer

# MediaPipe solution handles (module-level: cheap, stateless factories).
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils


class VideoProcessor(VideoProcessorBase):
    """Per-connection frame processor that overlays hand landmarks.

    NOTE(review): the original inherited the deprecated
    ``VideoTransformerBase`` while implementing ``recv`` — which is the
    ``VideoProcessorBase`` contract — so the current base class is used here.
    """

    def __init__(self) -> None:
        # One Hands instance per WebRTC session. It keeps tracking state
        # between frames, so it must NOT be recreated per frame.
        self.hands = mp_hands.Hands(
            max_num_hands=2,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        )

    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
        """Receive one video frame, annotate it, and return it.

        Parameters
        ----------
        frame : av.VideoFrame
            Raw frame from the browser webcam stream.

        Returns
        -------
        av.VideoFrame
            BGR frame with landmarks drawn on any detected hands.
        """
        img = frame.to_ndarray(format="bgr24")
        img = cv2.flip(img, 1)  # mirror view feels natural for a webcam

        # MediaPipe expects RGB input; OpenCV delivers BGR.
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        result = self.hands.process(img_rgb)

        # Draw landmarks and their connections onto the (BGR) display image.
        if result.multi_hand_landmarks:
            for hand_landmarks in result.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    img, hand_landmarks, mp_hands.HAND_CONNECTIONS
                )

        return av.VideoFrame.from_ndarray(img, format="bgr24")


# ---- Streamlit UI ---------------------------------------------------------
st.title("Gesture & Hand Landmark Detection 🚀")
st.write(
    "This app uses MediaPipe and Streamlit to detect hand landmarks "
    "in real-time from your webcam."
)

# WebRTC streamer for the live video feed.
webrtc_streamer(
    key="gesture-detection",
    video_processor_factory=VideoProcessor,
    rtc_configuration={
        "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
    },
    # Audio is never used; requesting it would only trigger an extra
    # browser permission prompt.
    media_stream_constraints={"video": True, "audio": False},
)

# Footer (placeholder — the original emitted an empty HTML block).
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)