"""Streamlit app: real-time hand-landmark detection via MediaPipe over WebRTC."""
import streamlit as st
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase
import mediapipe as mp
import cv2
import av
# Initialize MediaPipe Hands
# Module-level MediaPipe handles shared by every VideoProcessor instance:
# `mp_hands` exposes the Hands solution (model factory + HAND_CONNECTIONS),
# `mp_drawing` provides the landmark-rendering helpers used in recv().
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
class VideoProcessor(VideoTransformerBase):
    """Per-frame video processor that overlays MediaPipe hand landmarks.

    streamlit-webrtc instantiates one of these per browser session via
    ``video_processor_factory`` and calls :meth:`recv` for every frame.
    """

    def __init__(self):
        # One Hands instance per processor: the underlying graph is stateful
        # (it tracks hands between frames), so it must not be shared.
        self.hands = mp_hands.Hands(
            max_num_hands=2,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        )

    def recv(self, frame):
        """Annotate one incoming video frame and return it.

        Parameters
        ----------
        frame : av.VideoFrame
            Raw frame delivered by the WebRTC stream.

        Returns
        -------
        av.VideoFrame
            The same image, mirrored, with any detected hand landmarks drawn.
        """
        # Convert the input frame to a numpy array (BGR, OpenCV convention).
        img = frame.to_ndarray(format="bgr24")
        img = cv2.flip(img, 1)  # Flip horizontally for a mirror effect.
        # MediaPipe expects RGB input, while av/OpenCV deliver BGR.
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Process the frame for hand landmarks.
        result = self.hands.process(img_rgb)
        # Draw landmarks (if any) onto the BGR image in place.
        if result.multi_hand_landmarks:
            for hand_landmarks in result.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    img, hand_landmarks, mp_hands.HAND_CONNECTIONS
                )
        # Hand the processed frame back to the WebRTC streamer.
        return av.VideoFrame.from_ndarray(img, format="bgr24")

    def __del__(self):
        # Fix: release MediaPipe's native graph resources when the session's
        # processor is discarded — Hands holds memory the GC cannot reclaim.
        try:
            self.hands.close()
        except Exception:
            pass  # Best-effort cleanup; may run during interpreter shutdown.
# Streamlit UI
st.title("Gesture & Hand Landmark Detection 🚀")
st.write("This app uses MediaPipe and Streamlit to detect hand landmarks in real-time from your webcam.")
# WebRTC streamer for live video feed. A public Google STUN server is
# configured so peers behind NAT can negotiate a connection path.
webrtc_streamer(
    key="gesture-detection",  # stable widget key so Streamlit reruns reuse the stream
    video_processor_factory=VideoProcessor,
    rtc_configuration={
        "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
    },
)
# Footer: small centered credit line rendered as raw HTML/CSS.
# Fix: removed the stray garbled "g " (likely a lost emoji) from the
# user-facing credit text.
st.markdown(
    """
    <style>
    .footer {text-align: center; font-size: 12px; color: grey; margin-top: 20px;}
    </style>
    <p class="footer">Made with Streamlit & MediaPipe</p>
    """,
    unsafe_allow_html=True,  # required so the <style>/<p> markup is rendered, not escaped
)
|