Update app.py
app.py CHANGED
@@ -1,133 +1,3 @@
-# without hand landmarking
-# import streamlit as st
-# import mediapipe as mp
-# import cv2
-# import os
-# import time
-# from queue import Queue
-
-# # Import necessary components from MediaPipe
-# BaseOptions = mp.tasks.BaseOptions
-# GestureRecognizer = mp.tasks.vision.GestureRecognizer
-# GestureRecognizerOptions = mp.tasks.vision.GestureRecognizerOptions
-# GestureRecognizerResult = mp.tasks.vision.GestureRecognizerResult
-# VisionRunningMode = mp.tasks.vision.RunningMode
-
-# # Correct path to the Gesture Recognizer model file
-# model_path = './model/model/gesture_recognizer.task'
-
-# # Check if file exists
-# if not os.path.exists(model_path):
-#     raise FileNotFoundError(f"Model file not found at {model_path}")
-
-# # Queue to share results between the callback and main thread
-# gesture_queue = Queue()
-
-# # Callback function to process results and add them to the queue
-# def print_result(result: GestureRecognizerResult, output_image: mp.Image, timestamp_ms: int):
-#     results = []  # Collect gesture results
-#     if result.gestures:
-#         for hand_gestures in result.gestures:
-#             for gesture in hand_gestures:
-#                 results.append(f"Gesture: **{gesture.category_name}**, Confidence: **{gesture.score:.2f}**")
-#     else:
-#         results.append("No gestures detected.")
-#     gesture_queue.put(results)
-
-# # Configure the Gesture Recognizer
-# options = GestureRecognizerOptions(
-#     base_options=BaseOptions(model_asset_path=model_path),
-#     running_mode=VisionRunningMode.LIVE_STREAM,
-#     result_callback=print_result
-# )
-
-# # Custom App Header
-# st.markdown("<h1 style='text-align: center; color: #4CAF50;'>Gesture Recognition App 🚀</h1>", unsafe_allow_html=True)
-# st.markdown("<p style='text-align: center; color: grey;'>Recognize hand gestures in real time with MediaPipe and Streamlit</p>", unsafe_allow_html=True)
-
-# # Sidebar for User Controls
-# st.sidebar.title("Control Panel")
-# run_app = st.sidebar.button("Start Gesture Recognition")
-# st.sidebar.write("Toggle the button above to start the app.")
-
-# # Placeholder for video feed and results
-# video_placeholder = st.empty()  # Placeholder for the video feed
-# result_placeholder = st.empty()  # Placeholder for gesture results
-
-# # Footer with branding
-# st.sidebar.markdown(
-#     "<hr><p style='text-align: center;'>Made with ❤️ using Streamlit</p>", unsafe_allow_html=True
-# )
-
-# if run_app:
-#     st.markdown("<h2 style='text-align: center;'>Processing Video Feed...</h2>", unsafe_allow_html=True)
-#     cap = cv2.VideoCapture(0)
-
-#     # Initialize a monotonically increasing timestamp
-#     start_time = time.time()
-
-#     with GestureRecognizer.create_from_options(options) as recognizer:
-#         while cap.isOpened():
-#             success, frame = cap.read()
-#             if not success:
-#                 st.warning("No frames available from the video feed.")
-#                 break
-
-#             # Compute the current timestamp in milliseconds
-#             current_time_ms = int((time.time() - start_time) * 1000)
-
-#             # Convert frame to a MediaPipe Image
-#             mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)
-
-#             # Perform gesture recognition asynchronously
-#             recognizer.recognize_async(mp_image, current_time_ms)
-
-#             # Display the frame in Streamlit
-#             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-#             video_placeholder.image(frame_rgb, channels="RGB", caption="Gesture Recognition", use_column_width=True)
-
-#             # Retrieve and display gesture results from the queue
-#             while not gesture_queue.empty():
-#                 results = gesture_queue.get()
-#                 result_placeholder.markdown(
-#                     "<h3 style='text-align: center; color: #FF5722;'>Detected Gestures</h3>",
-#                     unsafe_allow_html=True,
-#                 )
-#                 result_placeholder.markdown(
-#                     "<ul>" + "".join([f"<li>{result}</li>" for result in results]) + "</ul>",
-#                     unsafe_allow_html=True,
-#                 )
-
-#     cap.release()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# with hand landmark
 import streamlit as st
 import mediapipe as mp
 import cv2
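A note on the deleted block above: MediaPipe's LIVE_STREAM running mode requires the timestamps passed to recognize_async to be monotonically increasing, which the removed code derived from time.time(). A minimal sketch of the same idea built on time.monotonic(), which, unlike the wall clock, cannot jump backwards on system clock adjustments (the swap of clock source is a suggestion, not something this commit changes):

```python
import time

# LIVE_STREAM mode rejects out-of-order timestamps, so derive them from a
# monotonic clock rather than the wall clock used in the deleted block.
_start = time.monotonic()

def next_timestamp_ms() -> int:
    # Milliseconds elapsed since startup; suitable for recognize_async().
    return int((time.monotonic() - _start) * 1000)
```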
@@ -148,8 +18,8 @@ GestureRecognizerResult = mp.tasks.vision.GestureRecognizerResult
 VisionRunningMode = mp.tasks.vision.RunningMode
 
 # Correct path to the Gesture Recognizer model file
-model_path = './model/model/gesture_recognizer.task'
-
+#model_path = './model/model/gesture_recognizer.task'
+model_path = 'model/gesture_recognizer.task'
 # Check if file exists
 if not os.path.exists(model_path):
     raise FileNotFoundError(f"Model file not found at {model_path}")
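The path fix drops the duplicated model/ segment, but 'model/gesture_recognizer.task' is still resolved against the process's working directory, so the model is only found when streamlit run is launched from the project root. A small sketch of a CWD-independent alternative (the pathlib approach and the MODEL_PATH name are suggestions, not part of this commit):

```python
from pathlib import Path

# Anchor the model path to this file's directory so the app finds the model
# regardless of where `streamlit run app.py` is invoked from.
MODEL_PATH = Path(__file__).resolve().parent / "model" / "gesture_recognizer.task"

if not MODEL_PATH.exists():
    raise FileNotFoundError(f"Model file not found at {MODEL_PATH}")

model_path = str(MODEL_PATH)  # BaseOptions(model_asset_path=...) expects a str
```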
@@ -327,22 +197,3 @@ if st.session_state.run_app:
             video_placeholder.image(frame, channels="BGR", caption="Gesture & Hand Landmark Detection", use_column_width=True)
 
     cap.release()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
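Since the capture loop above can exit through a break, an exception, or a Streamlit rerun, the trailing cap.release() is easy to skip. A try/finally guard guarantees the webcam is freed; a sketch of the shape, independent of this app's loop body (the hardening is a suggestion, not part of this commit):

```python
import cv2

cap = cv2.VideoCapture(0)
try:
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break  # camera unplugged or stream ended
        # ... per-frame processing goes here ...
finally:
    cap.release()  # runs even if processing raises
```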