osheina committed
Commit 86a3859 · verified · 1 Parent(s): e6df0f6

Update pages/Camera.py

Files changed (1): pages/Camera.py (+29 -20)
pages/Camera.py CHANGED
@@ -6,11 +6,25 @@ import asyncio
 import streamlit as st
 from streamlit_webrtc import WebRtcMode, webrtc_streamer
 import av
-
 from utils import SLInference
 
 logger = logging.getLogger(__name__)
 
+# Callback function to process video frames
+def video_frame_callback(frame: av.VideoFrame, inference_thread, gestures_deque):
+    img = frame.to_ndarray(format="rgb24")
+
+    inference_thread.input_queue.append(img)
+    gesture = inference_thread.pred
+
+    if gesture not in ['no', '']:
+        if not gestures_deque:
+            gestures_deque.append(gesture)
+        elif gesture != gestures_deque[-1]:
+            gestures_deque.append(gesture)
+
+    return av.VideoFrame.from_ndarray(img, format="rgb24")
+
 def main(config_path):
     """
     Main function of the app.
@@ -21,47 +35,42 @@ def main(config_path):
     gestures_deque = deque(maxlen=5)
 
     # Set up Streamlit interface
-    st.set_page_config(page_title="Gesture Recognition", layout="wide")
     st.title("Sign Language Recognition Demo")
+    image_place = st.empty()
     text_output = st.empty()
     last_5_gestures = st.empty()
     st.markdown(
         """
         This application is designed to recognize sign language using a webcam feed.
         The model has been trained to recognize various sign language gestures and display the corresponding text in real-time.
+
        The project is open for collaboration. If you have any suggestions or want to contribute, please feel free to reach out.
         """
     )
 
-    def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
-        img_rgb = frame.to_ndarray(format="rgb24")
-        inference_thread.input_queue.append(cv2.resize(img_rgb, (224, 224)))
-        return frame
-
     webrtc_ctx = webrtc_streamer(
         key="sign-language-recognition",
         mode=WebRtcMode.SENDRECV,
-        video_frame_callback=video_frame_callback,
+        video_frame_callback=lambda frame: video_frame_callback(frame, inference_thread, gestures_deque),
         media_stream_constraints={"video": True, "audio": False},
         async_processing=True,
     )
 
     while True:
-        if not webrtc_ctx.state.playing:
-            continue
+        if webrtc_ctx.video_receiver:
+            gesture = inference_thread.pred
 
-        gesture = inference_thread.pred
-        if gesture not in ['no', '']:
-            if not gestures_deque:
-                gestures_deque.append(gesture)
-            elif gesture != gestures_deque[-1]:
-                gestures_deque.append(gesture)
+            if gesture not in ['no', '']:
+                if not gestures_deque:
+                    gestures_deque.append(gesture)
+                elif gesture != gestures_deque[-1]:
+                    gestures_deque.append(gesture)
 
-        text_output.markdown(f'<p style="font-size:20px"> Current gesture: {gesture}</p>',
-                             unsafe_allow_html=True)
-        last_5_gestures.markdown(f'<p style="font-size:20px"> Last 5 gestures: {" ".join(gestures_deque)}</p>',
-                                 unsafe_allow_html=True)
-        print(gestures_deque)
+            text_output.markdown(f'<p style="font-size:20px"> Current gesture: {gesture}</p>',
+                                 unsafe_allow_html=True)
+            last_5_gestures.markdown(f'<p style="font-size:20px"> Last 5 gestures: {" ".join(gestures_deque)}</p>',
+                                     unsafe_allow_html=True)
+            print(gestures_deque)
 
 if __name__ == "__main__":
     asyncio.set_event_loop(asyncio.new_event_loop())
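
For context, a minimal, dependency-free sketch of the pattern this commit adopts: the frame callback moves to module level and receives its shared state (the inference thread and the gesture history) explicitly through a lambda, instead of closing over locals of main(). FakeInference below is a hypothetical stand-in for utils.SLInference, assumed to expose an input_queue that a worker thread consumes and a pred attribute it keeps updated; the real callback additionally converts av.VideoFrame to and from an ndarray.

# Sketch of the commit's callback wiring; FakeInference is a hypothetical
# stand-in for utils.SLInference (assumed interface: input_queue + pred).
from collections import deque
from dataclasses import dataclass, field


@dataclass
class FakeInference:
    # A worker thread is assumed to consume input_queue and publish its
    # latest prediction in `pred`.
    input_queue: deque = field(default_factory=lambda: deque(maxlen=32))
    pred: str = ""


def video_frame_callback(frame, inference_thread, gestures_deque):
    # Hand the frame to the worker and read its most recent prediction.
    inference_thread.input_queue.append(frame)
    gesture = inference_thread.pred
    # Record only meaningful, non-repeating gestures, as in the commit.
    if gesture not in ("no", ""):
        if not gestures_deque or gesture != gestures_deque[-1]:
            gestures_deque.append(gesture)
    return frame


if __name__ == "__main__":
    inference_thread = FakeInference()
    gestures_deque = deque(maxlen=5)
    # Same binding the commit passes to webrtc_streamer(...):
    callback = lambda f: video_frame_callback(f, inference_thread, gestures_deque)
    for frame, pred in [("f0", "hello"), ("f1", "hello"), ("f2", "thanks")]:
        inference_thread.pred = pred  # simulate the worker updating pred
        callback(frame)
    print(list(gestures_deque))  # ['hello', 'thanks']

Two caveats, hedged: sharing gestures_deque between the WebRTC worker thread and the render loop relies on deque.append being atomic under CPython's GIL, and the while True loop in main() still polls without sleeping, so a short time.sleep() in its body would keep it from monopolizing a core.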