|
|
import streamlit as st
import cv2
import numpy as np

# Page-level configuration: must be the first Streamlit command executed.
st.set_page_config(
    page_title="Hand2Voice",
    layout="wide",
)

# NOTE(review): the leading characters in the title below ("π€") look like
# mojibake from a mis-decoded emoji — confirm the intended character
# against the original file before changing it.
st.title("π€ Hand2Voice")
st.subheader("Hand Gestures to Speech Interface")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Two-column layout: live camera preview on the left, recognition
# results and spoken-word status on the right.
col1, col2 = st.columns(2)

with col1:
    # NOTE(review): heading emoji ("π·", "π" below) appear mojibake-garbled;
    # confirm intended characters against the original file.
    st.markdown("### π· Camera Feed")
    # Placeholder that the capture loop later fills frame-by-frame.
    camera_placeholder = st.empty()

with col2:
    st.markdown("### π Recognized Text")
    # Demo output: recognized sentence with the currently spoken word
    # highlighted. Raw HTML requires unsafe_allow_html.
    text_placeholder = st.markdown(
        "<h2 style='color:#333;'>HELLO <span style='background-color:yellow;'>HOW</span> ARE YOU</h2>",
        unsafe_allow_html=True,
    )

    st.markdown("### π Spoken Output")
    st.info("Currently speaking: HOW")

# Usage hint shown below both columns.
st.markdown("---")
st.success("Show your hand clearly in front of the camera")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Webcam capture loop -------------------------------------------------
# Streams frames from the default camera (index 0) into the left-column
# placeholder. Streamlit re-runs the whole script on user interaction,
# which is what eventually terminates this loop.
cap = cv2.VideoCapture(0)

if not cap.isOpened():
    # Surface the failure explicitly; previously an unopened camera made
    # the loop condition false and the script ended with no feedback.
    st.error("Camera not accessible")
else:
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                st.error("Camera not accessible")
                break

            # OpenCV delivers frames in BGR order; Streamlit expects RGB.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            camera_placeholder.image(frame, channels="RGB")
    finally:
        # Always release the device, even if rendering raises mid-loop.
        cap.release()
|
|
|