# trainer/app.py — Streamlit knee-hug pose detection app
# Author: wesam0099 (commit 7acfb3a, "Update app.py")
import streamlit as st
from cvzone.PoseModule import PoseDetector
import cv2
import time
from gtts import gTTS
import os
# Pose detector configured for a live video stream: tracking mode with
# landmark smoothing, segmentation enabled, and a fairly strict detection
# threshold paired with a looser tracking threshold.
detector = PoseDetector(
    staticMode=False,         # video: track across frames instead of re-detecting each one
    modelComplexity=1,        # balanced accuracy/speed model
    smoothLandmarks=True,
    enableSegmentation=True,
    smoothSegmentation=True,
    detectionCon=0.7,         # minimum confidence to (re)detect a person
    trackCon=0.4,             # minimum confidence to keep tracking
)
# --- Module-level state shared (via `global`) with run_pose_detection() ---
hold_time = 0          # seconds the correct pose has been held so far
start_time = None      # wall-clock time the current hold started, or None when not holding
cooldown_time = 2      # minimum seconds between spoken prompts
# Pretend we last spoke one full cooldown ago so the first prompt fires immediately.
last_spoken_time = time.time() - cooldown_time

# --- On-screen labels and drawing parameters ---
correct_position_text = "Correct Knee Hug Position"
incorrect_position_text = "Incorrect Position"
hold_complete_text = "Hold Complete!"
font_scale = 0.6
correct_color = (0, 255, 0)    # BGR green for a correct pose
incorrect_color = (0, 0, 255)  # BGR red for an incorrect pose
def speak(text, filename="output.mp3"):
    """Synthesize *text* with gTTS, save it to *filename*, and play it in Streamlit.

    The MP3 is overwritten on every call, so repeated prompts reuse one file.
    """
    audio = gTTS(text)
    audio.save(filename)
    st.audio(filename, format="audio/mp3")
def run_pose_detection():
    """Stream webcam frames, evaluate the knee-hug pose, and coach the user.

    Reads and updates the module-level state (hold_time, start_time,
    last_spoken_time). For each frame: run pose detection, measure both
    hip-knee-ankle angles, then either count down a 5-second hold (with
    spoken feedback, rate-limited by cooldown_time) or reset the hold.
    Runs until the webcam stops delivering frames; the capture device is
    always released on exit.
    """
    global hold_time, start_time, last_spoken_time

    cap = cv2.VideoCapture(0)
    # Reuse a single Streamlit placeholder so each frame REPLACES the previous
    # one; calling st.image() inside the loop would append an ever-growing
    # column of images to the page.
    frame_slot = st.empty()
    try:
        while cap.isOpened():
            success, img = cap.read()
            if not success:
                break

            img = detector.findPose(img)
            lmList, bboxInfo = detector.findPosition(img, draw=False, bboxWithHands=False)

            if lmList:
                # Hip-knee-ankle angles (MediaPipe landmarks 23/25/27 left, 24/26/28 right).
                LHK_angle, img = detector.findAngle(lmList[23][0:2], lmList[25][0:2], lmList[27][0:2], img=img, color=(255, 0, 0), scale=1)
                RHK_angle, img = detector.findAngle(lmList[24][0:2], lmList[26][0:2], lmList[28][0:2], img=img, color=(255, 0, 0), scale=1)

                # NOTE(review): the accepted ranges are asymmetric (left 170-340,
                # right 300-340) — confirm these thresholds match the intended pose.
                if 170 <= LHK_angle <= 340 and 300 <= RHK_angle <= 340:
                    cv2.putText(img, correct_position_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, font_scale, correct_color, 2, cv2.LINE_AA)
                    if start_time is None:
                        # First correct frame: start timing the hold.
                        start_time = time.time()
                    else:
                        hold_time = time.time() - start_time
                        # Clamp so the countdown never displays a negative number
                        # once the 5-second hold has been exceeded.
                        countdown_text = f"Hold for: {max(0, int(5 - hold_time))} seconds"
                        cv2.putText(img, countdown_text, (50, 200), cv2.FONT_HERSHEY_SIMPLEX, font_scale, correct_color, 2, cv2.LINE_AA)

                    current_time = time.time()
                    # Rate-limit spoken feedback to one prompt per cooldown window.
                    if current_time - last_spoken_time >= cooldown_time:
                        speak("Correct position!")
                        last_spoken_time = current_time

                    if hold_time >= 5:
                        cv2.putText(img, hold_complete_text, (50, 250), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (0, 255, 255), 2, cv2.LINE_AA)
                        if current_time - last_spoken_time >= cooldown_time:
                            speak("Hold Complete!")
                            last_spoken_time = current_time
                else:
                    # Pose broken: show the warning and reset the hold timer.
                    cv2.putText(img, incorrect_position_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, font_scale, incorrect_color, 2, cv2.LINE_AA)
                    hold_time = 0
                    start_time = None

            # OpenCV frames are BGR; Streamlit expects RGB.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            frame_slot.image(img, channels="RGB")
            time.sleep(0.03)  # ~30 ms pause between frames
    finally:
        # Release the webcam even if Streamlit interrupts the loop —
        # the original leaked the capture device.
        cap.release()
# --- Streamlit page layout ---
st.title("Physaio - Knee Hug Pose Detection")
st.write("This app detects the correctness of your knee hug pose using your webcam.")

# Detection only begins on an explicit user click, since it opens the webcam.
if st.button("Start Pose Detection"):
    run_pose_detection()

# Footer hint, shown whether or not detection is running.
st.write("Ensure your webcam is connected and working properly for this application.")