|
|
import streamlit as st |
|
|
from cvzone.PoseModule import PoseDetector |
|
|
import cv2 |
|
|
import time |
|
|
from gtts import gTTS |
|
|
import os |
|
|
|
|
|
|
|
|
# Shared MediaPipe pose detector used by run_pose_detection():
# video mode (staticMode=False) with landmark/segmentation smoothing on,
# a 0.7 detection threshold and a 0.4 tracking threshold.
detector = PoseDetector(
    staticMode=False,
    modelComplexity=1,
    smoothLandmarks=True,
    enableSegmentation=True,
    smoothSegmentation=True,
    detectionCon=0.7,
    trackCon=0.4,
)
|
|
|
|
|
|
|
|
# --- Pose-hold state (mutated as globals by run_pose_detection) ---
hold_time = 0          # seconds the correct pose has been held so far
start_time = None      # wall-clock time when the correct pose was first detected
cooldown_time = 2      # minimum seconds between spoken audio cues
# Start in the past by one cooldown so the very first cue can fire immediately.
last_spoken_time = time.time() - cooldown_time


# --- On-screen feedback text and cv2.putText drawing settings ---
correct_position_text = "Correct Knee Hug Position"
incorrect_position_text = "Incorrect Position"
hold_complete_text = "Hold Complete!"
font_scale = 0.6               # font scale passed to cv2.putText
correct_color = (0, 255, 0)    # BGR green — frames are BGR until converted
incorrect_color = (0, 0, 255)  # BGR red
|
|
|
|
|
|
|
|
def speak(text, filename="output.mp3"):
    """Synthesize *text* to an MP3 with gTTS and embed a Streamlit audio player.

    Overwrites *filename* on every call.  Note: gTTS performs a network
    request to Google's TTS service, so this blocks until synthesis finishes.
    """
    gTTS(text).save(filename)
    st.audio(filename, format="audio/mp3")
|
|
|
|
|
|
|
|
def run_pose_detection():
    """Stream webcam frames, evaluate the knee-hug pose, and give feedback.

    Reads and mutates the module-level state (hold_time, start_time,
    last_spoken_time) and uses the shared `detector`.  Each annotated frame
    is rendered into a single Streamlit placeholder; audio cues are spoken
    via speak(), rate-limited by `cooldown_time`.  Stops when the camera
    cannot deliver a frame.
    """
    global hold_time, start_time, last_spoken_time

    cap = cv2.VideoCapture(0)
    # One placeholder so each frame REPLACES the previous one; calling
    # st.image() directly in the loop would append a new image element to
    # the page on every iteration and flood it.
    frame_slot = st.empty()

    try:
        while cap.isOpened():
            success, img = cap.read()
            if not success:
                break

            img = detector.findPose(img)
            lmList, bboxInfo = detector.findPosition(img, draw=False, bboxWithHands=False)

            if lmList:
                # Hip-knee-ankle angles: landmarks 23/25/27 (left leg) and
                # 24/26/28 (right leg); only the (x, y) coordinates are used.
                LHK_angle, img = detector.findAngle(lmList[23][0:2], lmList[25][0:2], lmList[27][0:2],
                                                    img=img, color=(255, 0, 0), scale=1)
                RHK_angle, img = detector.findAngle(lmList[24][0:2], lmList[26][0:2], lmList[28][0:2],
                                                    img=img, color=(255, 0, 0), scale=1)

                # NOTE(review): the two acceptance ranges are asymmetric
                # (left 170-340 vs right 300-340) — confirm these thresholds
                # are intentional for the knee-hug pose.
                if 170 <= LHK_angle <= 340 and 300 <= RHK_angle <= 340:
                    cv2.putText(img, correct_position_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, font_scale, correct_color, 2, cv2.LINE_AA)

                    # Start the hold timer on the first correct frame;
                    # afterwards accumulate the elapsed hold time.
                    if start_time is None:
                        start_time = time.time()
                    else:
                        hold_time = time.time() - start_time

                    # Clamp so the countdown never displays negative seconds
                    # once the 5-second hold has been completed.
                    countdown_text = f"Hold for: {max(0, int(5 - hold_time))} seconds"
                    cv2.putText(img, countdown_text, (50, 200), cv2.FONT_HERSHEY_SIMPLEX, font_scale, correct_color, 2, cv2.LINE_AA)

                    # Rate-limit spoken cues to one per cooldown window.
                    current_time = time.time()
                    if current_time - last_spoken_time >= cooldown_time:
                        speak("Correct position!")
                        last_spoken_time = current_time

                    if hold_time >= 5:
                        cv2.putText(img, hold_complete_text, (50, 250), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (0, 255, 255), 2, cv2.LINE_AA)
                        if current_time - last_spoken_time >= cooldown_time:
                            speak("Hold Complete!")
                            last_spoken_time = current_time
                else:
                    cv2.putText(img, incorrect_position_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, font_scale, incorrect_color, 2, cv2.LINE_AA)
                    # Pose broken: reset the hold timer from scratch.
                    hold_time = 0
                    start_time = None

            # OpenCV delivers BGR frames; Streamlit expects RGB.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            frame_slot.image(img, channels="RGB")

            # Throttle to roughly 30 fps so the loop doesn't busy-spin.
            time.sleep(0.03)
    finally:
        # Release the camera even if the loop exits early or raises;
        # the original leaked the VideoCapture handle.
        cap.release()
|
|
|
|
|
|
|
|
# --- Streamlit page layout (runs top-to-bottom on every rerun) ---
st.title("Physaio - Knee Hug Pose Detection")
st.write("This app detects the correctness of your knee hug pose using your webcam.")


# The button returns True only on the rerun triggered by the click; the
# detection loop then blocks this script until the camera stops delivering
# frames.
if st.button("Start Pose Detection"):
    run_pose_detection()


st.write("Ensure your webcam is connected and working properly for this application.")
|
|
|