Spaces:
Sleeping
Sleeping
Upload 4 files
Browse files- Dockerfile +17 -14
- README.md +27 -12
- app.py +763 -0
- requirements.txt +9 -3
Dockerfile
CHANGED
|
@@ -1,20 +1,23 @@
|
|
| 1 |
-
FROM python:3.
|
| 2 |
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
build-essential \
|
| 7 |
-
curl \
|
| 8 |
-
git \
|
| 9 |
-
&& rm -rf /var/lib/apt/lists/*
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
|
| 14 |
-
|
|
|
|
| 15 |
|
| 16 |
-
|
|
|
|
| 17 |
|
| 18 |
-
|
|
|
|
| 19 |
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.10-slim
|
| 2 |
|
| 3 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 4 |
+
libgl1-mesa-glx libglib2.0-0 ffmpeg && \
|
| 5 |
+
rm -rf /var/lib/apt/lists/*
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
+
RUN useradd -m -u 1000 user
|
| 8 |
+
WORKDIR /app
|
| 9 |
|
| 10 |
+
COPY requirements.txt .
|
| 11 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 12 |
|
| 13 |
+
COPY . .
|
| 14 |
+
RUN chown -R user:user /app
|
| 15 |
|
| 16 |
+
USER user
|
| 17 |
+
EXPOSE 7860
|
| 18 |
|
| 19 |
+
CMD ["streamlit", "run", "app.py", \
|
| 20 |
+
"--server.port=7860", \
|
| 21 |
+
"--server.address=0.0.0.0", \
|
| 22 |
+
"--server.headless=true", \
|
| 23 |
+
"--browser.gatherUsageStats=false"]
|
README.md
CHANGED
|
@@ -1,19 +1,34 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
-
app_port:
|
| 8 |
-
tags:
|
| 9 |
-
- streamlit
|
| 10 |
pinned: false
|
| 11 |
-
short_description: An AI-based smart coach
|
| 12 |
---
|
| 13 |
|
| 14 |
-
#
|
| 15 |
|
| 16 |
-
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: BecomeAPro Exercise Tracker
|
| 3 |
+
emoji: "\U0001F3CB"
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: purple
|
| 6 |
sdk: docker
|
| 7 |
+
app_port: 7860
|
|
|
|
|
|
|
| 8 |
pinned: false
|
|
|
|
| 9 |
---
|
| 10 |
|
| 11 |
+
# BecomeAPro - AI-Powered Exercise Tracker
|
| 12 |
|
| 13 |
+
Yapay zeka destekli gercek zamanli egzersiz takip uygulamasi.
|
| 14 |
|
| 15 |
+
## Ozellikler
|
| 16 |
+
|
| 17 |
+
- Tarayici icinden kamera erisimi (WebRTC)
|
| 18 |
+
- MediaPipe ile 33 vucut noktasi takibi
|
| 19 |
+
- XGBoost / PyTorch ile hareket siniflandirma
|
| 20 |
+
- 5 egzersiz destegi: Sinav, Mekik, Squat, Barfiks, Ziplama
|
| 21 |
+
- Anlik hareket tespiti ve guven orani gosterimi
|
| 22 |
+
|
| 23 |
+
## Kullanim
|
| 24 |
+
|
| 25 |
+
1. "START" butonuna tiklayin ve kamera izni verin
|
| 26 |
+
2. Tam vucut gorunumunde, iyi aydinlatilmis ortamda durun
|
| 27 |
+
3. Egzersizinizi yapmaya baslayin - AI hareketlerinizi anlik tanir
|
| 28 |
+
|
| 29 |
+
## Teknik Detaylar
|
| 30 |
+
|
| 31 |
+
- **Pose Detection**: MediaPipe Pose Landmarker (33 landmark x 3 eksen)
|
| 32 |
+
- **Classification**: XGBoost / PyTorch MLP (10 sinif: 5 egzersiz x 2 pozisyon)
|
| 33 |
+
- **Smoothing**: Son 12 frame uzerinden mode filtresi + guven esigi (%65)
|
| 34 |
+
- **Frontend**: Streamlit + streamlit-webrtc
|
app.py
ADDED
|
@@ -0,0 +1,763 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
BecomeAPro - AI-Powered Exercise Tracker (Hugging Face Space)
|
| 3 |
+
Streamlit + WebRTC for in-browser real-time pose detection.
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
import logging
|
| 7 |
+
import urllib.request
|
| 8 |
+
from collections import Counter, deque
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from threading import Lock
|
| 11 |
+
|
| 12 |
+
import av
|
| 13 |
+
import cv2
|
| 14 |
+
import mediapipe as mp
|
| 15 |
+
import numpy as np
|
| 16 |
+
import streamlit as st
|
| 17 |
+
from joblib import load
|
| 18 |
+
from mediapipe.tasks import python as mp_python
|
| 19 |
+
from mediapipe.tasks.python import vision
|
| 20 |
+
from streamlit_webrtc import WebRtcMode, webrtc_streamer
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
ROOT = Path(__file__).resolve().parent
|
| 25 |
+
MODELS_DIR = ROOT / "models"
|
| 26 |
+
POSE_MODEL_PATH = MODELS_DIR / "pose_landmarker_lite.task"
|
| 27 |
+
POSE_MODEL_URL = (
|
| 28 |
+
"https://storage.googleapis.com/mediapipe-models/"
|
| 29 |
+
"pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task"
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
BUFFER_SIZE = 12
|
| 33 |
+
CONFIDENCE_THRESHOLD = 0.65
|
| 34 |
+
SCALE_XY = 100.0
|
| 35 |
+
SCALE_Z = 200.0
|
| 36 |
+
|
| 37 |
+
MP_INDEX_TO_NAME = [
|
| 38 |
+
"nose", "left_eye_inner", "left_eye", "left_eye_outer",
|
| 39 |
+
"right_eye_inner", "right_eye", "right_eye_outer",
|
| 40 |
+
"left_ear", "right_ear", "mouth_left", "mouth_right",
|
| 41 |
+
"left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
|
| 42 |
+
"left_wrist", "right_wrist", "left_pinky", "right_pinky",
|
| 43 |
+
"left_index", "right_index", "left_thumb", "right_thumb",
|
| 44 |
+
"left_hip", "right_hip", "left_knee", "right_knee",
|
| 45 |
+
"left_ankle", "right_ankle", "left_heel", "right_heel",
|
| 46 |
+
"left_foot_index", "right_foot_index",
|
| 47 |
+
]
|
| 48 |
+
|
| 49 |
+
NAME_ALIASES = {
|
| 50 |
+
"right_index_1": "right_index", "left_index_1": "left_index",
|
| 51 |
+
"left_pinky_1": "left_pinky", "right_pinky_1": "right_pinky",
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
POSE_TO_TURKISH = {
|
| 55 |
+
"situp_up": "Mekik (Yukari)",
|
| 56 |
+
"situp_down": "Mekik (Asagi)",
|
| 57 |
+
"pushups_up": "Sinav (Yukari)",
|
| 58 |
+
"pushups_down": "Sinav (Asagi)",
|
| 59 |
+
"pullups_up": "Barfiks (Yukari)",
|
| 60 |
+
"pullups_down": "Barfiks (Asagi)",
|
| 61 |
+
"squats_up": "Squat (Yukari)",
|
| 62 |
+
"squats_down": "Squat (Asagi)",
|
| 63 |
+
"jumping_jacks_up": "Ziplama (Yukari)",
|
| 64 |
+
"jumping_jacks_down": "Ziplama (Asagi)",
|
| 65 |
+
"Belirsiz": "Belirsiz",
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
EXERCISES = [
|
| 69 |
+
{"name": "Sinav", "en": "Push-ups", "icon": "\U0001f4aa",
|
| 70 |
+
"desc": "Gogus, omuz ve triceps kaslari icin temel egzersiz.", "color": "#00d4aa"},
|
| 71 |
+
{"name": "Mekik", "en": "Sit-ups", "icon": "\U0001f504",
|
| 72 |
+
"desc": "Karin kaslari icin etkili bir core egzersizi.", "color": "#7c3aed"},
|
| 73 |
+
{"name": "Squat", "en": "Squats", "icon": "\U0001f9b5",
|
| 74 |
+
"desc": "Bacak ve kalca kaslari icin en etkili hareket.", "color": "#f59e0b"},
|
| 75 |
+
{"name": "Barfiks", "en": "Pull-ups", "icon": "\U0001f9d7",
|
| 76 |
+
"desc": "Sirt ve biceps kaslarini guclendiren egzersiz.", "color": "#ef4444"},
|
| 77 |
+
{"name": "Ziplama", "en": "Jumping Jacks", "icon": "\U0001f938",
|
| 78 |
+
"desc": "Tam vucut kardiyo ve koordinasyon egzersizi.", "color": "#3b82f6"},
|
| 79 |
+
]
|
| 80 |
+
|
| 81 |
+
# ---------------------------------------------------------------------------
|
| 82 |
+
# Page config (must be first st call)
|
| 83 |
+
# ---------------------------------------------------------------------------
|
| 84 |
+
|
| 85 |
+
st.set_page_config(
|
| 86 |
+
page_title="BecomeAPro | AI Exercise Tracker",
|
| 87 |
+
page_icon="\U0001f3cb\ufe0f",
|
| 88 |
+
layout="wide",
|
| 89 |
+
initial_sidebar_state="collapsed",
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
# ---------------------------------------------------------------------------
|
| 93 |
+
# CSS
|
| 94 |
+
# ---------------------------------------------------------------------------
|
| 95 |
+
|
| 96 |
+
CUSTOM_CSS = """\
|
| 97 |
+
<style>
|
| 98 |
+
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&display=swap');
|
| 99 |
+
#MainMenu, footer, header {visibility: hidden;}
|
| 100 |
+
.block-container {padding-top: 0 !important; max-width: 1200px; margin: 0 auto;}
|
| 101 |
+
|
| 102 |
+
.stApp {
|
| 103 |
+
background: linear-gradient(180deg, #080810 0%, #0d0d1a 40%, #080810 100%);
|
| 104 |
+
color: #e0e0e8;
|
| 105 |
+
font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
/* Hero */
|
| 109 |
+
.hero {
|
| 110 |
+
text-align: center;
|
| 111 |
+
padding: 4.5rem 1rem 2.5rem;
|
| 112 |
+
position: relative;
|
| 113 |
+
overflow: hidden;
|
| 114 |
+
}
|
| 115 |
+
.hero::before {
|
| 116 |
+
content: '';
|
| 117 |
+
position: absolute;
|
| 118 |
+
top: -60%; left: -30%; width: 160%; height: 220%;
|
| 119 |
+
background:
|
| 120 |
+
radial-gradient(ellipse at 30% 50%, rgba(0,212,170,0.07) 0%, transparent 50%),
|
| 121 |
+
radial-gradient(ellipse at 70% 50%, rgba(124,58,237,0.07) 0%, transparent 50%);
|
| 122 |
+
animation: drift 10s ease-in-out infinite alternate;
|
| 123 |
+
pointer-events: none;
|
| 124 |
+
}
|
| 125 |
+
@keyframes drift {
|
| 126 |
+
from { transform: translate(0,0) rotate(0deg); }
|
| 127 |
+
to { transform: translate(-3%,2%) rotate(1deg); }
|
| 128 |
+
}
|
| 129 |
+
.hero-badge {
|
| 130 |
+
display: inline-block;
|
| 131 |
+
background: rgba(0,212,170,0.08);
|
| 132 |
+
border: 1px solid rgba(0,212,170,0.25);
|
| 133 |
+
border-radius: 50px;
|
| 134 |
+
padding: 6px 20px;
|
| 135 |
+
font-size: 0.82rem;
|
| 136 |
+
color: #00d4aa;
|
| 137 |
+
font-weight: 600;
|
| 138 |
+
margin-bottom: 1.6rem;
|
| 139 |
+
letter-spacing: 1.2px;
|
| 140 |
+
text-transform: uppercase;
|
| 141 |
+
}
|
| 142 |
+
.hero h1 {
|
| 143 |
+
font-size: clamp(2.2rem, 5vw, 3.8rem);
|
| 144 |
+
font-weight: 800;
|
| 145 |
+
line-height: 1.08;
|
| 146 |
+
margin: 0 0 1.1rem;
|
| 147 |
+
color: #ffffff;
|
| 148 |
+
position: relative;
|
| 149 |
+
}
|
| 150 |
+
.hero h1 .grad {
|
| 151 |
+
background: linear-gradient(135deg, #00d4aa 0%, #7c3aed 55%, #3b82f6 100%);
|
| 152 |
+
-webkit-background-clip: text;
|
| 153 |
+
-webkit-text-fill-color: transparent;
|
| 154 |
+
background-clip: text;
|
| 155 |
+
}
|
| 156 |
+
.hero-sub {
|
| 157 |
+
font-size: 1.12rem;
|
| 158 |
+
color: #7a7a95;
|
| 159 |
+
max-width: 580px;
|
| 160 |
+
margin: 0 auto;
|
| 161 |
+
line-height: 1.7;
|
| 162 |
+
position: relative;
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
/* Section Titles */
|
| 166 |
+
.sec-title {
|
| 167 |
+
font-size: 1.85rem;
|
| 168 |
+
font-weight: 700;
|
| 169 |
+
text-align: center;
|
| 170 |
+
margin: 3.5rem 0 0.4rem;
|
| 171 |
+
color: #fff;
|
| 172 |
+
}
|
| 173 |
+
.sec-sub {
|
| 174 |
+
text-align: center;
|
| 175 |
+
color: #7a7a95;
|
| 176 |
+
font-size: 0.95rem;
|
| 177 |
+
margin-bottom: 2rem;
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
/* Glass Card */
|
| 181 |
+
.g-card {
|
| 182 |
+
background: rgba(18,18,30,0.65);
|
| 183 |
+
backdrop-filter: blur(14px);
|
| 184 |
+
-webkit-backdrop-filter: blur(14px);
|
| 185 |
+
border: 1px solid rgba(255,255,255,0.055);
|
| 186 |
+
border-radius: 16px;
|
| 187 |
+
padding: 1.8rem 1.5rem;
|
| 188 |
+
transition: all 0.35s cubic-bezier(.4,0,.2,1);
|
| 189 |
+
position: relative;
|
| 190 |
+
overflow: hidden;
|
| 191 |
+
height: 100%;
|
| 192 |
+
}
|
| 193 |
+
.g-card:hover {
|
| 194 |
+
border-color: rgba(0,212,170,0.18);
|
| 195 |
+
transform: translateY(-4px);
|
| 196 |
+
box-shadow: 0 16px 48px rgba(0,0,0,0.25);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
/* Step Cards */
|
| 200 |
+
.step-num {
|
| 201 |
+
display: inline-flex;
|
| 202 |
+
align-items: center;
|
| 203 |
+
justify-content: center;
|
| 204 |
+
width: 46px; height: 46px;
|
| 205 |
+
border-radius: 12px;
|
| 206 |
+
background: linear-gradient(135deg, #00d4aa, #7c3aed);
|
| 207 |
+
color: #fff;
|
| 208 |
+
font-weight: 700;
|
| 209 |
+
font-size: 1.15rem;
|
| 210 |
+
margin-bottom: 1rem;
|
| 211 |
+
}
|
| 212 |
+
.step-t { font-size: 1.1rem; font-weight: 600; color: #fff; margin-bottom: 0.45rem; }
|
| 213 |
+
.step-d { font-size: 0.88rem; color: #7a7a95; line-height: 1.6; }
|
| 214 |
+
|
| 215 |
+
/* Exercise Cards */
|
| 216 |
+
.accent-top {
|
| 217 |
+
position: absolute;
|
| 218 |
+
top: 0; left: 0; right: 0;
|
| 219 |
+
height: 3px;
|
| 220 |
+
border-radius: 16px 16px 0 0;
|
| 221 |
+
opacity: 0.7;
|
| 222 |
+
transition: opacity 0.3s;
|
| 223 |
+
}
|
| 224 |
+
.g-card:hover .accent-top { opacity: 1; }
|
| 225 |
+
.ex-icon { font-size: 2.5rem; margin-bottom: 0.7rem; display: block; }
|
| 226 |
+
.ex-name { font-size: 1.05rem; font-weight: 600; color: #fff; margin-bottom: 0.15rem; }
|
| 227 |
+
.ex-en { font-size: 0.78rem; color: #5a5a7a; margin-bottom: 0.5rem; }
|
| 228 |
+
.ex-desc { font-size: 0.82rem; color: #7a7a95; line-height: 1.5; }
|
| 229 |
+
|
| 230 |
+
/* CTA Section */
|
| 231 |
+
.cta-box {
|
| 232 |
+
text-align: center;
|
| 233 |
+
padding: 3rem 2rem 1.5rem;
|
| 234 |
+
background: linear-gradient(135deg, rgba(0,212,170,0.04), rgba(124,58,237,0.04));
|
| 235 |
+
border: 1px solid rgba(255,255,255,0.04);
|
| 236 |
+
border-radius: 24px;
|
| 237 |
+
margin: 2.5rem 0 0;
|
| 238 |
+
position: relative;
|
| 239 |
+
overflow: hidden;
|
| 240 |
+
}
|
| 241 |
+
.cta-box::before {
|
| 242 |
+
content: '';
|
| 243 |
+
position: absolute;
|
| 244 |
+
inset: -1px;
|
| 245 |
+
border-radius: 24px;
|
| 246 |
+
background: linear-gradient(135deg, rgba(0,212,170,0.12), transparent 40%, rgba(124,58,237,0.12));
|
| 247 |
+
z-index: 0;
|
| 248 |
+
pointer-events: none;
|
| 249 |
+
}
|
| 250 |
+
.cta-t { font-size: 1.75rem; font-weight: 700; color: #fff; margin-bottom: 0.5rem; position: relative; }
|
| 251 |
+
.cta-d { color: #7a7a95; margin-bottom: 0.2rem; font-size: 0.95rem; position: relative; }
|
| 252 |
+
|
| 253 |
+
/* Metric Card */
|
| 254 |
+
.m-val {
|
| 255 |
+
font-size: 2.1rem;
|
| 256 |
+
font-weight: 700;
|
| 257 |
+
background: linear-gradient(135deg, #00d4aa, #7c3aed);
|
| 258 |
+
-webkit-background-clip: text;
|
| 259 |
+
-webkit-text-fill-color: transparent;
|
| 260 |
+
background-clip: text;
|
| 261 |
+
}
|
| 262 |
+
.m-lbl { font-size: 0.82rem; color: #7a7a95; font-weight: 500; margin-top: 4px; }
|
| 263 |
+
|
| 264 |
+
/* Primary Button */
|
| 265 |
+
div.stButton > button[kind="primary"],
|
| 266 |
+
div.stButton > button[data-testid="stBaseButton-primary"] {
|
| 267 |
+
background: linear-gradient(135deg, #00d4aa 0%, #00b894 100%) !important;
|
| 268 |
+
border: none !important;
|
| 269 |
+
border-radius: 14px !important;
|
| 270 |
+
padding: 0.85rem 2.8rem !important;
|
| 271 |
+
font-size: 1.08rem !important;
|
| 272 |
+
font-weight: 600 !important;
|
| 273 |
+
font-family: 'Inter', sans-serif !important;
|
| 274 |
+
color: #080810 !important;
|
| 275 |
+
box-shadow: 0 4px 24px rgba(0,212,170,0.22) !important;
|
| 276 |
+
transition: all 0.3s cubic-bezier(.4,0,.2,1) !important;
|
| 277 |
+
letter-spacing: 0.3px !important;
|
| 278 |
+
min-height: 56px !important;
|
| 279 |
+
}
|
| 280 |
+
div.stButton > button[kind="primary"]:hover,
|
| 281 |
+
div.stButton > button[data-testid="stBaseButton-primary"]:hover {
|
| 282 |
+
box-shadow: 0 8px 36px rgba(0,212,170,0.38) !important;
|
| 283 |
+
transform: translateY(-2px) !important;
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
/* Tip Box */
|
| 287 |
+
.tip-box {
|
| 288 |
+
background: rgba(59,130,246,0.06);
|
| 289 |
+
border: 1px solid rgba(59,130,246,0.15);
|
| 290 |
+
border-radius: 14px;
|
| 291 |
+
padding: 1.1rem 1.4rem;
|
| 292 |
+
color: #8ab4f8;
|
| 293 |
+
font-size: 0.88rem;
|
| 294 |
+
line-height: 1.6;
|
| 295 |
+
margin: 1rem 0;
|
| 296 |
+
}
|
| 297 |
+
.tip-box strong { color: #a8ccff; }
|
| 298 |
+
|
| 299 |
+
/* Helpers */
|
| 300 |
+
.sep {
|
| 301 |
+
height: 1px;
|
| 302 |
+
background: linear-gradient(90deg, transparent, rgba(255,255,255,0.06), transparent);
|
| 303 |
+
margin: 2.5rem 0;
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
/* Footer */
|
| 307 |
+
.foot {
|
| 308 |
+
text-align: center;
|
| 309 |
+
padding: 2rem 0 1.5rem;
|
| 310 |
+
color: #444460;
|
| 311 |
+
font-size: 0.82rem;
|
| 312 |
+
margin-top: 3rem;
|
| 313 |
+
border-top: 1px solid rgba(255,255,255,0.04);
|
| 314 |
+
}
|
| 315 |
+
.foot a { color: #00d4aa; text-decoration: none; }
|
| 316 |
+
|
| 317 |
+
/* Scrollbar */
|
| 318 |
+
::-webkit-scrollbar { width: 6px; }
|
| 319 |
+
::-webkit-scrollbar-track { background: transparent; }
|
| 320 |
+
::-webkit-scrollbar-thumb { background: #2a2a3e; border-radius: 3px; }
|
| 321 |
+
::-webkit-scrollbar-thumb:hover { background: #3a3a52; }
|
| 322 |
+
|
| 323 |
+
/* WebRTC container styling */
|
| 324 |
+
.stVideo > div { border-radius: 16px; overflow: hidden; }
|
| 325 |
+
|
| 326 |
+
/* Onboarding Card */
|
| 327 |
+
.onboard-card {
|
| 328 |
+
background: linear-gradient(135deg, rgba(124,58,237,0.08), rgba(0,212,170,0.08));
|
| 329 |
+
border: 1px solid rgba(124,58,237,0.15);
|
| 330 |
+
border-radius: 16px;
|
| 331 |
+
padding: 2rem;
|
| 332 |
+
text-align: center;
|
| 333 |
+
}
|
| 334 |
+
.onboard-card h3 { color: #fff; font-size: 1.3rem; margin-bottom: 0.6rem; }
|
| 335 |
+
.onboard-card p { color: #7a7a95; font-size: 0.92rem; line-height: 1.6; }
|
| 336 |
+
</style>
|
| 337 |
+
"""
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
# ---------------------------------------------------------------------------
|
| 341 |
+
# Pose detection helpers (from camera_demo.py, adapted for WebRTC)
|
| 342 |
+
# ---------------------------------------------------------------------------
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def label_to_turkish(label: str) -> str:
    """Map a raw model label (e.g. "squats_up") to its Turkish display string.

    Labels with no entry in POSE_TO_TURKISH pass through unchanged.
    """
    try:
        return POSE_TO_TURKISH[label]
    except KeyError:
        return label
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def ensure_pose_model() -> str:
    """Return the local path to the MediaPipe pose landmarker ``.task`` file.

    Downloads the model from ``POSE_MODEL_URL`` on first use. The download is
    written to a temporary sibling file and renamed into place, because an
    interrupted ``urlretrieve`` straight to the final path would leave a
    truncated file that the ``exists()`` fast-path would accept forever after.

    Returns:
        str: filesystem path to the pose landmarker model.

    Raises:
        urllib.error.URLError: if the download fails (no partial file is
            left at the final path in that case).
    """
    if POSE_MODEL_PATH.exists():
        return str(POSE_MODEL_PATH)
    MODELS_DIR.mkdir(parents=True, exist_ok=True)
    logger.info("Downloading pose_landmarker model...")
    tmp_path = POSE_MODEL_PATH.with_name(POSE_MODEL_PATH.name + ".part")
    try:
        urllib.request.urlretrieve(POSE_MODEL_URL, tmp_path)
        # Atomic on the same filesystem: readers see either no model or a
        # complete one, never a half-written file.
        tmp_path.replace(POSE_MODEL_PATH)
    finally:
        # Remove the partial download if the retrieve raised mid-transfer.
        if tmp_path.exists() and not POSE_MODEL_PATH.exists():
            tmp_path.unlink()
    logger.info("Download complete.")
    return str(POSE_MODEL_PATH)
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def landmarks_to_vector(landmark_list, feature_columns):
    """Flatten MediaPipe landmarks into a ``(1, n_features)`` float32 row.

    Feature columns are expected to look like ``x_<name>`` / ``y_<name>`` /
    ``z_<name>``; any other column is skipped. x/y are re-centered around 0.5
    and scaled by SCALE_XY, z is scaled by SCALE_Z — this must mirror the
    preprocessing used at training time. Landmark names not found in
    MP_INDEX_TO_NAME (after alias resolution) contribute 0.0.
    """
    # Build name -> landmark-index lookup once, then graft the dataset's
    # legacy alias names (e.g. "right_index_1") onto the canonical indices.
    index_of = {landmark: pos for pos, landmark in enumerate(MP_INDEX_TO_NAME)}
    for alias, canonical in NAME_ALIASES.items():
        index_of[alias] = index_of.get(canonical, 0)

    row = []
    for column in feature_columns:
        if not column.startswith(("x_", "y_", "z_")):
            continue
        axis = column[0]
        raw_name = column[2:].strip()
        idx = index_of.get(NAME_ALIASES.get(raw_name, raw_name), -1)
        if idx < 0:
            # Unknown landmark name: keep column alignment with a zero.
            row.append(0.0)
            continue
        lm = landmark_list[idx]
        # Guard against None coordinates from the landmarker.
        coords = {
            "x": lm.x if lm.x is not None else 0.0,
            "y": lm.y if lm.y is not None else 0.0,
            "z": lm.z if lm.z is not None else 0.0,
        }
        if axis == "z":
            row.append(coords["z"] * SCALE_Z)
        else:
            row.append((coords[axis] - 0.5) * SCALE_XY)

    return np.array(row, dtype=np.float32).reshape(1, -1)
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def predict_single(ml_model, encoder, scaler, model_type, X, buffer):
    """Classify one scaled feature row and smooth the label over recent frames.

    The raw per-frame prediction is appended to *buffer* (frames below
    CONFIDENCE_THRESHOLD are recorded as "Belirsiz"); the returned label is
    the most frequent entry currently in the buffer, which damps
    frame-to-frame flicker.

    Returns:
        tuple: ``(mode_label, conf)`` — the smoothed label and this frame's
        raw top-class confidence.
    """
    X_scaled = scaler.transform(X)

    if model_type == "xgboost":
        pred_idx = ml_model.predict(X_scaled)[0]
        probs = ml_model.predict_proba(X_scaled)[0]
    else:
        # torch is only needed on the MLP path; keep the import deferred.
        import torch

        with torch.no_grad():
            logits = ml_model(torch.from_numpy(X_scaled.astype(np.float32)))
            probs = torch.softmax(logits, dim=1).numpy()[0]
        pred_idx = int(np.argmax(probs))

    conf = float(probs[pred_idx])
    if conf >= CONFIDENCE_THRESHOLD:
        buffer.append(encoder.inverse_transform([pred_idx])[0])
    else:
        buffer.append("Belirsiz")

    mode_label = Counter(buffer).most_common(1)[0][0]
    return mode_label, conf
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def draw_overlay_panel(frame, label, conf):
    """Draw the semi-transparent status panel (label + confidence) on *frame*.

    Mutates the BGR image in place; returns nothing.
    """
    width = frame.shape[1]
    top_left = (10, 10)
    bottom_right = (10 + min(400, width - 20), 10 + 90)

    # Blend a dark rectangle under the text so it stays readable on any scene.
    shaded = frame.copy()
    cv2.rectangle(shaded, top_left, bottom_right, (30, 30, 30), -1)
    cv2.addWeighted(shaded, 0.75, frame, 0.25, 0, frame)
    cv2.rectangle(frame, top_left, bottom_right, (0, 200, 100), 2)

    font = cv2.FONT_HERSHEY_SIMPLEX
    # Grey text for the undecided state, green for a recognized movement.
    text_color = (100, 100, 100) if label == "Belirsiz" else (0, 255, 150)
    x0, y0 = top_left

    cv2.putText(frame, f"Hareket: {label_to_turkish(label)}",
                (x0 + 12, y0 + 38), font, 0.9, text_color, 2)
    cv2.putText(frame, f"Guven: %{conf * 100:.0f}",
                (x0 + 12, y0 + 72), font, 0.7, (200, 200, 200), 2)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
# ---------------------------------------------------------------------------
|
| 437 |
+
# Thread-safe model & pose landmarker loader
|
| 438 |
+
# ---------------------------------------------------------------------------
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
@st.cache_resource
def load_all_artifacts():
    """Load ML model, scaler, encoder, feature columns, and MediaPipe pose landmarker.

    Cached with st.cache_resource so the (expensive) deserialization and
    landmarker construction happen once per process, not per rerun.

    Returns a 7-tuple:
        (ml_model, encoder, scaler, model_type, feature_columns,
         pose_landmarker, meta)
    or a tuple of seven Nones if the model artifact files are missing.
    """
    meta_path = MODELS_DIR / "meta.pkl"
    metadata_path = MODELS_DIR / "metadata.json"
    if not meta_path.exists() or not metadata_path.exists():
        # Missing artifacts: signal "no model" rather than crash the app.
        return None, None, None, None, None, None, None

    meta = load(meta_path)
    encoder = load(MODELS_DIR / "encoder.pkl")
    scaler = load(MODELS_DIR / "scaler.pkl")
    model_type = meta.get("model_type", "xgboost")

    # meta.pkl may record the path from the training machine; remap any
    # relative path onto this deployment's models directory by basename.
    model_path = meta.get("model_path")
    if model_path and not Path(model_path).is_absolute():
        model_path = MODELS_DIR / Path(model_path).name

    if model_type == "xgboost":
        ml_model = load(model_path)
    else:
        import torch
        # Rebuild the MLP architecture the training script used, then load
        # weights. Sizes fall back to the training defaults if absent.
        input_size = meta.get("input_size", 99)
        num_classes = meta.get("num_classes", 10)
        from torch import nn
        ml_model = nn.Sequential(
            nn.Linear(input_size, 200),
            nn.ReLU(),
            nn.Linear(200, num_classes),
        )
        # map_location="cpu": the Space has no GPU.
        ml_model.load_state_dict(torch.load(model_path, map_location="cpu"))
        ml_model.eval()

    with open(metadata_path, encoding="utf-8") as f:
        feature_columns = json.load(f).get("feature_columns", [])

    # Download-on-first-use of the MediaPipe .task model, then build an
    # IMAGE-mode landmarker (one detect() call per frame).
    pose_model_path = ensure_pose_model()
    base_options = mp_python.BaseOptions(model_asset_path=pose_model_path)
    options = vision.PoseLandmarkerOptions(
        base_options=base_options,
        running_mode=vision.RunningMode.IMAGE,
    )
    pose_landmarker = vision.PoseLandmarker.create_from_options(options)

    return ml_model, encoder, scaler, model_type, feature_columns, pose_landmarker, meta
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
# ---------------------------------------------------------------------------
|
| 489 |
+
# WebRTC video callback
|
| 490 |
+
# ---------------------------------------------------------------------------
|
| 491 |
+
|
| 492 |
+
_buffer_lock = Lock()
|
| 493 |
+
_prediction_buffer: deque = deque(maxlen=BUFFER_SIZE)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def make_video_frame_callback(ml_model, encoder, scaler, model_type,
                              feature_columns, pose_landmarker):
    """Create a closure that captures loaded artifacts for the WebRTC callback.

    streamlit-webrtc invokes the returned callback from a worker thread for
    every incoming video frame, so all model artifacts are bound here once
    instead of being looked up per frame.
    """
    # NOTE(review): verify this import path for the pinned mediapipe version —
    # drawing helpers have historically lived under mediapipe.solutions, not
    # mediapipe.tasks.python.vision. TODO confirm.
    from mediapipe.tasks.python.vision import drawing_utils, drawing_styles

    def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
        # Mirror the image so the on-screen view matches the user's movement.
        img = frame.to_ndarray(format="bgr24")
        img = cv2.flip(img, 1)

        # MediaPipe expects SRGB; convert from OpenCV's BGR.
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb)

        try:
            detection_result = pose_landmarker.detect(mp_image)
        except Exception:
            # Detection failure: keep streaming, show the undecided panel.
            draw_overlay_panel(img, "Belirsiz", 0.0)
            return av.VideoFrame.from_ndarray(img, format="bgr24")

        if detection_result.pose_landmarks:
            # Single-person assumption: only the first detected pose is used.
            pose_landmarks = detection_result.pose_landmarks[0]

            # Skeleton is drawn in RGB space, then converted back to BGR.
            frame_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            drawing_utils.draw_landmarks(
                image=frame_rgb,
                landmark_list=pose_landmarks,
                connections=vision.PoseLandmarksConnections.POSE_LANDMARKS,
                landmark_drawing_spec=drawing_styles.get_default_pose_landmarks_style(),
                connection_drawing_spec=drawing_utils.DrawingSpec(
                    color=(0, 255, 0), thickness=2
                ),
            )
            img = cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2BGR)

            try:
                X = landmarks_to_vector(pose_landmarks, feature_columns)
                # Feature-count mismatch (model/metadata drift) → skip predict.
                if X.shape[1] == scaler.n_features_in_:
                    # The prediction buffer is shared across callback threads;
                    # guard the append + mode computation with the module lock.
                    with _buffer_lock:
                        label, conf = predict_single(
                            ml_model, encoder, scaler, model_type,
                            X, _prediction_buffer,
                        )
                    draw_overlay_panel(img, label, conf)
            except Exception as e:
                # Surface prediction errors on the frame instead of killing
                # the stream.
                cv2.putText(img, f"Error: {e}", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        else:
            # No pose found: undecided panel + a hint to stand in full view.
            draw_overlay_panel(img, "Belirsiz", 0.0)
            h, w = img.shape[:2]
            cv2.putText(img, "Tam vucut gorunumunde durun",
                        (10, h - 25), cv2.FONT_HERSHEY_SIMPLEX,
                        0.55, (0, 165, 255), 1)

        return av.VideoFrame.from_ndarray(img, format="bgr24")

    return video_frame_callback
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
# ---------------------------------------------------------------------------
|
| 554 |
+
# UI Sections
|
| 555 |
+
# ---------------------------------------------------------------------------
|
| 556 |
+
|
| 557 |
+
|
| 558 |
+
def render_hero():
    """Render the landing hero section (badge, headline, subtitle) as raw HTML."""
    st.markdown(
        """
        <div class="hero">
          <div class="hero-badge">AI-Powered Fitness</div>
          <h1>
            Egzersizlerini<br>
            <span class="grad">Yapay Zeka ile Takip Et</span>
          </h1>
          <p class="hero-sub">
            Kamerani ac, egzersizini yap. Yapay zeka hareketlerini anlik olarak tanir,
            tekrarlarini sayar ve performansini takip eder.
          </p>
        </div>
        """,
        unsafe_allow_html=True,
    )
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
def render_stats():
    """Render the three headline metric cards (exercises / positions / landmarks)."""
    cols = st.columns(3)
    stats = [
        ("5", "Desteklenen Egzersiz"),
        ("10", "Hareket Pozisyonu"),
        ("33", "Vucut Noktasi Takibi"),
    ]
    for col, (val, label) in zip(cols, stats):
        with col:
            st.markdown(
                f"""
                <div class="g-card" style="text-align:center; padding:1.4rem 1rem;">
                  <div class="m-val">{val}</div>
                  <div class="m-lbl">{label}</div>
                </div>
                """,
                unsafe_allow_html=True,
            )
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
def render_how_it_works():
    """Render the three-step 'how it works' walkthrough cards."""
    st.markdown('<div class="sep"></div>', unsafe_allow_html=True)
    st.markdown('<div class="sec-title">Nasil Calisir?</div>', unsafe_allow_html=True)
    st.markdown(
        '<div class="sec-sub">Uc basit adimda antrenmanina basla</div>',
        unsafe_allow_html=True,
    )
    walkthrough = (
        ("1", "Kamerayi Baslat",
         "Asagidaki START butonuna tiklayarak tarayici kameranizi acin. Kameranin tam vucudunuzu gorecegi bir konumda durun."),
        ("2", "Egzersizini Yap",
         "Sinav, mekik, squat veya baska bir egzersiz yapmaya baslayin. AI modeli hareketlerinizi anlik olarak tanir."),
        ("3", "Sonuclarini Gor",
         "Hareket tipi ve guven orani video uzerinde canli olarak gosterilir."),
    )
    for column, (number, heading, detail) in zip(st.columns(3), walkthrough):
        column.markdown(
            f"""
            <div class="g-card" style="text-align:center;">
              <div class="step-num">{number}</div>
              <div class="step-t">{heading}</div>
              <div class="step-d">{detail}</div>
            </div>
            """,
            unsafe_allow_html=True,
        )
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
def render_exercises():
    """Render one card per supported exercise, sourced from EXERCISES."""
    st.markdown('<div class="sep"></div>', unsafe_allow_html=True)
    st.markdown(
        '<div class="sec-title">Desteklenen Egzersizler</div>',
        unsafe_allow_html=True,
    )
    st.markdown(
        '<div class="sec-sub">AI modelimiz asagidaki hareketleri taniyor</div>',
        unsafe_allow_html=True,
    )
    for column, exercise in zip(st.columns(5), EXERCISES):
        # Each card shows the color accent, icon, Turkish/English names, and blurb.
        column.markdown(
            f"""
            <div class="g-card" style="text-align:center; padding:1.6rem 0.8rem;">
              <div class="accent-top" style="background:{exercise['color']};"></div>
              <div class="ex-icon">{exercise['icon']}</div>
              <div class="ex-name">{exercise['name']}</div>
              <div class="ex-en">{exercise['en']}</div>
              <div class="ex-desc">{exercise['desc']}</div>
            </div>
            """,
            unsafe_allow_html=True,
        )
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
def render_camera_section(ml_model, encoder, scaler, model_type,
                          feature_columns, pose_landmarker):
    """Render the live-workout area: CTA banner, usage tip, and the WebRTC
    camera widget wired to the per-frame prediction callback.

    All arguments are forwarded unchanged to make_video_frame_callback.
    """
    st.markdown('<div class="sep"></div>', unsafe_allow_html=True)

    cta_html = """
    <div class="cta-box">
      <div class="cta-t">Antrenmanina Basla</div>
      <div class="cta-d">
        START butonuna tiklayarak kameranizi acin ve egzersize baslayin
      </div>
    </div>
    """
    st.markdown(cta_html, unsafe_allow_html=True)

    tip_html = """
    <div class="tip-box" style="margin-top:1rem; text-align:center;">
      <strong>Ipucu:</strong> Iyi aydinlatilmis bir ortamda
      tam vucut gorunumunde durmaniz en iyi sonuclari verir.
      Tarayiciniz kamera izni isteyecektir.
    </div>
    """
    st.markdown(tip_html, unsafe_allow_html=True)

    frame_cb = make_video_frame_callback(
        ml_model, encoder, scaler, model_type, feature_columns, pose_landmarker,
    )

    webrtc_streamer(
        key="exercise-detection",
        mode=WebRtcMode.SENDRECV,
        video_frame_callback=frame_cb,
        # Video only; audio is irrelevant to pose detection.
        media_stream_constraints={"video": True, "audio": False},
        async_processing=True,
        # Public STUN server so the peer connection works behind NAT.
        rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    )
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
def render_footer():
    """Render the page footer with the project name and tech stack."""
    footer_html = """
    <div class="foot">
      <strong>BecomeAPro</strong> — AI-Powered Exercise Tracker<br>
      <span style="font-size:0.78rem; margin-top:4px; display:inline-block;">
        MediaPipe • XGBoost / PyTorch • Streamlit • WebRTC
      </span>
    </div>
    """
    st.markdown(footer_html, unsafe_allow_html=True)
|
| 709 |
+
|
| 710 |
+
|
| 711 |
+
def render_model_missing():
    """Render an onboarding card explaining which model files are required
    when the trained artifacts are absent from the models/ directory."""
    st.markdown('<div class="sep"></div>', unsafe_allow_html=True)
    # Center the card in the middle column of a 1:3:1 layout.
    center_col = st.columns([1, 3, 1])[1]
    center_col.markdown(
        """
        <div class="onboard-card">
          <h3>Model Dosyalari Bulunamadi</h3>
          <p>
            Uygulamanin calisabilmesi icin egitilmis model dosyalarinin
            <code>models/</code> klasorune eklenmesi gerekiyor.
          </p>
          <p style="margin-top:1rem; font-size:0.85rem; color:#5a5a7a;">
            Gerekli dosyalar: meta.pkl, encoder.pkl, scaler.pkl,
            final_model.pkl, metadata.json
          </p>
        </div>
        """,
        unsafe_allow_html=True,
    )
|
| 731 |
+
|
| 732 |
+
|
| 733 |
+
# ---------------------------------------------------------------------------
|
| 734 |
+
# Main
|
| 735 |
+
# ---------------------------------------------------------------------------
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def main():
    """App entry point: inject CSS, load model artifacts, and lay out the page.

    When the model artifacts are missing (first tuple element is None), only
    the onboarding card is shown; otherwise the full workout UI is rendered.
    """
    st.markdown(CUSTOM_CSS, unsafe_allow_html=True)

    artifacts = load_all_artifacts()

    render_hero()

    if artifacts[0] is None:
        # No trained model available — explain what files are needed.
        render_model_missing()
    else:
        ml_model, encoder, scaler, model_type, feature_columns, pose_landmarker, _ = artifacts
        render_stats()
        render_how_it_works()
        render_exercises()
        render_camera_section(
            ml_model, encoder, scaler, model_type, feature_columns, pose_landmarker,
        )
    render_footer()
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
# Launch the Streamlit app when executed as a script (e.g. `streamlit run app.py`).
if __name__ == "__main__":
    main()
|
requirements.txt
CHANGED
|
@@ -1,3 +1,9 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit>=1.20
|
| 2 |
+
streamlit-webrtc>=0.60.1
|
| 3 |
+
av
|
| 4 |
+
numpy>=1.21
|
| 5 |
+
scikit-learn>=1.0
|
| 6 |
+
xgboost>=1.5
|
| 7 |
+
joblib>=1.1
|
| 8 |
+
mediapipe>=0.10
|
| 9 |
+
opencv-python-headless>=4.5
|