1mpreccable committed on
Commit
e282e15
·
verified ·
1 Parent(s): 536096e

Upload 10 files

Browse files
interface_pages/__init__.py ADDED
File without changes
interface_pages/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (184 Bytes). View file
 
interface_pages/__pycache__/about_page.cpython-312.pyc ADDED
Binary file (438 Bytes). View file
 
interface_pages/__pycache__/home_page.cpython-312.pyc ADDED
Binary file (500 Bytes). View file
 
interface_pages/__pycache__/yoga_position_from_stream.cpython-312.pyc ADDED
Binary file (1.61 kB). View file
 
interface_pages/__pycache__/yoga_position_from_video.cpython-312.pyc ADDED
Binary file (613 Bytes). View file
 
interface_pages/about_page.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+
4
def about_page():
    """Render the static "About Us" page as a single Markdown component."""
    about_markdown = gr.Markdown(
        """
        # About Us

        WYOGAI — the BEST.
        """
    )
    return about_markdown
interface_pages/home_page.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+
4
def home_page():
    """Assemble the landing page: an ASCII-logo banner plus a welcome blurb.

    Returns:
        gr.Column: The column containing the home page widgets.
    """
    # Placeholder for an ASCII-art logo; currently empty.
    logo_text = ""

    with gr.Column() as landing_column:
        gr.HTML(f'<div class="ascii-logo-panel"><pre>{logo_text}</pre></div>')
        welcome_markdown = """
            # Welcome to YOGAI App!

            This is your home page where you can explore different yoga practices.
            """
        gr.Markdown(welcome_markdown)

    return landing_column
interface_pages/yoga_position_from_stream.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import cv2
3
+ import numpy as np
4
+ import mediapipe as mp
5
+ from mediapipe.python.solutions import drawing_utils as mp_drawing
6
+ from PoseClassification.pose_embedding import FullBodyPoseEmbedding
7
+ from PoseClassification.pose_classifier import PoseClassifier
8
+ from PoseClassification.utils import EMADictSmoothing
9
+
10
# Initialize components
# Module-level, shared pipeline state: a MediaPipe pose tracker, an embedder
# that turns raw landmarks into a pose embedding, and a classifier fed from
# pre-computed CSV samples. These are created once at import time and reused
# across frames (single shared state — presumably one active stream; verify
# if multiple sessions are expected).
mp_pose = mp.solutions.pose
pose_tracker = mp_pose.Pose()
pose_embedder = FullBodyPoseEmbedding()
pose_classifier = PoseClassifier(
    pose_samples_folder="data/yoga_poses_csvs_out",  # reference pose samples (CSV)
    pose_embedder=pose_embedder,
    top_n_by_max_distance=30,   # candidate pool size selected by max distance
    top_n_by_mean_distance=10,  # final neighbours kept by mean distance
)
# Exponential-moving-average smoothing over per-class scores to reduce
# frame-to-frame jitter in the classification output.
pose_classification_filter = EMADictSmoothing(window_size=10, alpha=0.2)

# Pose labels the app knows about; "none" means no confident detection.
class_names = ["chair", "cobra", "dog", "goddess", "plank", "tree", "warrior", "none"]
# Minimum smoothed score a pose must reach before it is reported
# (consumed by check_major_current_position below).
position_threshold = 8.0
+
25
+
26
def check_major_current_position(positions_detected: dict, threshold_position) -> str:
    """Return the dominant pose name, or "none" if no pose is confident enough.

    Args:
        positions_detected: Mapping of pose name -> smoothed classification
            score, e.g. ``{"tree": 9.2, "dog": 0.4}``.
        threshold_position: Minimum score a pose must reach to be reported;
            any value convertible to float.

    Returns:
        The pose name with the highest score, or ``"none"`` when the mapping
        is empty or every score is strictly below the threshold.
    """
    # Guard against an empty mapping: the original max() call would raise
    # ValueError here. Also find the best entry in a single pass instead of
    # scanning the dict twice.
    if not positions_detected:
        return "none"
    best_pose, best_score = max(positions_detected.items(), key=lambda kv: kv[1])
    if best_score < float(threshold_position):
        return "none"
    return best_pose
30
+
31
+
32
def process_frame(frame):
    """Classify the yoga pose visible in a single BGR video frame.

    Args:
        frame: BGR image array in OpenCV convention.

    Returns:
        tuple: ``(pose_label, frame)`` where ``pose_label`` is the dominant
        pose name (or ``"none"``) and ``frame`` is the input, unchanged.
    """
    # MediaPipe expects RGB input while OpenCV delivers BGR.
    rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    detection = pose_tracker.process(image=rgb_image)

    if detection.pose_landmarks is None:
        # No person detected: report a fully confident "none" directly,
        # bypassing the smoothing filter for this frame.
        scores = {"none": 10.0}
    else:
        height, width = frame.shape[0], frame.shape[1]
        # Scale normalised landmarks to pixel space; z uses the frame width
        # as its scale, matching x.
        landmarks_px = np.array(
            [
                [point.x * width, point.y * height, point.z * width]
                for point in detection.pose_landmarks.landmark
            ],
            dtype=np.float32,
        )
        raw_scores = pose_classifier(landmarks_px)
        # Smooth per-class scores over time to reduce jitter.
        scores = pose_classification_filter(raw_scores)

    dominant_pose = check_major_current_position(scores, position_threshold)
    return dominant_pose, frame
56
+
57
+
58
def yoga_position_from_stream():
    """Build the live-stream yoga pose classification page.

    Streams webcam frames through the module-level pose pipeline, tracks how
    long the current pose has been held, optionally records annotated frames,
    and offers a debug view with the classified feed and recording controls.

    Returns:
        gr.Column: The column containing the page's widgets.
    """
    # Mutable per-page state captured by the closures below.
    current_position = "none"   # last pose label reported
    position_timer = 0          # seconds the current pose has been held
    last_update_time = 0        # tick-based timestamp of the previous frame
    recording = False           # whether annotated frames are being collected
    recorded_frames = []        # annotated frames captured while recording

    def classify_pose(frame):
        # Per-frame stream callback: classify, time the pose, annotate.
        nonlocal current_position, position_timer, last_update_time, recording, recorded_frames
        if frame is None:
            # Stream not delivering yet: echo the current state only.
            return (
                None,
                None,
                current_position,
                f"Duration: {int(position_timer)} seconds",
            )

        new_position, processed_frame = process_frame(frame)

        if new_position != current_position:
            # Pose changed: restart the hold timer from now.
            current_position = new_position
            position_timer = 0
            last_update_time = cv2.getTickCount() / cv2.getTickFrequency()
        else:
            # Same pose: accumulate elapsed wall-clock time.
            current_time = cv2.getTickCount() / cv2.getTickFrequency()
            position_timer += current_time - last_update_time
            last_update_time = current_time

        # NOTE(review): this re-runs pose detection on the same frame just to
        # obtain landmarks for drawing; process_frame could return them
        # instead to avoid the duplicate inference. Also, draw_landmarks may
        # receive None when no person is visible — confirm it tolerates that.
        mp_drawing.draw_landmarks(
            image=processed_frame,
            landmark_list=pose_tracker.process(
                cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
            ).pose_landmarks,
            connections=mp_pose.POSE_CONNECTIONS,
        )

        # Overlay the pose label and hold duration on the debug frame.
        cv2.putText(
            processed_frame,
            f"Pose: {current_position}",
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 255, 0),
            2,
        )
        cv2.putText(
            processed_frame,
            f"Duration: {int(position_timer)} seconds",
            (10, 70),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 255, 0),
            2,
        )

        # Only collect frames while a recording is in progress.
        if recording:
            recorded_frames.append(processed_frame)

        return (
            frame,
            processed_frame,
            current_position,
            f"Duration: {int(position_timer)} seconds",
        )

    def toggle_debug(debug_mode):
        # Swap visibility between the debug view (+ classified feed)
        # and the normal view.
        return [
            gr.update(visible=debug_mode),
            gr.update(visible=not debug_mode),
            gr.update(visible=debug_mode),
        ]

    def start_recording():
        # Begin a new recording, discarding any previously captured frames.
        nonlocal recording, recorded_frames
        recording = True
        recorded_frames = []
        return "Recording started"

    def stop_recording():
        # Stop collecting frames; the captured frames are kept for saving.
        nonlocal recording
        recording = False
        return "Recording stopped"

    def save_video():
        # Write the collected frames to an MP4 file at a fixed 30 fps.
        nonlocal recorded_frames
        if not recorded_frames:
            return None, "No recorded frames available"

        output_path = "recorded_yoga_session.mp4"
        height, width, _ = recorded_frames[0].shape
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        out = cv2.VideoWriter(output_path, fourcc, 30.0, (width, height))

        for frame in recorded_frames:
            out.write(frame)
        out.release()

        return output_path, "Video saved successfully"

    # --- Page layout and event wiring ---
    with gr.Column() as yoga_stream:
        gr.Markdown("# Yoga Position Classifier")
        gr.Markdown("Stream live yoga sessions and get real-time pose classification.")

        debug_toggle = gr.Checkbox(label="Debug Mode", value=False)

        with gr.Column(visible=True) as normal_view:
            video_feed = gr.Webcam(streaming=True, elem_classes="fullscreen")
            pose_output = gr.Textbox(label="Current Pose")
            timer_output = gr.Textbox(label="Pose Duration")

        with gr.Column(visible=False) as debug_view:
            classified_video = gr.Image(label="Classified Video Feed")
            with gr.Row():
                start_button = gr.Button("Start Recording")
                stop_button = gr.Button("Stop Recording")
                save_button = gr.Button("Save Recording")
            recording_status = gr.Textbox(label="Recording Status")
            recorded_video = gr.Video(label="Recorded Video")
            download_button = gr.Button("Download Recorded Video")

        debug_toggle.change(
            toggle_debug,
            inputs=[debug_toggle],
            outputs=[debug_view, normal_view, classified_video],
        )

        # Run the classifier on every streamed webcam frame.
        video_feed.stream(
            classify_pose,
            inputs=[video_feed],
            outputs=[video_feed, classified_video, pose_output, timer_output],
            show_progress=False,
        )

        start_button.click(start_recording, outputs=[recording_status])
        stop_button.click(stop_recording, outputs=[recording_status])
        save_button.click(save_video, outputs=[recorded_video, recording_status])
        # NOTE(review): creating gr.File() inline here places a new component
        # outside the declared layout — confirm this renders as intended.
        download_button.click(lambda: "recorded_yoga_session.mp4", outputs=[gr.File()])

    return yoga_stream
197
+
198
+
199
if __name__ == "__main__":
    # Manual smoke test: mount the page in a standalone Blocks app.
    with gr.Blocks() as demo:
        yoga_position_from_stream()
    demo.launch()
interface_pages/yoga_position_from_video.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+
4
def yoga_position_from_video():
    """Render the static "Yoga from Video" page as a Markdown component."""
    page_markdown = gr.Markdown(
        """
        # Yoga from Video

        Watch pre-recorded yoga sessions and practice at your convenience.

        Select a video below:

        - Beginner Yoga
        - Advanced Techniques
        - Restorative Yoga
        """
    )
    return page_markdown