Amol Kaushik committed on
Commit
fc08c1d
·
1 Parent(s): 42a4f47

Remove duplicate A3/app.py - root app.py is the correct one

Browse files
Files changed (1) hide show
  1. A3/app.py +0 -399
A3/app.py DELETED
@@ -1,399 +0,0 @@
1
- import gradio as gr
2
- import pandas as pd
3
- import pickle
4
- import os
5
-
6
# Get directory where this script is located
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# Artifact and data locations, resolved relative to this script so the app
# works regardless of the current working directory.
MODEL_PATH = os.path.join(SCRIPT_DIR, "models/champion_model_final_2.pkl")
CLASSIFICATION_MODEL_PATH = os.path.join(SCRIPT_DIR, "models/final_champion_model_A3.pkl")
DATA_PATH = os.path.join(SCRIPT_DIR, "A3_Data/train_dataset.csv")

# Regression (scoring) model state, populated by load_champion_model().
model = None
FEATURE_NAMES = None
MODEL_METRICS = None

# Classification model
# State populated by load_classification_model().
classification_model = None
CLASSIFICATION_FEATURE_NAMES = None
CLASSIFICATION_CLASSES = None
CLASSIFICATION_METRICS = None

# Coaching advice shown alongside a predicted weakest body region.
BODY_REGION_RECOMMENDATIONS = {
    'Upper Body': "Focus on shoulder mobility, thoracic spine extension, and keeping your head neutral.",
    'Lower Body': "Work on hip mobility, ankle dorsiflexion, and knee tracking over toes."
}
-
29
def load_champion_model():
    """Locate and deserialize the champion regression model artifact.

    Tries MODEL_PATH first, then the sibling A2 copy. On success populates
    the module-level ``model``, ``FEATURE_NAMES`` and ``MODEL_METRICS``
    globals and returns True; returns False when no artifact exists.
    """
    global model, FEATURE_NAMES, MODEL_METRICS

    candidate_paths = (
        MODEL_PATH,
        os.path.join(SCRIPT_DIR, "../A2/models/champion_model_final_2.pkl"),
    )

    # First candidate that actually exists on disk wins.
    path = next((p for p in candidate_paths if os.path.exists(p)), None)
    if path is None:
        return False

    print(f"Loading champion model from {path}")
    with open(path, "rb") as f:
        artifact = pickle.load(f)

    model = artifact["model"]
    FEATURE_NAMES = artifact["feature_columns"]
    # Metrics may be absent from older artifacts; default to an empty dict.
    MODEL_METRICS = artifact.get("test_metrics", {})

    print(f"model loaded successfully")
    print(f"Features: {len(FEATURE_NAMES)} columns")
    print(f"Test R2: {MODEL_METRICS.get('r2', 'N/A')}")
    return True
52
-
53
-
54
def load_classification_model():
    """Deserialize the weakest-link classification model artifact.

    On success populates the module-level classification globals and
    returns True; returns False when the artifact file is missing.
    """
    global classification_model, CLASSIFICATION_FEATURE_NAMES, CLASSIFICATION_CLASSES, CLASSIFICATION_METRICS

    # Guard clause: bail out early when the artifact is absent.
    if not os.path.exists(CLASSIFICATION_MODEL_PATH):
        print("Classification model not found")
        return False

    print(f"Loading classification model from {CLASSIFICATION_MODEL_PATH}")
    with open(CLASSIFICATION_MODEL_PATH, "rb") as f:
        artifact = pickle.load(f)

    classification_model = artifact["model"]
    CLASSIFICATION_FEATURE_NAMES = artifact["feature_columns"]
    CLASSIFICATION_CLASSES = artifact["classes"]
    # Metrics may be absent from the artifact; default to an empty dict.
    CLASSIFICATION_METRICS = artifact.get("test_metrics", {})

    print(f"Classification model loaded: {len(CLASSIFICATION_FEATURE_NAMES)} features")
    print(f"Classes: {CLASSIFICATION_CLASSES}")
    return True
73
-
74
-
75
- load_champion_model()
76
- load_classification_model()
77
-
78
-
79
# prediction function
def predict_score(*feature_values):
    """Predict a squat-form score from deviation feature values.

    Args:
        *feature_values: One numeric deviation value (0..1) per entry in
            FEATURE_NAMES, in the same order as the UI sliders.

    Returns:
        Tuple of (score string, interpretation string, markdown details) —
        one value per gradio output component.
    """
    if model is None:
        # BUGFIX: must return three values to match the three outputs
        # (score_output, interp_output, details_output) wired in
        # create_interface(); the original returned only two.
        return "Error", "Model not loaded", ""

    # Convert inputs to dataframe with correct feature names
    features_df = pd.DataFrame([feature_values], columns=FEATURE_NAMES)

    raw_score = model.predict(features_df)[0]

    # Clamp score to the valid [0, 1] range and express as a percentage.
    score = max(0, min(1, raw_score)) * 100

    if score >= 80:
        interpretation = "Excellent, great squat form"
    elif score >= 60:
        interpretation = "Good, minor improvements needed"
    elif score >= 40:
        interpretation = "Average, a lot of areas to work on"
    else:
        interpretation = "Needs work, focus on proper form"

    # Create output
    r2 = MODEL_METRICS.get('r2', 'N/A')
    correlation = MODEL_METRICS.get('correlation', 'N/A')

    # Format metrics — they may be the string 'N/A' rather than numbers.
    r2_str = f"{r2:.4f}" if isinstance(r2, (int, float)) else str(r2)
    corr_str = f"{correlation:.4f}" if isinstance(correlation, (int, float)) else str(correlation)

    details = f"""
### Prediction Details
- **Raw Model Output:** {raw_score:.4f}
- **Normalized Score:** {score:.1f}%
- **Assessment:** {interpretation}

### Model Performance
- **Test R-squared:** {r2_str}
- **Test Correlation:** {corr_str}

*Lower deviation values = better form*
"""

    return f"{score:.1f}%", interpretation, details
123
-
124
-
125
# classification prediction function
def predict_weakest_link(*feature_values):
    """Predict which body region is the 'weakest link' in the squat.

    Takes one deviation value per CLASSIFICATION_FEATURE_NAMES entry and
    returns (predicted region, confidence string, markdown details).
    """
    if classification_model is None:
        return "Error", "Model not loaded", ""

    # Single-row frame with the exact column names the model was trained on.
    features_df = pd.DataFrame([feature_values], columns=CLASSIFICATION_FEATURE_NAMES)

    prediction = classification_model.predict(features_df)[0]
    probabilities = classification_model.predict_proba(features_df)[0]

    # Get top predictions, sorted by descending probability.
    class_probs = list(zip(CLASSIFICATION_CLASSES, probabilities))
    class_probs.sort(key=lambda x: x[1], reverse=True)

    confidence = max(probabilities) * 100
    recommendation = BODY_REGION_RECOMMENDATIONS.get(prediction, "Focus on exercises that strengthen this region.")

    # Test metrics may be absent from the artifact, hence the 'N/A' default.
    accuracy = CLASSIFICATION_METRICS.get('accuracy', 'N/A')
    f1_weighted = CLASSIFICATION_METRICS.get('f1_weighted', 'N/A')
    acc_str = f"{accuracy:.2%}" if isinstance(accuracy, (int, float)) else str(accuracy)
    f1_str = f"{f1_weighted:.2%}" if isinstance(f1_weighted, (int, float)) else str(f1_weighted)

    # Build prediction list (one markdown line per class, ranked).
    predictions_list = "\n".join([f"{i+1}. **{cp[0]}** - {cp[1]*100:.1f}%" for i, cp in enumerate(class_probs)])

    details = f"""
### Prediction Details
- **Predicted Body Region:** {prediction}
- **Confidence:** {confidence:.1f}%

### Probability Distribution
{predictions_list}

### Recommendation
{recommendation}

### Model Performance
- **Test Accuracy:** {acc_str}
- **Test F1 (weighted):** {f1_str}
"""

    return prediction, f"Confidence: {confidence:.1f}%", details
167
-
168
-
169
def load_example():
    """Return one random row of scoring feature values from the training CSV.

    Always returns exactly len(FEATURE_NAMES) values so the result lines up
    one-to-one with the slider components (`outputs=inputs` in the UI).
    Features missing from the CSV default to the neutral value 0.5.
    """
    if FEATURE_NAMES is None:
        # Model never loaded; fall back to a fixed plausible slider count.
        return [0.5] * 35

    try:
        # Dataset uses ';' separators and ',' as the decimal mark.
        df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
        row = df.sample(1).iloc[0]
        # BUGFIX: the original returned only the features present in the
        # CSV, which produced a list shorter than the slider count when any
        # feature column was missing. Keep FEATURE_NAMES order and pad
        # missing columns with 0.5 instead.
        return [float(row[f]) if f in df.columns else 0.5 for f in FEATURE_NAMES]
    except Exception as e:
        print(f"Error loading example: {e}")
        return [0.5] * len(FEATURE_NAMES)
181
-
182
-
183
def load_classification_example():
    """Return one random row of classification feature values from the CSV.

    Always returns exactly len(CLASSIFICATION_FEATURE_NAMES) values so the
    result lines up one-to-one with the classification sliders. Features
    missing from the CSV default to the neutral value 0.5.
    """
    if CLASSIFICATION_FEATURE_NAMES is None:
        # Model never loaded; fall back to a fixed plausible slider count.
        return [0.5] * 40

    try:
        # Dataset uses ';' separators and ',' as the decimal mark.
        df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
        row = df.sample(1).iloc[0]
        # BUGFIX: as in load_example(), pad features missing from the CSV
        # with 0.5 so the returned list always matches the slider count.
        return [float(row[f]) if f in df.columns else 0.5 for f in CLASSIFICATION_FEATURE_NAMES]
    except Exception as e:
        print(f"Error loading classification example: {e}")
        return [0.5] * len(CLASSIFICATION_FEATURE_NAMES)
195
-
196
-
197
# create gradio interface
def create_interface():
    """Build the gradio Blocks UI.

    Tab 1 scores squat form via predict_score; Tab 2 (only when the
    classification model loaded) predicts the weakest body region via
    predict_weakest_link. Returns the Blocks app, or a stub error
    interface when the scoring model failed to load.
    """
    if FEATURE_NAMES is None:
        # Scoring model never loaded; show a minimal error interface.
        return gr.Interface(
            fn=lambda: "Model not loaded",
            inputs=[],
            outputs="text",
            title="Error: Model not loaded"
        )

    # Create input sliders for scoring features (built up-front, rendered
    # later inside the appropriate tab via .render()).
    inputs = []
    for name in FEATURE_NAMES:
        slider = gr.Slider(
            minimum=0,
            maximum=1,
            value=0.5,
            step=0.01,
            label=name.replace("_", " "),
        )
        inputs.append(slider)

    # Create input sliders for classification features
    classification_inputs = []
    if CLASSIFICATION_FEATURE_NAMES is not None:
        for name in CLASSIFICATION_FEATURE_NAMES:
            slider = gr.Slider(
                minimum=0,
                maximum=1,
                value=0.5,
                step=0.01,
                label=name.replace("_", " "),
            )
            classification_inputs.append(slider)

    # Build the interface
    description = """
## Deep Squat Movement Assessment

**How to use:**
1. Adjust the sliders to input deviation values (0 = no deviation, 1 = maximum deviation)
2. Click "Submit" to get your predicted score
3. Or click "Load Random Example" to test with real data

**Score Interpretation:**
- 80-100%: Excellent form
- 60-79%: Good form
- 40-59%: Average form
- 0-39%: Needs improvement
"""

    classification_description = """
## Body Region Classification

**How to use:**
1. Adjust the sliders to input deviation values (0 = no deviation, 1 = maximum deviation)
2. Click "Predict Body Region" to identify where to focus improvements
3. Or click "Load Random Example" to test with real data

**Body Regions:** Upper Body, Lower Body
"""

    # features into categories for scoring (grouped by substring in name)
    angle_features = [n for n in FEATURE_NAMES if "Angle" in n]
    nasm_features = [n for n in FEATURE_NAMES if "NASM" in n]
    time_features = [n for n in FEATURE_NAMES if "Time" in n]

    # Get indices for each category (positions into the `inputs` list)
    angle_indices = [FEATURE_NAMES.index(f) for f in angle_features]
    nasm_indices = [FEATURE_NAMES.index(f) for f in nasm_features]
    time_indices = [FEATURE_NAMES.index(f) for f in time_features]

    # Classification feature categories (same grouping scheme)
    if CLASSIFICATION_FEATURE_NAMES is not None:
        class_angle_features = [n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" in n]
        class_nasm_features = [n for n in CLASSIFICATION_FEATURE_NAMES if "NASM" in n]
        class_time_features = [n for n in CLASSIFICATION_FEATURE_NAMES if "Time" in n]
        class_angle_indices = [CLASSIFICATION_FEATURE_NAMES.index(f) for f in class_angle_features]
        class_nasm_indices = [CLASSIFICATION_FEATURE_NAMES.index(f) for f in class_nasm_features]
        class_time_indices = [CLASSIFICATION_FEATURE_NAMES.index(f) for f in class_time_features]

    # Create the main interface
    with gr.Blocks(title="Deep Squat Assessment") as demo:
        gr.Markdown("# Deep Squat Movement Assessment")

        with gr.Tabs():
            # Tab 1: Movement Scoring (original A2 functionality)
            with gr.TabItem("Movement Scoring"):
                gr.Markdown(description)

                with gr.Row():
                    with gr.Column(scale=2):
                        gr.Markdown("### Input Features")
                        gr.Markdown(f"*{len(FEATURE_NAMES)} features loaded from champion model*")
                        gr.Markdown("*Deviation values: 0 = perfect, 1 = maximum deviation*")

                        with gr.Tabs():
                            with gr.TabItem(f"Angle Deviations ({len(angle_indices)})"):
                                for idx in angle_indices:
                                    # Place the pre-built slider in this tab.
                                    inputs[idx].render()

                            with gr.TabItem(f"NASM Deviations ({len(nasm_indices)})"):
                                for idx in nasm_indices:
                                    inputs[idx].render()

                            with gr.TabItem(f"Time Deviations ({len(time_indices)})"):
                                for idx in time_indices:
                                    inputs[idx].render()

                    with gr.Column(scale=1):
                        gr.Markdown("### Results")
                        score_output = gr.Textbox(label="Predicted Score")
                        interp_output = gr.Textbox(label="Assessment")
                        details_output = gr.Markdown(label="Details")

                with gr.Row():
                    submit_btn = gr.Button("Submit", variant="primary")
                    example_btn = gr.Button("Load Random Example")
                    clear_btn = gr.Button("Clear")

                submit_btn.click(
                    fn=predict_score,
                    inputs=inputs,
                    outputs=[score_output, interp_output, details_output],
                )

                example_btn.click(
                    fn=load_example,
                    inputs=[],
                    outputs=inputs
                )

                # Reset every slider to 0.5 and blank the three result fields.
                clear_btn.click(
                    fn=lambda: [0.5] * len(FEATURE_NAMES) + ["", "", ""],
                    inputs=[],
                    outputs=inputs + [score_output, interp_output, details_output],
                )

            # weakest link classification (tab only exists when that model loaded)
            if CLASSIFICATION_FEATURE_NAMES is not None:
                with gr.TabItem("Weakest Link Classification"):
                    gr.Markdown(classification_description)

                    with gr.Row():
                        with gr.Column(scale=2):
                            gr.Markdown("### Input Features")
                            gr.Markdown(f"*{len(CLASSIFICATION_FEATURE_NAMES)} features for classification*")
                            gr.Markdown("*Deviation values: 0 = perfect, 1 = maximum deviation*")

                            with gr.Tabs():
                                with gr.TabItem(f"Angle Deviations ({len(class_angle_indices)})"):
                                    for idx in class_angle_indices:
                                        classification_inputs[idx].render()

                                with gr.TabItem(f"NASM Deviations ({len(class_nasm_indices)})"):
                                    for idx in class_nasm_indices:
                                        classification_inputs[idx].render()

                                with gr.TabItem(f"Time Deviations ({len(class_time_indices)})"):
                                    for idx in class_time_indices:
                                        classification_inputs[idx].render()

                        with gr.Column(scale=1):
                            gr.Markdown("### Results")
                            class_output = gr.Textbox(label="Predicted Body Region")
                            class_interp_output = gr.Textbox(label="Confidence")
                            class_details_output = gr.Markdown(label="Details")

                    with gr.Row():
                        class_submit_btn = gr.Button("Predict Body Region", variant="primary")
                        class_example_btn = gr.Button("Load Random Example")
                        class_clear_btn = gr.Button("Clear")

                    class_submit_btn.click(
                        fn=predict_weakest_link,
                        inputs=classification_inputs,
                        outputs=[class_output, class_interp_output, class_details_output],
                    )

                    class_example_btn.click(
                        fn=load_classification_example,
                        inputs=[],
                        outputs=classification_inputs
                    )

                    # Reset classification sliders and blank the result fields.
                    class_clear_btn.click(
                        fn=lambda: [0.5] * len(CLASSIFICATION_FEATURE_NAMES) + ["", "", ""],
                        inputs=[],
                        outputs=classification_inputs + [class_output, class_interp_output, class_details_output],
                    )

    return demo
389
-
390
-
391
# Create the interface at import time so hosting platforms can pick up `demo`.
demo = create_interface()

if __name__ == "__main__":
    # Serve on all interfaces, port 7860, without a public share link.
    demo.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
    )