{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Preprocessing Stage"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "# Preprocessing Stage\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "# Load the dataset and per-athlete baseline data\n",
    "df = pd.read_csv(\"data.csv\")\n",
    "baseline_df = pd.read_csv(\"baseline.csv\")\n",
    "\n",
    "required_cols = [\n",
    "    \"ID\",\n",
    "    \"Age\",\n",
    "    \"Weight\",\n",
    "    \"Height\",\n",
    "    \"AVRR\",\n",
    "    \"SDNN\",\n",
    "    \"RMSSD\",\n",
    "    \"PNN50\",\n",
    "    \"Coefficient_of_Variation\",\n",
    "    \"Fatigue_Level\",\n",
    "]\n",
    "\n",
    "# Coerce every column to numeric; unparseable entries become NaN and are dropped below.\n",
    "numeric_cols = [c for c in required_cols if c not in (\"Fatigue_Level\", \"ID\")]\n",
    "for c in numeric_cols:\n",
    "    df[c] = pd.to_numeric(df[c], errors=\"coerce\")\n",
    "\n",
    "df[\"ID\"] = pd.to_numeric(df[\"ID\"], errors=\"coerce\")\n",
    "df[\"Fatigue_Level\"] = pd.to_numeric(df[\"Fatigue_Level\"], errors=\"coerce\")\n",
    "\n",
    "reshaped_df = df[required_cols].dropna().copy()\n",
    "reshaped_df[\"Fatigue_Level\"] = reshaped_df[\"Fatigue_Level\"].astype(int)\n",
    "reshaped_df[\"ID\"] = reshaped_df[\"ID\"].astype(int)\n",
    "\n",
    "# Normalize HRV features using per-athlete baseline (percent change).\n",
    "# This matches what the backend sends: (value - baseline) / baseline.\n",
    "hrv_features = ['AVRR', 'SDNN', 'RMSSD', 'PNN50', 'Coefficient_of_Variation']\n",
    "# keep='last' mirrors the last-wins semantics of building a dict keyed by ID.\n",
    "baseline_indexed = baseline_df.drop_duplicates('ID', keep='last').set_index('ID')[hrv_features]\n",
    "\n",
    "# Vectorized percent-change: rows whose athlete has no baseline, or whose\n",
    "# baseline value is zero/NaN, fall back to 0 (no deviation from baseline).\n",
    "for feature in hrv_features:\n",
    "    base = reshaped_df['ID'].map(baseline_indexed[feature])\n",
    "    pct_change = (reshaped_df[feature] - base) / base\n",
    "    reshaped_df[feature] = pct_change.where(base.notna() & (base != 0), 0.0)\n",
    "\n",
    "print(\"Dataset after baseline normalization (percent change):\")\n",
    "print(reshaped_df.head(10))\n",
    "print(reshaped_df.shape)\n",
    "\n",
    "# Engineer additional features from HRV percent-change values.\n",
    "# The 0.001 epsilon guards the ratios against division by zero.\n",
    "reshaped_df['RMSSD_SDNN_ratio'] = reshaped_df['RMSSD'] / (reshaped_df['SDNN'].abs() + 0.001)\n",
    "reshaped_df['HRV_index'] = (reshaped_df['SDNN'] + reshaped_df['RMSSD']) / 2\n",
    "reshaped_df['Stress_index'] = reshaped_df['AVRR'] / (reshaped_df['SDNN'].abs() + 0.001)\n",
    "reshaped_df['Parasympathetic'] = reshaped_df['RMSSD'] * reshaped_df['PNN50']\n",
    "reshaped_df['AVRR_PNN50'] = reshaped_df['AVRR'] * reshaped_df['PNN50']\n",
    "reshaped_df['CV_SDNN'] = reshaped_df['Coefficient_of_Variation'] * reshaped_df['SDNN']\n",
    "reshaped_df['RMSSD_sq'] = reshaped_df['RMSSD'] ** 2\n",
    "reshaped_df['SDNN_sq'] = reshaped_df['SDNN'] ** 2\n",
    "\n",
    "print(f\"\\nEngineered features added. Total columns: {len(reshaped_df.columns)}\")"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Feature Setup"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "from sklearn.model_selection import cross_val_predict, StratifiedKFold\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn.base import clone\n",
    "from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import (\n",
    "    accuracy_score,\n",
    "    precision_score,\n",
    "    recall_score,\n",
    "    f1_score,\n",
    "    confusion_matrix,\n",
    "    classification_report\n",
    ")\n",
    "\n",
    "target_col = 'Fatigue_Level'\n",
    "base_feature_cols = [\n",
    "    'AVRR', 'SDNN', 'RMSSD', 'PNN50', 'Coefficient_of_Variation',\n",
    "    'Age', 'Weight', 'Height'\n",
    "]\n",
    "engineered_cols = [\n",
    "    'RMSSD_SDNN_ratio', 'HRV_index', 'Stress_index', 'Parasympathetic',\n",
    "    'AVRR_PNN50', 'CV_SDNN', 'RMSSD_sq', 'SDNN_sq'\n",
    "]\n",
    "feature_cols = base_feature_cols + engineered_cols\n",
    "\n",
    "X = reshaped_df[feature_cols]\n",
    "y = reshaped_df[target_col]\n",
    "\n",
    "# Scaler fit on ALL data is used only for the final demo-mode model below.\n",
    "# Cross-validation refits scaling inside each fold (via Pipeline) to avoid leakage.\n",
    "scaler = StandardScaler()\n",
    "X_scaled = scaler.fit_transform(X)\n",
    "\n",
    "print(f\"Total samples: {len(X)}\")\n",
    "print(f\"Features: {len(feature_cols)}\")\n",
    "print(f\"Class distribution:\\n{y.value_counts().sort_index()}\")"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Model Comparison (10-Fold Stratified Cross-Validation)\n",
    "\n",
    "We compare multiple supervised classifiers using 10-fold stratified CV for reliable accuracy estimates.\n",
    "Each sample is tested exactly once while trained on the other 90%. Feature scaling is refit inside\n",
    "each fold so the held-out fold never leaks into the scaler."
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "models = {\n",
    "    'Random Forest': RandomForestClassifier(\n",
    "        n_estimators=300, max_depth=None, min_samples_leaf=1,\n",
    "        random_state=42, class_weight='balanced'\n",
    "    ),\n",
    "    'Gradient Boosting': GradientBoostingClassifier(\n",
    "        n_estimators=300, max_depth=4, learning_rate=0.1,\n",
    "        subsample=0.8, random_state=42\n",
    "    ),\n",
    "    'Extra Trees': ExtraTreesClassifier(\n",
    "        n_estimators=300, max_depth=None,\n",
    "        class_weight='balanced', random_state=42\n",
    "    ),\n",
    "    'SVM (RBF)': SVC(\n",
    "        kernel='rbf', C=100.0, gamma='scale',\n",
    "        probability=True, random_state=42\n",
    "    ),\n",
    "    'KNN': KNeighborsClassifier(n_neighbors=3, weights='distance'),\n",
    "    'Logistic Regression': LogisticRegression(\n",
    "        max_iter=5000, class_weight='balanced', random_state=42\n",
    "    ),\n",
    "}\n",
    "\n",
    "cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\n",
    "\n",
    "print(\"=\" * 70)\n",
    "print(f\"MODEL PERFORMANCE (10-Fold Stratified CV, {len(X)} samples)\")\n",
    "print(\"=\" * 70)\n",
    "print(f\"{'Model':<25} {'Accuracy':>10} {'Precision':>10} {'Recall':>10} {'F1-Score':>10}\")\n",
    "print(\"-\" * 70)\n",
    "\n",
    "cv_results = {}\n",
    "for name, m in models.items():\n",
    "    # Scale inside each CV fold so the test fold never influences the scaler.\n",
    "    fold_pipeline = Pipeline([('scaler', StandardScaler()), ('clf', clone(m))])\n",
    "    y_pred = cross_val_predict(fold_pipeline, X, y, cv=cv)\n",
    "    acc = accuracy_score(y, y_pred)\n",
    "    prec = precision_score(y, y_pred, average='weighted', zero_division=0)\n",
    "    rec = recall_score(y, y_pred, average='weighted', zero_division=0)\n",
    "    f1_val = f1_score(y, y_pred, average='weighted', zero_division=0)\n",
    "    cv_results[name] = {'accuracy': acc, 'precision': prec, 'recall': rec, 'f1': f1_val}\n",
    "    print(f\"{name:<25} {acc:>10.4f} {prec:>10.4f} {rec:>10.4f} {f1_val:>10.4f}\")\n",
    "\n",
    "print(\"=\" * 70)\n",
    "\n",
    "# Pick the best model from the actual CV results instead of hardcoding it.\n",
    "best_model_name = max(cv_results, key=lambda n: cv_results[n]['accuracy'])\n",
    "print(f\"\\nBest CV model: {best_model_name} at {cv_results[best_model_name]['accuracy']:.1%}\")"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Visualize Model Comparison"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "cv_model_names = list(cv_results.keys())\n",
    "cv_accuracies = [cv_results[m]['accuracy'] for m in cv_model_names]\n",
    "cv_f1_scores = [cv_results[m]['f1'] for m in cv_model_names]\n",
    "\n",
    "x_cv = np.arange(len(cv_model_names))\n",
    "width = 0.35\n",
    "\n",
    "fig, ax = plt.subplots(figsize=(12, 6))\n",
    "bars1 = ax.bar(x_cv - width/2, cv_accuracies, width, label='Accuracy', color='steelblue')\n",
    "bars2 = ax.bar(x_cv + width/2, cv_f1_scores, width, label='F1 Score', color='coral')\n",
    "\n",
    "# Annotate every bar with its value, formatted as a percentage.\n",
    "for bar in list(bars1) + list(bars2):\n",
    "    ax.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01,\n",
    "            f'{bar.get_height():.1%}', ha='center', va='bottom', fontsize=9)\n",
    "\n",
    "ax.set_xlabel('Model')\n",
    "ax.set_ylabel('Score')\n",
    "ax.set_title('Model Comparison (10-Fold Stratified Cross-Validation)')\n",
    "ax.set_xticks(x_cv)\n",
    "ax.set_xticklabels(cv_model_names, rotation=45, ha='right')\n",
    "ax.legend()\n",
    "# Headroom above 100% so value labels on tall bars are not clipped.\n",
    "ax.set_ylim(0, 1.05)\n",
    "ax.grid(axis='y', alpha=0.3)\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Train Final Model on ALL Data (Demo Mode)"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "# Final deployed model: Random Forest trained on ALL data (demo mode).\n",
    "model = RandomForestClassifier(\n",
    "    n_estimators=300, max_depth=None, min_samples_leaf=1,\n",
    "    random_state=42, class_weight='balanced'\n",
    ")\n",
    "model.fit(X_scaled, y)\n",
    "\n",
    "train_preds = model.predict(X_scaled)\n",
    "train_acc = accuracy_score(y, train_preds)\n",
    "# NOTE: accuracy on the training data is optimistic; see the CV table for honest estimates.\n",
    "print(f\"Training accuracy (all data): {train_acc:.4f}\")\n",
    "\n",
    "if hasattr(model, 'feature_importances_'):\n",
    "    print(\"\\nFeature Importance:\")\n",
    "    feature_importance = pd.DataFrame({\n",
    "        'feature': feature_cols,\n",
    "        'importance': model.feature_importances_\n",
    "    }).sort_values('importance', ascending=False)\n",
    "    print(feature_importance.to_string(index=False))"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Feature Importance Visualization"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "if hasattr(model, 'feature_importances_'):\n",
    "    fig, ax = plt.subplots(figsize=(10, 6))\n",
    "    feature_importance_sorted = feature_importance.sort_values('importance', ascending=True)\n",
    "    ax.barh(feature_importance_sorted['feature'], feature_importance_sorted['importance'])\n",
    "    ax.set_xlabel('Importance')\n",
    "    ax.set_ylabel('Feature')\n",
    "    ax.set_title('Feature Importance (Random Forest)')\n",
    "    plt.tight_layout()\n",
    "    plt.show()"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Confusion Matrix"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "conf_matrix = confusion_matrix(y, train_preds)\n",
    "print(\"Confusion Matrix (training data):\")\n",
    "print(conf_matrix)\n",
    "print(f\"\\n{classification_report(y, train_preds, zero_division=0)}\")"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "import seaborn as sns\n",
    "\n",
    "# Derive tick labels from the observed classes instead of hardcoding 1..5.\n",
    "class_labels = sorted(y.unique())\n",
    "\n",
    "fig, ax = plt.subplots(figsize=(8, 6))\n",
    "sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', ax=ax,\n",
    "            xticklabels=class_labels,\n",
    "            yticklabels=class_labels)\n",
    "ax.set_xlabel('Predicted Fatigue Level')\n",
    "ax.set_ylabel('True Fatigue Level')\n",
    "# The matrix is for the final Random Forest, regardless of which model won CV.\n",
    "ax.set_title('Confusion Matrix - Random Forest (Training Data)')\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Prediction Function for Live Data"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "def _build_feature_row(raw_data):\n",
    "    \"\"\"Compute engineered features from base HRV percent-change values.\n",
    "\n",
    "    Mirrors the feature engineering applied to the training data above;\n",
    "    the two must stay in sync for predictions to be meaningful.\n",
    "    \"\"\"\n",
    "    row = dict(raw_data)\n",
    "    row['RMSSD_SDNN_ratio'] = row['RMSSD'] / (abs(row['SDNN']) + 0.001)\n",
    "    row['HRV_index'] = (row['SDNN'] + row['RMSSD']) / 2\n",
    "    row['Stress_index'] = row['AVRR'] / (abs(row['SDNN']) + 0.001)\n",
    "    row['Parasympathetic'] = row['RMSSD'] * row['PNN50']\n",
    "    row['AVRR_PNN50'] = row['AVRR'] * row['PNN50']\n",
    "    row['CV_SDNN'] = row['Coefficient_of_Variation'] * row['SDNN']\n",
    "    row['RMSSD_sq'] = row['RMSSD'] ** 2\n",
    "    row['SDNN_sq'] = row['SDNN'] ** 2\n",
    "    return row\n",
    "\n",
    "\n",
    "def predict_fatigue_level(new_data_row, scaler, model):\n",
    "    \"\"\"Predict the fatigue level (int) for one row of HRV percent-change data.\"\"\"\n",
    "    row = _build_feature_row(new_data_row)\n",
    "    input_df = pd.DataFrame([row])[feature_cols]\n",
    "    scaled_input = scaler.transform(input_df)\n",
    "    prediction = model.predict(scaled_input)[0]\n",
    "    return int(prediction)\n",
    "\n",
    "\n",
    "def predict_fatigue_with_confidence(new_data_row, scaler, model):\n",
    "    \"\"\"Predict fatigue level plus a {class: probability} confidence dict.\n",
    "\n",
    "    Falls back to probability 1.0 for the predicted class when the model\n",
    "    does not expose predict_proba.\n",
    "    \"\"\"\n",
    "    row = _build_feature_row(new_data_row)\n",
    "    input_df = pd.DataFrame([row])[feature_cols]\n",
    "    scaled_input = scaler.transform(input_df)\n",
    "    prediction = model.predict(scaled_input)[0]\n",
    "\n",
    "    if hasattr(model, 'predict_proba'):\n",
    "        proba = model.predict_proba(scaled_input)[0]\n",
    "        classes = model.classes_\n",
    "        prob_dict = {int(cls): float(prob) for cls, prob in zip(classes, proba)}\n",
    "    else:\n",
    "        prob_dict = {int(prediction): 1.0}\n",
    "\n",
    "    return int(prediction), prob_dict"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Save Model Artifacts"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "import joblib\n",
    "import json\n",
    "\n",
    "joblib.dump(scaler, \"scaler.joblib\")\n",
    "joblib.dump(model, \"fatigue_model.joblib\")\n",
    "\n",
    "# The deployed artifact is the Random Forest trained above, so record its CV\n",
    "# metrics regardless of which model topped the comparison table.\n",
    "rf_cv = cv_results['Random Forest']\n",
    "model_metadata = {\n",
    "    'model_type': 'Random Forest',\n",
    "    'feature_cols': feature_cols,\n",
    "    'target_col': target_col,\n",
    "    'cv_accuracy': float(rf_cv['accuracy']),\n",
    "    'cv_f1_score': float(rf_cv['f1']),\n",
    "    'training_accuracy': float(train_acc),\n",
    "    'n_samples': len(X),\n",
    "    'training_mode': 'all_data_demo'\n",
    "}\n",
    "\n",
    "with open(\"model_metadata.json\", \"w\") as f:\n",
    "    json.dump(model_metadata, f, indent=2)\n",
    "\n",
    "print(\"Model artifacts saved:\")\n",
    "print(\" - scaler.joblib\")\n",
    "print(\" - fatigue_model.joblib\")\n",
    "print(\" - model_metadata.json\")"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Test Predictions with Sample Data"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "print(\"=\" * 60)\n",
    "print(\"SAMPLE PREDICTIONS\")\n",
    "print(\"=\" * 60)\n",
    "\n",
    "new_data = {\n",
    "    \"AVRR\": 0.05,\n",
    "    \"SDNN\": 0.10,\n",
    "    \"RMSSD\": 0.15,\n",
    "    \"PNN50\": 0.10,\n",
    "    \"Coefficient_of_Variation\": 0.05,\n",
    "    \"Age\": 22,\n",
    "    \"Height\": 183,\n",
    "    \"Weight\": 180\n",
    "}\n",
    "\n",
    "pred, probs = predict_fatigue_with_confidence(new_data, scaler, model)\n",
    "print(f\"\\nTest 1 - Near baseline (expected low fatigue):\")\n",
    "print(f\"  Predicted Fatigue Level: {pred}\")\n",
    "print(f\"  Confidence scores: {probs}\")\n",
    "\n",
    "new_data = {\n",
    "    \"AVRR\": -0.20,\n",
    "    \"SDNN\": -0.15,\n",
    "    \"RMSSD\": -0.10,\n",
    "    \"PNN50\": -0.30,\n",
    "    \"Coefficient_of_Variation\": 0.10,\n",
    "    \"Age\": 22,\n",
    "    \"Height\": 183,\n",
    "    \"Weight\": 180\n",
    "}\n",
    "\n",
    "pred, probs = predict_fatigue_with_confidence(new_data, scaler, model)\n",
    "print(f\"\\nTest 2 - Moderate decline from baseline:\")\n",
    "print(f\"  Predicted Fatigue Level: {pred}\")\n",
    "print(f\"  Confidence scores: {probs}\")\n",
    "\n",
    "new_data = {\n",
    "    \"AVRR\": -0.40,\n",
    "    \"SDNN\": -0.50,\n",
    "    \"RMSSD\": -0.45,\n",
    "    \"PNN50\": -0.60,\n",
    "    \"Coefficient_of_Variation\": -0.20,\n",
    "    \"Age\": 21,\n",
    "    \"Height\": 163,\n",
    "    \"Weight\": 97\n",
    "}\n",
    "\n",
    "pred, probs = predict_fatigue_with_confidence(new_data, scaler, model)\n",
    "print(f\"\\nTest 3 - Significant decline from baseline:\")\n",
    "print(f\"  Predicted Fatigue Level: {pred}\")\n",
    "print(f\"  Confidence scores: {probs}\")"
   ],
   "execution_count": null,
   "outputs": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "StaminaSense (venv)",
   "language": "python",
   "name": "staminasense"
  },
  "language_info": {
   "name": "python",
   "version": "3.13.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}