Spaces:
Running
Running
Commit ·
c095e08
0
Parent(s):
Initial commit: NBA Sage Predictor for Hugging Face Spaces (with LFS for large files)
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .dockerignore +67 -0
- .gitattributes +3 -0
- .gitignore +64 -0
- Dockerfile +71 -0
- README.md +33 -0
- api/__init__.py +3 -0
- api/api.py +393 -0
- data/processed/game_dataset.joblib +3 -0
- data/processed/game_features.parquet +3 -0
- models/game_predictor.joblib +3 -0
- process.md +254 -0
- requirements.txt +21 -0
- server.py +341 -0
- src/__init__.py +1 -0
- src/auto_trainer.py +278 -0
- src/config.py +154 -0
- src/continuous_learner.py +306 -0
- src/data_collector.py +649 -0
- src/feature_engineering.py +695 -0
- src/injury_collector.py +224 -0
- src/live_data_collector.py +235 -0
- src/models/__init__.py +1 -0
- src/models/championship_predictor.py +237 -0
- src/models/game_predictor.py +331 -0
- src/models/mvp_predictor.py +257 -0
- src/prediction_pipeline.py +636 -0
- src/prediction_tracker.py +507 -0
- src/preprocessing.py +292 -0
- src/visualization.py +539 -0
- web/.gitignore +24 -0
- web/README.md +16 -0
- web/eslint.config.js +29 -0
- web/index.html +16 -0
- web/package.json +27 -0
- web/public/vite.svg +1 -0
- web/src/App.css +42 -0
- web/src/App.jsx +156 -0
- web/src/api.js +87 -0
- web/src/assets/react.svg +1 -0
- web/src/icons.jsx +162 -0
- web/src/index.css +1378 -0
- web/src/main.jsx +10 -0
- web/src/pages/Accuracy.jsx +297 -0
- web/src/pages/Championship.jsx +120 -0
- web/src/pages/HeadToHead.jsx +196 -0
- web/src/pages/LiveGames.jsx +320 -0
- web/src/pages/MvpRace.jsx +144 -0
- web/src/pages/PlayerStats.jsx +142 -0
- web/src/pages/Predictions.jsx +176 -0
- web/src/pages/Standings.jsx +115 -0
.dockerignore
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Virtual environments
|
| 2 |
+
venv/
|
| 3 |
+
.venv/
|
| 4 |
+
env/
|
| 5 |
+
.env/
|
| 6 |
+
|
| 7 |
+
# Python cache
|
| 8 |
+
__pycache__/
|
| 9 |
+
*.py[cod]
|
| 10 |
+
*$py.class
|
| 11 |
+
*.so
|
| 12 |
+
|
| 13 |
+
# Node modules (will be rebuilt in container)
|
| 14 |
+
node_modules/
|
| 15 |
+
web/node_modules/
|
| 16 |
+
|
| 17 |
+
# Build outputs (will be rebuilt)
|
| 18 |
+
web/dist/
|
| 19 |
+
dist/
|
| 20 |
+
build/
|
| 21 |
+
|
| 22 |
+
# IDE
|
| 23 |
+
.vscode/
|
| 24 |
+
.idea/
|
| 25 |
+
*.swp
|
| 26 |
+
*.swo
|
| 27 |
+
|
| 28 |
+
# OS
|
| 29 |
+
.DS_Store
|
| 30 |
+
Thumbs.db
|
| 31 |
+
|
| 32 |
+
# Git
|
| 33 |
+
.git/
|
| 34 |
+
.gitignore
|
| 35 |
+
|
| 36 |
+
# Logs
|
| 37 |
+
*.log
|
| 38 |
+
logs/
|
| 39 |
+
|
| 40 |
+
# Test files
|
| 41 |
+
tests/
|
| 42 |
+
test_*.py
|
| 43 |
+
*_test.py
|
| 44 |
+
|
| 45 |
+
# Jupyter
|
| 46 |
+
.ipynb_checkpoints/
|
| 47 |
+
*.ipynb
|
| 48 |
+
|
| 49 |
+
# Local config
|
| 50 |
+
.env
|
| 51 |
+
.env.local
|
| 52 |
+
*.local
|
| 53 |
+
|
| 54 |
+
# Graphs and temporary files
|
| 55 |
+
graphs/
|
| 56 |
+
|
| 57 |
+
# App folder (Streamlit - not needed for this deployment)
|
| 58 |
+
app/
|
| 59 |
+
|
| 60 |
+
# Large raw data files not needed at runtime
|
| 61 |
+
data/games_details.csv
|
| 62 |
+
data/nba_games.csv
|
| 63 |
+
data/players.csv
|
| 64 |
+
data/ranking.csv
|
| 65 |
+
data/teams.csv
|
| 66 |
+
data/raw/
|
| 67 |
+
data/api_data/
|
.gitattributes
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
data/api_data/** !text !filter !merge !diff
|
.gitignore
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Virtual environments
|
| 2 |
+
venv/
|
| 3 |
+
.venv/
|
| 4 |
+
env/
|
| 5 |
+
.env/
|
| 6 |
+
|
| 7 |
+
# Python cache
|
| 8 |
+
__pycache__/
|
| 9 |
+
*.py[cod]
|
| 10 |
+
*$py.class
|
| 11 |
+
*.so
|
| 12 |
+
|
| 13 |
+
# Node modules
|
| 14 |
+
node_modules/
|
| 15 |
+
|
| 16 |
+
# Build outputs
|
| 17 |
+
web/dist/
|
| 18 |
+
dist/
|
| 19 |
+
build/
|
| 20 |
+
|
| 21 |
+
# IDE
|
| 22 |
+
.vscode/
|
| 23 |
+
.idea/
|
| 24 |
+
*.swp
|
| 25 |
+
*.swo
|
| 26 |
+
|
| 27 |
+
# OS
|
| 28 |
+
.DS_Store
|
| 29 |
+
Thumbs.db
|
| 30 |
+
|
| 31 |
+
# Logs
|
| 32 |
+
*.log
|
| 33 |
+
logs/
|
| 34 |
+
|
| 35 |
+
# Test files
|
| 36 |
+
tests/
|
| 37 |
+
test_*.py
|
| 38 |
+
*_test.py
|
| 39 |
+
|
| 40 |
+
# Jupyter
|
| 41 |
+
.ipynb_checkpoints/
|
| 42 |
+
*.ipynb
|
| 43 |
+
|
| 44 |
+
# Local config
|
| 45 |
+
.env.local
|
| 46 |
+
*.local
|
| 47 |
+
|
| 48 |
+
# Graphs
|
| 49 |
+
graphs/
|
| 50 |
+
|
| 51 |
+
# Large data files - keep only processed and models
|
| 52 |
+
data/games_details.csv
|
| 53 |
+
data/nba_games.csv
|
| 54 |
+
data/players.csv
|
| 55 |
+
data/ranking.csv
|
| 56 |
+
data/teams.csv
|
| 57 |
+
data/raw/
|
| 58 |
+
data/api_data/
|
| 59 |
+
|
| 60 |
+
# App folder (optional Streamlit)
|
| 61 |
+
app/
|
| 62 |
+
|
| 63 |
+
# Package lock (let npm generate fresh)
|
| 64 |
+
web/package-lock.json
|
Dockerfile
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# syntax=docker/dockerfile:1
|
| 2 |
+
|
| 3 |
+
# ============================================================================
|
| 4 |
+
# NBA Sage Predictor - Hugging Face Spaces Docker Image
|
| 5 |
+
# ============================================================================
|
| 6 |
+
|
| 7 |
+
FROM python:3.11-slim
|
| 8 |
+
|
| 9 |
+
# Set environment variables
|
| 10 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 11 |
+
ENV PYTHONUNBUFFERED=1
|
| 12 |
+
ENV PORT=7860
|
| 13 |
+
|
| 14 |
+
# Install system dependencies
|
| 15 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 16 |
+
curl \
|
| 17 |
+
git \
|
| 18 |
+
build-essential \
|
| 19 |
+
&& curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
|
| 20 |
+
&& apt-get install -y nodejs \
|
| 21 |
+
&& apt-get clean \
|
| 22 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 23 |
+
|
| 24 |
+
# Create app directory
|
| 25 |
+
WORKDIR /app
|
| 26 |
+
|
| 27 |
+
# Copy requirements first for better caching
|
| 28 |
+
COPY requirements.txt .
|
| 29 |
+
|
| 30 |
+
# Install Python dependencies
|
| 31 |
+
RUN pip install --no-cache-dir --upgrade pip && \
|
| 32 |
+
pip install --no-cache-dir -r requirements.txt
|
| 33 |
+
|
| 34 |
+
# Copy the web frontend package files
|
| 35 |
+
COPY web/package*.json ./web/
|
| 36 |
+
WORKDIR /app/web
|
| 37 |
+
|
| 38 |
+
# Install Node dependencies
|
| 39 |
+
RUN npm ci
|
| 40 |
+
|
| 41 |
+
# Copy web source files and build
|
| 42 |
+
COPY web/ .
|
| 43 |
+
RUN npm run build
|
| 44 |
+
|
| 45 |
+
# Move back to app root
|
| 46 |
+
WORKDIR /app
|
| 47 |
+
|
| 48 |
+
# Copy built frontend to static folder
|
| 49 |
+
RUN mkdir -p static && cp -r web/dist/* static/
|
| 50 |
+
|
| 51 |
+
# Copy Python source code
|
| 52 |
+
COPY src/ ./src/
|
| 53 |
+
COPY api/ ./api/
|
| 54 |
+
COPY server.py .
|
| 55 |
+
|
| 56 |
+
# Copy data and models
|
| 57 |
+
COPY data/processed/ ./data/processed/
|
| 58 |
+
COPY models/ ./models/
|
| 59 |
+
|
| 60 |
+
# Create data directories for runtime
|
| 61 |
+
RUN mkdir -p data/predictions data/injuries
|
| 62 |
+
|
| 63 |
+
# Expose port
|
| 64 |
+
EXPOSE 7860
|
| 65 |
+
|
| 66 |
+
# Health check
|
| 67 |
+
HEALTHCHECK --interval=30s --timeout=30s --start-period=120s --retries=3 \
|
| 68 |
+
CMD curl -f http://localhost:7860/api/health || exit 1
|
| 69 |
+
|
| 70 |
+
# Run the production server
|
| 71 |
+
CMD ["python", "server.py"]
|
README.md
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: NBA Sage Predictor
|
| 3 |
+
emoji: 🏀
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: yellow
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: true
|
| 8 |
+
license: mit
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# NBA Sage - ML Prediction System
|
| 12 |
+
|
| 13 |
+
An advanced NBA game prediction system using machine learning and ELO ratings.
|
| 14 |
+
|
| 15 |
+
## Features
|
| 16 |
+
|
| 17 |
+
- 🎮 **Live Game Tracking** - Real-time scores and predictions
|
| 18 |
+
- 🎯 **Game Predictions** - ML-powered win probability predictions
|
| 19 |
+
- 📊 **Model Accuracy** - Track prediction performance
|
| 20 |
+
- 🏆 **MVP Race** - Current MVP candidate rankings
|
| 21 |
+
- 👑 **Championship Odds** - Team championship probability rankings
|
| 22 |
+
- ⚔️ **Head to Head** - Compare any two teams
|
| 23 |
+
|
| 24 |
+
## Tech Stack
|
| 25 |
+
|
| 26 |
+
- **Frontend**: React + Vite
|
| 27 |
+
- **Backend**: Flask API
|
| 28 |
+
- **ML**: XGBoost, LightGBM, ELO Rating System
|
| 29 |
+
- **Data**: NBA API
|
| 30 |
+
|
| 31 |
+
## Author
|
| 32 |
+
|
| 33 |
+
Built with ❤️ for NBA fans and data enthusiasts.
|
api/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
API package for NBA ML Predictor
|
| 3 |
+
"""
|
api/api.py
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Flask Backend
|
| 3 |
+
=========================================
|
| 4 |
+
REST API for the React frontend.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from flask import Flask, jsonify, request
|
| 8 |
+
from flask_cors import CORS
|
| 9 |
+
import sys
|
| 10 |
+
import logging
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
# Configure logging to reduce verbosity
|
| 14 |
+
logging.getLogger("httpx").setLevel(logging.WARNING)
|
| 15 |
+
logging.getLogger("src.injury_collector").setLevel(logging.WARNING)
|
| 16 |
+
logging.getLogger("src.prediction_tracker").setLevel(logging.WARNING)
|
| 17 |
+
logging.getLogger("chromadb").setLevel(logging.WARNING)
|
| 18 |
+
|
| 19 |
+
# Add project root to path
|
| 20 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 21 |
+
|
| 22 |
+
from src.prediction_pipeline import PredictionPipeline
|
| 23 |
+
|
| 24 |
+
# Initialize Flask app
|
| 25 |
+
app = Flask(__name__)
|
| 26 |
+
CORS(app, origins=["http://localhost:5173", "http://localhost:3000", "http://127.0.0.1:5173"])
|
| 27 |
+
|
| 28 |
+
# Initialize prediction pipeline (ELO ratings loaded on startup)
|
| 29 |
+
print("Initializing prediction pipeline...")
|
| 30 |
+
pipeline = PredictionPipeline()
|
| 31 |
+
print("Pipeline ready!")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@app.route("/api/health")
|
| 35 |
+
def health_check():
|
| 36 |
+
"""Health check endpoint."""
|
| 37 |
+
return jsonify({"status": "healthy", "pipeline_ready": pipeline is not None})
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@app.route("/api/games/live")
|
| 41 |
+
def get_live_games():
|
| 42 |
+
"""Get today's games with live scores and predictions."""
|
| 43 |
+
games = pipeline.get_games_with_predictions()
|
| 44 |
+
|
| 45 |
+
# Process each game - save predictions for upcoming, update results for completed
|
| 46 |
+
for game in games:
|
| 47 |
+
status = game.get("status")
|
| 48 |
+
game_id = game.get("game_id")
|
| 49 |
+
pred = game.get("prediction", {})
|
| 50 |
+
|
| 51 |
+
if game_id and pred:
|
| 52 |
+
if status == "NOT_STARTED":
|
| 53 |
+
# Only save if prediction doesn't already exist
|
| 54 |
+
existing = pipeline.prediction_tracker.get_prediction(game_id)
|
| 55 |
+
if not existing:
|
| 56 |
+
pipeline.prediction_tracker.save_prediction(game_id, {
|
| 57 |
+
"game_date": game.get("game_date"),
|
| 58 |
+
"home_team": game.get("home_team"),
|
| 59 |
+
"away_team": game.get("away_team"),
|
| 60 |
+
"predicted_winner": pred.get("predicted_winner"),
|
| 61 |
+
"home_win_probability": pred.get("home_win_probability"),
|
| 62 |
+
"away_win_probability": pred.get("away_win_probability"),
|
| 63 |
+
"confidence": pred.get("confidence"),
|
| 64 |
+
"home_elo": pred.get("home_elo"),
|
| 65 |
+
"away_elo": pred.get("away_elo"),
|
| 66 |
+
})
|
| 67 |
+
elif status == "FINAL":
|
| 68 |
+
# Update result for completed game
|
| 69 |
+
home_score = game.get("home_score", 0)
|
| 70 |
+
away_score = game.get("away_score", 0)
|
| 71 |
+
actual_winner = game.get("home_team") if home_score > away_score else game.get("away_team")
|
| 72 |
+
|
| 73 |
+
# Check if prediction exists, if not save it first
|
| 74 |
+
existing = pipeline.prediction_tracker.get_prediction(game_id)
|
| 75 |
+
if not existing:
|
| 76 |
+
# Save prediction first (for games completed before tracking started)
|
| 77 |
+
pipeline.prediction_tracker.save_prediction(game_id, {
|
| 78 |
+
"game_date": game.get("game_date"),
|
| 79 |
+
"home_team": game.get("home_team"),
|
| 80 |
+
"away_team": game.get("away_team"),
|
| 81 |
+
"predicted_winner": pred.get("predicted_winner"),
|
| 82 |
+
"home_win_probability": pred.get("home_win_probability"),
|
| 83 |
+
"away_win_probability": pred.get("away_win_probability"),
|
| 84 |
+
"confidence": pred.get("confidence"),
|
| 85 |
+
"home_elo": pred.get("home_elo"),
|
| 86 |
+
"away_elo": pred.get("away_elo"),
|
| 87 |
+
})
|
| 88 |
+
|
| 89 |
+
# Now update with result
|
| 90 |
+
pipeline.prediction_tracker.update_result(
|
| 91 |
+
game_id,
|
| 92 |
+
actual_winner,
|
| 93 |
+
home_score,
|
| 94 |
+
away_score
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
# Mark whether our prediction was correct
|
| 98 |
+
game["prediction_correct"] = pred.get("predicted_winner") == actual_winner
|
| 99 |
+
|
| 100 |
+
# Separate by status
|
| 101 |
+
return jsonify({
|
| 102 |
+
"live": [g for g in games if g.get("status") == "IN_PROGRESS"],
|
| 103 |
+
"final": [g for g in games if g.get("status") == "FINAL"],
|
| 104 |
+
"upcoming": [g for g in games if g.get("status") == "NOT_STARTED"],
|
| 105 |
+
"total": len(games)
|
| 106 |
+
})
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@app.route("/api/games/upcoming")
|
| 110 |
+
def get_upcoming_games():
|
| 111 |
+
"""Get upcoming games for the next N days."""
|
| 112 |
+
days = request.args.get("days", 7, type=int)
|
| 113 |
+
days = max(1, min(days, 14)) # Clamp between 1-14
|
| 114 |
+
|
| 115 |
+
games = pipeline.get_upcoming_games(days_ahead=days)
|
| 116 |
+
|
| 117 |
+
# Add predictions to each game
|
| 118 |
+
enriched_games = []
|
| 119 |
+
for game in games:
|
| 120 |
+
pred = pipeline.predict_game(game["home_team"], game["away_team"])
|
| 121 |
+
enriched_games.append({
|
| 122 |
+
**game,
|
| 123 |
+
"prediction": pred
|
| 124 |
+
})
|
| 125 |
+
|
| 126 |
+
return jsonify({"games": enriched_games, "count": len(enriched_games)})
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
@app.route("/api/predict")
|
| 130 |
+
def predict_game():
|
| 131 |
+
"""Predict outcome for a single game."""
|
| 132 |
+
home = request.args.get("home", "").upper()
|
| 133 |
+
away = request.args.get("away", "").upper()
|
| 134 |
+
|
| 135 |
+
if not home or not away:
|
| 136 |
+
return jsonify({"error": "Missing home or away team parameter"}), 400
|
| 137 |
+
|
| 138 |
+
prediction = pipeline.predict_game(home, away)
|
| 139 |
+
return jsonify(prediction)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@app.route("/api/accuracy")
|
| 143 |
+
def get_accuracy():
|
| 144 |
+
"""Get comprehensive model accuracy statistics."""
|
| 145 |
+
stats = pipeline.get_accuracy_stats()
|
| 146 |
+
recent = pipeline.get_recent_predictions(50) # Get more for analysis
|
| 147 |
+
|
| 148 |
+
# Calculate additional metrics
|
| 149 |
+
completed = [p for p in recent if p.get("is_correct", -1) >= 0]
|
| 150 |
+
correct = [p for p in completed if p.get("is_correct") == 1]
|
| 151 |
+
|
| 152 |
+
# Home vs Away accuracy
|
| 153 |
+
home_picks = [p for p in completed if p.get("predicted_winner") == p.get("home_team")]
|
| 154 |
+
home_correct = [p for p in home_picks if p.get("is_correct") == 1]
|
| 155 |
+
away_picks = [p for p in completed if p.get("predicted_winner") == p.get("away_team")]
|
| 156 |
+
away_correct = [p for p in away_picks if p.get("is_correct") == 1]
|
| 157 |
+
|
| 158 |
+
# Current streak
|
| 159 |
+
streak = 0
|
| 160 |
+
streak_type = None
|
| 161 |
+
for p in sorted(completed, key=lambda x: x.get("updated_at", ""), reverse=True):
|
| 162 |
+
if streak_type is None:
|
| 163 |
+
streak_type = "W" if p.get("is_correct") == 1 else "L"
|
| 164 |
+
if (p.get("is_correct") == 1 and streak_type == "W") or (p.get("is_correct") == 0 and streak_type == "L"):
|
| 165 |
+
streak += 1
|
| 166 |
+
else:
|
| 167 |
+
break
|
| 168 |
+
|
| 169 |
+
# Last 10 games
|
| 170 |
+
last_10 = completed[:10] if len(completed) >= 10 else completed
|
| 171 |
+
last_10_correct = sum(1 for p in last_10 if p.get("is_correct") == 1)
|
| 172 |
+
|
| 173 |
+
# Average win probability for correct vs incorrect predictions
|
| 174 |
+
correct_avg_prob = sum(max(p.get("home_win_prob", 0.5), p.get("away_win_prob", 0.5)) for p in correct) / len(correct) if correct else 0
|
| 175 |
+
incorrect = [p for p in completed if p.get("is_correct") == 0]
|
| 176 |
+
incorrect_avg_prob = sum(max(p.get("home_win_prob", 0.5), p.get("away_win_prob", 0.5)) for p in incorrect) / len(incorrect) if incorrect else 0
|
| 177 |
+
|
| 178 |
+
# Build enhanced response
|
| 179 |
+
enhanced_stats = {
|
| 180 |
+
**stats,
|
| 181 |
+
"home_pick_accuracy": len(home_correct) / len(home_picks) if home_picks else 0,
|
| 182 |
+
"away_pick_accuracy": len(away_correct) / len(away_picks) if away_picks else 0,
|
| 183 |
+
"home_picks_total": len(home_picks),
|
| 184 |
+
"away_picks_total": len(away_picks),
|
| 185 |
+
"current_streak": streak,
|
| 186 |
+
"streak_type": streak_type or "N/A",
|
| 187 |
+
"last_10_record": f"{last_10_correct}-{len(last_10) - last_10_correct}",
|
| 188 |
+
"last_10_accuracy": last_10_correct / len(last_10) if last_10 else 0,
|
| 189 |
+
"avg_probability_correct": correct_avg_prob,
|
| 190 |
+
"avg_probability_incorrect": incorrect_avg_prob,
|
| 191 |
+
"pending_predictions": len([p for p in recent if p.get("is_correct", -1) == -1]),
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
return jsonify({
|
| 195 |
+
"stats": enhanced_stats,
|
| 196 |
+
"recent_predictions": recent[:20] # Return 20 most recent for display
|
| 197 |
+
})
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@app.route("/api/mvp")
|
| 201 |
+
def get_mvp_race():
|
| 202 |
+
"""Get current MVP race standings."""
|
| 203 |
+
mvp_df = pipeline.get_mvp_race()
|
| 204 |
+
|
| 205 |
+
# Convert DataFrame to list of dicts
|
| 206 |
+
candidates = []
|
| 207 |
+
for idx, row in mvp_df.iterrows():
|
| 208 |
+
candidates.append({
|
| 209 |
+
"rank": len(candidates) + 1,
|
| 210 |
+
"name": row["PLAYER_NAME"],
|
| 211 |
+
"ppg": round(float(row["PTS"]), 1),
|
| 212 |
+
"rpg": round(float(row["REB"]), 1),
|
| 213 |
+
"apg": round(float(row["AST"]), 1),
|
| 214 |
+
"mvp_score": round(float(row["mvp_score"]), 1),
|
| 215 |
+
"similarity": round(float(row["mvp_similarity"]) * 100, 1)
|
| 216 |
+
})
|
| 217 |
+
|
| 218 |
+
return jsonify({"candidates": candidates})
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
@app.route("/api/championship")
|
| 222 |
+
def get_championship_odds():
|
| 223 |
+
"""Get current championship odds."""
|
| 224 |
+
champ_df = pipeline.get_championship_odds()
|
| 225 |
+
|
| 226 |
+
# Convert DataFrame to list of dicts
|
| 227 |
+
teams = []
|
| 228 |
+
for idx, row in champ_df.iterrows():
|
| 229 |
+
# ChampionshipPredictor returns: TEAM_ABBREVIATION, W_PCT, playoff_experience, strength_rating, champ_probability
|
| 230 |
+
teams.append({
|
| 231 |
+
"rank": len(teams) + 1,
|
| 232 |
+
"team": row.get("TEAM_ABBREVIATION", row.get("Team", "N/A")),
|
| 233 |
+
"odds": round(float(row.get("champ_probability", row.get("Championship_Odds", 0))) * 100, 1),
|
| 234 |
+
"win_pct": round(float(row.get("W_PCT", 0.5)) * 100, 1)
|
| 235 |
+
})
|
| 236 |
+
|
| 237 |
+
return jsonify({"teams": teams})
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
@app.route("/api/teams")
|
| 241 |
+
def get_teams():
|
| 242 |
+
"""Get list of all NBA teams."""
|
| 243 |
+
from src.config import NBA_TEAMS
|
| 244 |
+
|
| 245 |
+
teams = [{"id": tid, "abbrev": abbrev} for tid, abbrev in NBA_TEAMS.items()]
|
| 246 |
+
teams.sort(key=lambda x: x["abbrev"])
|
| 247 |
+
|
| 248 |
+
return jsonify({"teams": teams})
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
@app.route("/api/roster/<team_abbrev>")
|
| 252 |
+
def get_team_roster(team_abbrev):
|
| 253 |
+
"""Get projected starting 5 for a team using LIVE 2025-26 season stats."""
|
| 254 |
+
try:
|
| 255 |
+
from nba_api.stats.endpoints import leaguedashplayerstats
|
| 256 |
+
import time
|
| 257 |
+
|
| 258 |
+
# Fetch current season player stats from NBA API
|
| 259 |
+
time.sleep(0.5) # Rate limiting
|
| 260 |
+
stats = leaguedashplayerstats.LeagueDashPlayerStats(
|
| 261 |
+
season='2025-26',
|
| 262 |
+
per_mode_detailed='PerGame'
|
| 263 |
+
)
|
| 264 |
+
df = stats.get_data_frames()[0]
|
| 265 |
+
|
| 266 |
+
# Filter by team
|
| 267 |
+
team_abbrev = team_abbrev.upper()
|
| 268 |
+
team_players = df[df['TEAM_ABBREVIATION'] == team_abbrev].copy()
|
| 269 |
+
|
| 270 |
+
if team_players.empty:
|
| 271 |
+
return jsonify({"team": team_abbrev, "starters": []})
|
| 272 |
+
|
| 273 |
+
# Sort by minutes played (starters play the most minutes)
|
| 274 |
+
team_players = team_players.sort_values('MIN', ascending=False)
|
| 275 |
+
|
| 276 |
+
# Get top 5 players (projected starters)
|
| 277 |
+
starters = []
|
| 278 |
+
for _, player in team_players.head(5).iterrows():
|
| 279 |
+
starters.append({
|
| 280 |
+
'name': player['PLAYER_NAME'],
|
| 281 |
+
'position': player.get('POSITION', ''),
|
| 282 |
+
'pts': round(float(player['PTS']), 1),
|
| 283 |
+
'reb': round(float(player.get('REB', 0)), 1),
|
| 284 |
+
'ast': round(float(player.get('AST', 0)), 1),
|
| 285 |
+
'min': round(float(player.get('MIN', 0)), 1)
|
| 286 |
+
})
|
| 287 |
+
|
| 288 |
+
return jsonify({"team": team_abbrev, "starters": starters})
|
| 289 |
+
|
| 290 |
+
except Exception as e:
|
| 291 |
+
print(f"Error fetching roster for {team_abbrev}: {e}")
|
| 292 |
+
# Fallback to pipeline method
|
| 293 |
+
roster = pipeline.get_team_roster(team_abbrev.upper())
|
| 294 |
+
return jsonify({"team": team_abbrev.upper(), "starters": roster})
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
@app.route("/api/standings")
|
| 298 |
+
def get_standings():
|
| 299 |
+
"""Get current NBA standings by conference."""
|
| 300 |
+
# Team name to abbreviation mapping for fallback - includes team names, cities, and variants
|
| 301 |
+
TEAM_NAME_TO_ABBREV = {
|
| 302 |
+
# Team nicknames
|
| 303 |
+
"hawks": "ATL", "celtics": "BOS", "nets": "BKN", "hornets": "CHA",
|
| 304 |
+
"bulls": "CHI", "cavaliers": "CLE", "cavs": "CLE", "mavericks": "DAL", "mavs": "DAL",
|
| 305 |
+
"nuggets": "DEN", "pistons": "DET", "warriors": "GSW", "dubs": "GSW",
|
| 306 |
+
"rockets": "HOU", "pacers": "IND", "clippers": "LAC", "lakers": "LAL",
|
| 307 |
+
"grizzlies": "MEM", "heat": "MIA", "bucks": "MIL", "timberwolves": "MIN", "wolves": "MIN",
|
| 308 |
+
"pelicans": "NOP", "pels": "NOP", "knicks": "NYK", "thunder": "OKC",
|
| 309 |
+
"magic": "ORL", "76ers": "PHI", "sixers": "PHI", "suns": "PHX",
|
| 310 |
+
"trail blazers": "POR", "blazers": "POR", "trailblazers": "POR",
|
| 311 |
+
"kings": "SAC", "spurs": "SAS", "raptors": "TOR", "jazz": "UTA", "wizards": "WAS",
|
| 312 |
+
# City names
|
| 313 |
+
"atlanta": "ATL", "boston": "BOS", "brooklyn": "BKN", "charlotte": "CHA",
|
| 314 |
+
"chicago": "CHI", "cleveland": "CLE", "dallas": "DAL", "denver": "DEN",
|
| 315 |
+
"detroit": "DET", "golden state": "GSW", "houston": "HOU", "indiana": "IND",
|
| 316 |
+
"los angeles c": "LAC", "la c": "LAC", "los angeles l": "LAL", "la l": "LAL",
|
| 317 |
+
"memphis": "MEM", "miami": "MIA", "milwaukee": "MIL", "minnesota": "MIN",
|
| 318 |
+
"new orleans": "NOP", "new york": "NYK", "oklahoma city": "OKC", "oklahoma": "OKC",
|
| 319 |
+
"orlando": "ORL", "philadelphia": "PHI", "phoenix": "PHX", "portland": "POR",
|
| 320 |
+
"sacramento": "SAC", "san antonio": "SAS", "toronto": "TOR", "utah": "UTA", "washington": "WAS"
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
def get_abbrev_from_name(team_name):
|
| 324 |
+
"""Extract abbreviation from team name."""
|
| 325 |
+
team_name_lower = team_name.lower().strip()
|
| 326 |
+
for name_part, abbrev in TEAM_NAME_TO_ABBREV.items():
|
| 327 |
+
if name_part in team_name_lower:
|
| 328 |
+
return abbrev
|
| 329 |
+
return ""
|
| 330 |
+
|
| 331 |
+
try:
|
| 332 |
+
from nba_api.stats.endpoints import leaguestandings
|
| 333 |
+
import time
|
| 334 |
+
|
| 335 |
+
time.sleep(0.5)
|
| 336 |
+
standings = leaguestandings.LeagueStandings(season='2025-26')
|
| 337 |
+
df = standings.get_data_frames()[0]
|
| 338 |
+
|
| 339 |
+
# Debug: print column names on first run
|
| 340 |
+
print(f"Standings columns: {list(df.columns)}")
|
| 341 |
+
|
| 342 |
+
east = []
|
| 343 |
+
west = []
|
| 344 |
+
|
| 345 |
+
for _, row in df.iterrows():
|
| 346 |
+
# Try multiple possible column names for team abbreviation
|
| 347 |
+
abbrev = ""
|
| 348 |
+
for col_name in ["TeamSlug", "TeamAbbreviation", "TEAM_ABBREVIATION", "team_abbreviation"]:
|
| 349 |
+
if col_name in row.index and row.get(col_name):
|
| 350 |
+
abbrev = str(row.get(col_name)).upper().strip()
|
| 351 |
+
break
|
| 352 |
+
|
| 353 |
+
# Build team name from city + name
|
| 354 |
+
team_city = str(row.get("TeamCity", ""))
|
| 355 |
+
team_name = str(row.get("TeamName", ""))
|
| 356 |
+
full_team_name = f"{team_city} {team_name}".strip()
|
| 357 |
+
|
| 358 |
+
# If abbreviation still not found, extract from team name
|
| 359 |
+
if not abbrev:
|
| 360 |
+
abbrev = get_abbrev_from_name(full_team_name)
|
| 361 |
+
|
| 362 |
+
team_data = {
|
| 363 |
+
"team": abbrev, # Use abbreviation for frontend TeamLogo component
|
| 364 |
+
"team_abbrev": abbrev, # Duplicate for clarity
|
| 365 |
+
"team_name": full_team_name,
|
| 366 |
+
"wins": int(row.get("WINS", 0)),
|
| 367 |
+
"losses": int(row.get("LOSSES", 0)),
|
| 368 |
+
"win_pct": float(row.get("WinPCT", 0)),
|
| 369 |
+
"gb": str(row.get("ConferenceGamesBack", "-")),
|
| 370 |
+
"streak": str(row.get("strCurrentStreak", "-")),
|
| 371 |
+
"conference": row.get("Conference", ""),
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
if row.get("Conference") == "East":
|
| 375 |
+
east.append(team_data)
|
| 376 |
+
else:
|
| 377 |
+
west.append(team_data)
|
| 378 |
+
|
| 379 |
+
# Sort by wins descending
|
| 380 |
+
east.sort(key=lambda x: (-x["wins"], x["losses"]))
|
| 381 |
+
west.sort(key=lambda x: (-x["wins"], x["losses"]))
|
| 382 |
+
|
| 383 |
+
return jsonify({"east": east, "west": west})
|
| 384 |
+
|
| 385 |
+
except Exception as e:
|
| 386 |
+
print(f"Error fetching standings: {e}")
|
| 387 |
+
import traceback
|
| 388 |
+
traceback.print_exc()
|
| 389 |
+
return jsonify({"east": [], "west": [], "error": str(e)})
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
if __name__ == "__main__":
|
| 393 |
+
app.run(host="0.0.0.0", port=8000, debug=True)
|
data/processed/game_dataset.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:942fe53289804148c25358df751e9bbb0080dd6d72c804ceadf91f7a88310ac4
|
| 3 |
+
size 40839859
|
data/processed/game_features.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0d5f377ae5eede6597cf5c42cfe714ebecfcf9a2765138f1bafb21f69e121222
|
| 3 |
+
size 7078674
|
models/game_predictor.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:98ea35c665375601bf707adaf9a9233078c4a94aff8a5d281b2a16acf2ec3f86
|
| 3 |
+
size 3664164
|
process.md
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NBA ML Prediction System - Process Guide
|
| 2 |
+
|
| 3 |
+
## Prerequisites
|
| 4 |
+
|
| 5 |
+
Before starting, ensure you have:
|
| 6 |
+
- Python 3.10+ installed
|
| 7 |
+
- Virtual environment activated: `.\venv\Scripts\activate`
|
| 8 |
+
- All dependencies installed: `pip install -r requirements.txt`
|
| 9 |
+
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
## Step 1: Collect Training Data (COMPREHENSIVE)
|
| 13 |
+
|
| 14 |
+
**Purpose**: Fetch 10 seasons of ALL NBA stats from the API including:
|
| 15 |
+
- Games, Team Stats, Player Stats (basic)
|
| 16 |
+
- Advanced Metrics (NET_RTG, PACE, PIE, TS%, eFG%)
|
| 17 |
+
- Clutch Stats (performance in close games)
|
| 18 |
+
- Hustle Stats (deflections, charges, loose balls)
|
| 19 |
+
- Defense Stats
|
| 20 |
+
|
| 21 |
+
**File**: `src/data_collector.py`
|
| 22 |
+
|
| 23 |
+
**Command**:
|
| 24 |
+
```bash
|
| 25 |
+
python -m src.data_collector
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
**Duration**: ~2-4 hours (has resume capability if interrupted)
|
| 29 |
+
|
| 30 |
+
**Output Files** (in `data/raw/`):
|
| 31 |
+
- `all_games.parquet` - Game results
|
| 32 |
+
- `all_team_stats.parquet` - Basic team stats
|
| 33 |
+
- `all_team_advanced.parquet` - NET_RTG, PACE, PIE, TS%
|
| 34 |
+
- `all_team_clutch.parquet` - Close game performance
|
| 35 |
+
- `all_team_hustle.parquet` - Deflections, charges
|
| 36 |
+
- `all_team_defense.parquet` - Defensive metrics
|
| 37 |
+
- `all_player_stats.parquet` - Player averages
|
| 38 |
+
- `all_player_advanced.parquet` - PER, USG%, TS%
|
| 39 |
+
- `all_player_clutch.parquet` - Player clutch stats
|
| 40 |
+
- `all_player_hustle.parquet` - Player hustle metrics
|
| 41 |
+
|
| 42 |
+
---
|
| 43 |
+
|
| 44 |
+
## Step 2: Generate Features
|
| 45 |
+
|
| 46 |
+
**Purpose**: Create ~50+ features including ELO, rolling stats, momentum, rest/fatigue
|
| 47 |
+
|
| 48 |
+
**File**: `src/feature_engineering.py`
|
| 49 |
+
|
| 50 |
+
**Command**:
|
| 51 |
+
```bash
|
| 52 |
+
python -m src.feature_engineering --process
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
**Duration**: ~30-60 minutes
|
| 56 |
+
|
| 57 |
+
**Output Files**:
|
| 58 |
+
- `data/processed/game_features.parquet`
|
| 59 |
+
|
| 60 |
+
**Features Generated**:
|
| 61 |
+
- ELO ratings (team_elo, opponent_elo, elo_diff, elo_win_prob)
|
| 62 |
+
- Rolling stats (PTS/AST/REB/FG_PCT last 5/10/20 games)
|
| 63 |
+
- Defensive stats (STL, BLK, DREB rolling)
|
| 64 |
+
- Momentum (wins_last5, hot_streak, cold_streak, plus_minus)
|
| 65 |
+
- Rest/fatigue (days_rest, back_to_back, games_last_week)
|
| 66 |
+
- Season averages (all stats)
|
| 67 |
+
- Team advanced metrics (NET_RTG, PACE, clutch, hustle)
|
| 68 |
+
- Player aggregations (top players avg, star concentration)
|
| 69 |
+
|
| 70 |
+
---
|
| 71 |
+
|
| 72 |
+
## Step 3: Build Dataset
|
| 73 |
+
|
| 74 |
+
**Purpose**: Split data into train/val/test and prepare for training
|
| 75 |
+
|
| 76 |
+
**File**: `src/preprocessing.py`
|
| 77 |
+
|
| 78 |
+
**Command**:
|
| 79 |
+
```bash
|
| 80 |
+
python -m src.preprocessing --build
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
**Output Files**:
|
| 84 |
+
- `data/processed/game_dataset.joblib`
|
| 85 |
+
|
| 86 |
+
**What It Does**:
|
| 87 |
+
- Automatically detects ALL numeric features
|
| 88 |
+
- Splits by season (no data leakage)
|
| 89 |
+
- Scales and imputes missing values
|
| 90 |
+
|
| 91 |
+
---
|
| 92 |
+
|
| 93 |
+
## Step 4: Train Model
|
| 94 |
+
|
| 95 |
+
**Purpose**: Train XGBoost + LightGBM ensemble on ALL features
|
| 96 |
+
|
| 97 |
+
**File**: `src/models/game_predictor.py`
|
| 98 |
+
|
| 99 |
+
**Command**:
|
| 100 |
+
```bash
|
| 101 |
+
python -m src.models.game_predictor --train
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
**Expected Output**:
|
| 105 |
+
```
|
| 106 |
+
Loading dataset...
|
| 107 |
+
Training XGBoost model...
|
| 108 |
+
Training LightGBM model...
|
| 109 |
+
Training complete!
|
| 110 |
+
|
| 111 |
+
=== Test Metrics ===
|
| 112 |
+
Test Accuracy: 0.67XX
|
| 113 |
+
Test Brier Score: 0.21XX
|
| 114 |
+
✓ Target accuracy (>65%) achieved!
|
| 115 |
+
|
| 116 |
+
=== Top Features ===
|
| 117 |
+
feature xgb_importance lgb_importance avg_importance
|
| 118 |
+
0 elo_diff 0.XXX 0.XXX 0.XXX
|
| 119 |
+
1 elo_win_prob 0.XXX 0.XXX 0.XXX
|
| 120 |
+
...
|
| 121 |
+
|
| 122 |
+
Saved model to models/game_predictor.joblib
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
**Output Files**:
|
| 126 |
+
- `models/game_predictor.joblib`
|
| 127 |
+
|
| 128 |
+
---
|
| 129 |
+
|
| 130 |
+
## Step 5: Generate Visualizations
|
| 131 |
+
|
| 132 |
+
**Purpose**: Create analysis charts saved to `graphs/`
|
| 133 |
+
|
| 134 |
+
**File**: `src/visualization.py`
|
| 135 |
+
|
| 136 |
+
**Command**:
|
| 137 |
+
```bash
|
| 138 |
+
python -m src.visualization
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
**Output Files** (in `graphs/`):
|
| 142 |
+
- `mvp_race.png`
|
| 143 |
+
- `mvp_stat_comparison.png`
|
| 144 |
+
- `championship_odds_pie.png`
|
| 145 |
+
- `strength_vs_experience.png`
|
| 146 |
+
|
| 147 |
+
---
|
| 148 |
+
|
| 149 |
+
## Step 6: Run the Dashboard
|
| 150 |
+
|
| 151 |
+
**Purpose**: Launch Streamlit web interface
|
| 152 |
+
|
| 153 |
+
**File**: `app/app.py`
|
| 154 |
+
|
| 155 |
+
**Command**:
|
| 156 |
+
```bash
|
| 157 |
+
streamlit run app/app.py
|
| 158 |
+
```
|
| 159 |
+
|
| 160 |
+
**Opens**: `http://localhost:8501`
|
| 161 |
+
|
| 162 |
+
**Pages**:
|
| 163 |
+
- 🔴 Live Games - Real-time scores with predictions
|
| 164 |
+
- 🎮 Game Predictions - Predict any matchup
|
| 165 |
+
- 📈 Model Accuracy - Track prediction accuracy
|
| 166 |
+
- 🏆 MVP Race - Top candidates
|
| 167 |
+
- 👑 Championship Odds - Team probabilities
|
| 168 |
+
- 📊 Team Explorer - Stats & injuries
|
| 169 |
+
|
| 170 |
+
---
|
| 171 |
+
|
| 172 |
+
## Quick Reference
|
| 173 |
+
|
| 174 |
+
| Step | Command | Duration |
|
| 175 |
+
|------|---------|----------|
|
| 176 |
+
| 1 | `python -m src.data_collector` | 2-4 hours |
|
| 177 |
+
| 2 | `python -m src.feature_engineering --process` | 30-60 min |
|
| 178 |
+
| 3 | `python -m src.preprocessing --build` | 1-2 min |
|
| 179 |
+
| 4 | `python -m src.models.game_predictor --train` | 2-5 min |
|
| 180 |
+
| 5 | `python -m src.visualization` | 10 sec |
|
| 181 |
+
| 6 | `streamlit run app/app.py` | Immediate |
|
| 182 |
+
|
| 183 |
+
---
|
| 184 |
+
|
| 185 |
+
## Live Data Features (NEW)
|
| 186 |
+
|
| 187 |
+
### View Live Scoreboard
|
| 188 |
+
```bash
|
| 189 |
+
python -m src.live_data_collector
|
| 190 |
+
```
|
| 191 |
+
Shows today's NBA games with live scores.
|
| 192 |
+
|
| 193 |
+
### Continuous Learning
|
| 194 |
+
```bash
|
| 195 |
+
# Ingest completed games
|
| 196 |
+
python -m src.continuous_learner --ingest
|
| 197 |
+
|
| 198 |
+
# Full update cycle (ingest + features + retrain)
|
| 199 |
+
python -m src.continuous_learner --update
|
| 200 |
+
|
| 201 |
+
# Update without retraining
|
| 202 |
+
python -m src.continuous_learner --update --no-retrain
|
| 203 |
+
```
|
| 204 |
+
|
| 205 |
+
### Check Prediction Accuracy
|
| 206 |
+
```bash
|
| 207 |
+
python -m src.prediction_tracker
|
| 208 |
+
```
|
| 209 |
+
Shows accuracy stats from ChromaDB.
|
| 210 |
+
|
| 211 |
+
---
|
| 212 |
+
|
| 213 |
+
## Data Flow
|
| 214 |
+
|
| 215 |
+
```
|
| 216 |
+
NBA API
|
| 217 |
+
↓
|
| 218 |
+
[Step 1: data_collector.py]
|
| 219 |
+
↓
|
| 220 |
+
data/raw/*.parquet (10+ files)
|
| 221 |
+
↓
|
| 222 |
+
[Step 2: feature_engineering.py]
|
| 223 |
+
↓
|
| 224 |
+
data/processed/game_features.parquet (~50+ features)
|
| 225 |
+
↓
|
| 226 |
+
[Step 3: preprocessing.py]
|
| 227 |
+
↓
|
| 228 |
+
data/processed/game_dataset.joblib (train/val/test splits)
|
| 229 |
+
↓
|
| 230 |
+
[Step 4: game_predictor.py]
|
| 231 |
+
↓
|
| 232 |
+
models/game_predictor.joblib (trained ensemble)
|
| 233 |
+
↓
|
| 234 |
+
[Step 6: app.py] → Web Dashboard
|
| 235 |
+
↓
|
| 236 |
+
ChromaDB (prediction tracking)
|
| 237 |
+
```
|
| 238 |
+
|
| 239 |
+
---
|
| 240 |
+
|
| 241 |
+
## Troubleshooting
|
| 242 |
+
|
| 243 |
+
### ModuleNotFoundError: No module named 'src'
|
| 244 |
+
Ensure you're in the project root directory.
|
| 245 |
+
|
| 246 |
+
### API Rate Limit Errors
|
| 247 |
+
The data collector handles this with exponential backoff. Just let it retry.
|
| 248 |
+
|
| 249 |
+
### Resume Interrupted Collection
|
| 250 |
+
Just run the command again - it has checkpoint capability and will skip completed data.
|
| 251 |
+
|
| 252 |
+
### ChromaDB Connection Issues
|
| 253 |
+
Check your API key in `src/config.py` under `ChromaDBConfig`.
|
| 254 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nba_api>=1.4.1
|
| 2 |
+
pandas>=2.0.0
|
| 3 |
+
numpy>=1.24.0
|
| 4 |
+
scikit-learn>=1.3.0
|
| 5 |
+
xgboost>=2.0.0
|
| 6 |
+
lightgbm>=4.0.0
|
| 7 |
+
streamlit>=1.28.0
|
| 8 |
+
plotly>=5.18.0
|
| 9 |
+
joblib>=1.3.0
|
| 10 |
+
tqdm>=4.66.0
|
| 11 |
+
pyarrow>=14.0.0
|
| 12 |
+
requests>=2.31.0
|
| 13 |
+
tenacity>=8.2.0
|
| 14 |
+
matplotlib>=3.8.0
|
| 15 |
+
seaborn>=0.13.0
|
| 16 |
+
chromadb-client>=0.5.0
|
| 17 |
+
streamlit-autorefresh>=1.0.1
|
| 18 |
+
apscheduler>=3.10.0
|
| 19 |
+
flask>=3.0.0
|
| 20 |
+
flask-cors>=4.0.0
|
| 21 |
+
|
server.py
ADDED
|
@@ -0,0 +1,341 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Production Server
|
| 3 |
+
=============================================
|
| 4 |
+
Serves the React frontend and Flask API for Hugging Face Spaces.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from flask import Flask, jsonify, request, send_from_directory
|
| 8 |
+
from flask_cors import CORS
|
| 9 |
+
import sys
|
| 10 |
+
import logging
|
| 11 |
+
import os
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
|
| 14 |
+
# Configure logging
|
| 15 |
+
logging.basicConfig(level=logging.INFO)
|
| 16 |
+
logging.getLogger("httpx").setLevel(logging.WARNING)
|
| 17 |
+
logging.getLogger("chromadb").setLevel(logging.WARNING)
|
| 18 |
+
|
| 19 |
+
# Add project root to path
|
| 20 |
+
ROOT_DIR = Path(__file__).parent
|
| 21 |
+
sys.path.insert(0, str(ROOT_DIR))
|
| 22 |
+
|
| 23 |
+
from src.prediction_pipeline import PredictionPipeline
|
| 24 |
+
|
| 25 |
+
# Initialize Flask app; static_folder holds the pre-built React bundle.
app = Flask(__name__, static_folder='static', static_url_path='')

# Allow all origins for Hugging Face Spaces (the Space URL is not known ahead
# of time, so CORS cannot be pinned to a single origin).
CORS(app, origins=["*"])

# Initialize prediction pipeline eagerly at import time so the first request
# is fast. On failure the server still starts: `pipeline` stays None and every
# API endpoint degrades to an explicit "Pipeline not ready" payload.
print("Initializing prediction pipeline...")
try:
    pipeline = PredictionPipeline()
    print("Pipeline ready!")
except Exception as e:
    print(f"Warning: Pipeline initialization failed: {e}")
    pipeline = None
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# ============================================================================
|
| 42 |
+
# Serve React Frontend
|
| 43 |
+
# ============================================================================
|
| 44 |
+
|
| 45 |
+
@app.route('/')
def serve_frontend():
    """Serve the React single-page app's entry point (static/index.html)."""
    return send_from_directory('static', 'index.html')
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@app.route('/<path:path>')
def serve_static(path):
    """Serve a built asset when it exists; otherwise return index.html.

    The index.html fallback lets the React router resolve client-side
    routes (e.g. /accuracy) instead of the server returning 404.
    """
    candidate = Path(app.static_folder) / path
    target = path if (candidate.exists() and candidate.is_file()) else 'index.html'
    return send_from_directory('static', target)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# ============================================================================
|
| 63 |
+
# API Endpoints
|
| 64 |
+
# ============================================================================
|
| 65 |
+
|
| 66 |
+
@app.route("/api/health")
|
| 67 |
+
def health_check():
|
| 68 |
+
"""Health check endpoint."""
|
| 69 |
+
return jsonify({"status": "healthy", "pipeline_ready": pipeline is not None})
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@app.route("/api/games/live")
|
| 73 |
+
def get_live_games():
|
| 74 |
+
"""Get today's games with live scores and predictions."""
|
| 75 |
+
if not pipeline:
|
| 76 |
+
return jsonify({"live": [], "final": [], "upcoming": [], "total": 0, "error": "Pipeline not ready"})
|
| 77 |
+
|
| 78 |
+
try:
|
| 79 |
+
games = pipeline.get_games_with_predictions()
|
| 80 |
+
|
| 81 |
+
# Process each game
|
| 82 |
+
for game in games:
|
| 83 |
+
status = game.get("status")
|
| 84 |
+
game_id = game.get("game_id")
|
| 85 |
+
pred = game.get("prediction", {})
|
| 86 |
+
|
| 87 |
+
if game_id and pred:
|
| 88 |
+
if status == "NOT_STARTED":
|
| 89 |
+
existing = pipeline.prediction_tracker.get_prediction(game_id)
|
| 90 |
+
if not existing:
|
| 91 |
+
pipeline.prediction_tracker.save_prediction(game_id, {
|
| 92 |
+
"game_date": game.get("game_date"),
|
| 93 |
+
"home_team": game.get("home_team"),
|
| 94 |
+
"away_team": game.get("away_team"),
|
| 95 |
+
"predicted_winner": pred.get("predicted_winner"),
|
| 96 |
+
"home_win_probability": pred.get("home_win_probability"),
|
| 97 |
+
"away_win_probability": pred.get("away_win_probability"),
|
| 98 |
+
"confidence": pred.get("confidence"),
|
| 99 |
+
"home_elo": pred.get("home_elo"),
|
| 100 |
+
"away_elo": pred.get("away_elo"),
|
| 101 |
+
})
|
| 102 |
+
elif status == "FINAL":
|
| 103 |
+
home_score = game.get("home_score", 0)
|
| 104 |
+
away_score = game.get("away_score", 0)
|
| 105 |
+
actual_winner = game.get("home_team") if home_score > away_score else game.get("away_team")
|
| 106 |
+
|
| 107 |
+
existing = pipeline.prediction_tracker.get_prediction(game_id)
|
| 108 |
+
if not existing:
|
| 109 |
+
pipeline.prediction_tracker.save_prediction(game_id, {
|
| 110 |
+
"game_date": game.get("game_date"),
|
| 111 |
+
"home_team": game.get("home_team"),
|
| 112 |
+
"away_team": game.get("away_team"),
|
| 113 |
+
"predicted_winner": pred.get("predicted_winner"),
|
| 114 |
+
"home_win_probability": pred.get("home_win_probability"),
|
| 115 |
+
"away_win_probability": pred.get("away_win_probability"),
|
| 116 |
+
"confidence": pred.get("confidence"),
|
| 117 |
+
"home_elo": pred.get("home_elo"),
|
| 118 |
+
"away_elo": pred.get("away_elo"),
|
| 119 |
+
})
|
| 120 |
+
|
| 121 |
+
pipeline.prediction_tracker.update_result(game_id, actual_winner, home_score, away_score)
|
| 122 |
+
game["prediction_correct"] = pred.get("predicted_winner") == actual_winner
|
| 123 |
+
|
| 124 |
+
return jsonify({
|
| 125 |
+
"live": [g for g in games if g.get("status") == "IN_PROGRESS"],
|
| 126 |
+
"final": [g for g in games if g.get("status") == "FINAL"],
|
| 127 |
+
"upcoming": [g for g in games if g.get("status") == "NOT_STARTED"],
|
| 128 |
+
"total": len(games)
|
| 129 |
+
})
|
| 130 |
+
except Exception as e:
|
| 131 |
+
logging.error(f"Error in get_live_games: {e}")
|
| 132 |
+
return jsonify({"live": [], "final": [], "upcoming": [], "total": 0, "error": str(e)})
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@app.route("/api/games/upcoming")
|
| 136 |
+
def get_upcoming_games():
|
| 137 |
+
"""Get upcoming games for the next N days."""
|
| 138 |
+
if not pipeline:
|
| 139 |
+
return jsonify({"games": [], "count": 0, "error": "Pipeline not ready"})
|
| 140 |
+
|
| 141 |
+
try:
|
| 142 |
+
days = request.args.get("days", 7, type=int)
|
| 143 |
+
days = max(1, min(days, 14))
|
| 144 |
+
|
| 145 |
+
games = pipeline.get_upcoming_games(days_ahead=days)
|
| 146 |
+
|
| 147 |
+
enriched_games = []
|
| 148 |
+
for game in games:
|
| 149 |
+
pred = pipeline.predict_game(game["home_team"], game["away_team"])
|
| 150 |
+
enriched_games.append({**game, "prediction": pred})
|
| 151 |
+
|
| 152 |
+
return jsonify({"games": enriched_games, "count": len(enriched_games)})
|
| 153 |
+
except Exception as e:
|
| 154 |
+
logging.error(f"Error in get_upcoming_games: {e}")
|
| 155 |
+
return jsonify({"games": [], "count": 0, "error": str(e)})
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@app.route("/api/predict")
|
| 159 |
+
def predict_game():
|
| 160 |
+
"""Predict outcome for a single game."""
|
| 161 |
+
if not pipeline:
|
| 162 |
+
return jsonify({"error": "Pipeline not ready"}), 503
|
| 163 |
+
|
| 164 |
+
home = request.args.get("home", "").upper()
|
| 165 |
+
away = request.args.get("away", "").upper()
|
| 166 |
+
|
| 167 |
+
if not home or not away:
|
| 168 |
+
return jsonify({"error": "Missing home or away team parameter"}), 400
|
| 169 |
+
|
| 170 |
+
try:
|
| 171 |
+
prediction = pipeline.predict_game(home, away)
|
| 172 |
+
return jsonify(prediction)
|
| 173 |
+
except Exception as e:
|
| 174 |
+
return jsonify({"error": str(e)}), 500
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@app.route("/api/accuracy")
|
| 178 |
+
def get_accuracy():
|
| 179 |
+
"""Get comprehensive model accuracy statistics."""
|
| 180 |
+
if not pipeline:
|
| 181 |
+
return jsonify({"stats": {}, "recent_predictions": [], "error": "Pipeline not ready"})
|
| 182 |
+
|
| 183 |
+
try:
|
| 184 |
+
stats = pipeline.get_accuracy_stats()
|
| 185 |
+
recent = pipeline.get_recent_predictions(50)
|
| 186 |
+
|
| 187 |
+
completed = [p for p in recent if p.get("is_correct", -1) >= 0]
|
| 188 |
+
correct = [p for p in completed if p.get("is_correct") == 1]
|
| 189 |
+
|
| 190 |
+
home_picks = [p for p in completed if p.get("predicted_winner") == p.get("home_team")]
|
| 191 |
+
home_correct = [p for p in home_picks if p.get("is_correct") == 1]
|
| 192 |
+
away_picks = [p for p in completed if p.get("predicted_winner") == p.get("away_team")]
|
| 193 |
+
away_correct = [p for p in away_picks if p.get("is_correct") == 1]
|
| 194 |
+
|
| 195 |
+
streak = 0
|
| 196 |
+
streak_type = None
|
| 197 |
+
for p in sorted(completed, key=lambda x: x.get("updated_at", ""), reverse=True):
|
| 198 |
+
if streak_type is None:
|
| 199 |
+
streak_type = "W" if p.get("is_correct") == 1 else "L"
|
| 200 |
+
if (p.get("is_correct") == 1 and streak_type == "W") or (p.get("is_correct") == 0 and streak_type == "L"):
|
| 201 |
+
streak += 1
|
| 202 |
+
else:
|
| 203 |
+
break
|
| 204 |
+
|
| 205 |
+
last_10 = completed[:10] if len(completed) >= 10 else completed
|
| 206 |
+
last_10_correct = sum(1 for p in last_10 if p.get("is_correct") == 1)
|
| 207 |
+
|
| 208 |
+
correct_avg_prob = sum(max(p.get("home_win_prob", 0.5), p.get("away_win_prob", 0.5)) for p in correct) / len(correct) if correct else 0
|
| 209 |
+
incorrect = [p for p in completed if p.get("is_correct") == 0]
|
| 210 |
+
incorrect_avg_prob = sum(max(p.get("home_win_prob", 0.5), p.get("away_win_prob", 0.5)) for p in incorrect) / len(incorrect) if incorrect else 0
|
| 211 |
+
|
| 212 |
+
enhanced_stats = {
|
| 213 |
+
**stats,
|
| 214 |
+
"home_pick_accuracy": len(home_correct) / len(home_picks) if home_picks else 0,
|
| 215 |
+
"away_pick_accuracy": len(away_correct) / len(away_picks) if away_picks else 0,
|
| 216 |
+
"home_picks_total": len(home_picks),
|
| 217 |
+
"away_picks_total": len(away_picks),
|
| 218 |
+
"current_streak": streak,
|
| 219 |
+
"streak_type": streak_type or "N/A",
|
| 220 |
+
"last_10_record": f"{last_10_correct}-{len(last_10) - last_10_correct}",
|
| 221 |
+
"last_10_accuracy": last_10_correct / len(last_10) if last_10 else 0,
|
| 222 |
+
"avg_probability_correct": correct_avg_prob,
|
| 223 |
+
"avg_probability_incorrect": incorrect_avg_prob,
|
| 224 |
+
"pending_predictions": len([p for p in recent if p.get("is_correct", -1) == -1]),
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
return jsonify({"stats": enhanced_stats, "recent_predictions": recent[:20]})
|
| 228 |
+
except Exception as e:
|
| 229 |
+
logging.error(f"Error in get_accuracy: {e}")
|
| 230 |
+
return jsonify({"stats": {}, "recent_predictions": [], "error": str(e)})
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
@app.route("/api/mvp")
|
| 234 |
+
def get_mvp_race():
|
| 235 |
+
"""Get current MVP race standings."""
|
| 236 |
+
if not pipeline:
|
| 237 |
+
return jsonify({"candidates": [], "error": "Pipeline not ready"})
|
| 238 |
+
|
| 239 |
+
try:
|
| 240 |
+
mvp_df = pipeline.get_mvp_race()
|
| 241 |
+
|
| 242 |
+
candidates = []
|
| 243 |
+
for idx, row in mvp_df.iterrows():
|
| 244 |
+
candidates.append({
|
| 245 |
+
"rank": len(candidates) + 1,
|
| 246 |
+
"name": row["PLAYER_NAME"],
|
| 247 |
+
"ppg": round(float(row["PTS"]), 1),
|
| 248 |
+
"rpg": round(float(row["REB"]), 1),
|
| 249 |
+
"apg": round(float(row["AST"]), 1),
|
| 250 |
+
"mvp_score": round(float(row["mvp_score"]), 1),
|
| 251 |
+
"similarity": round(float(row["mvp_similarity"]) * 100, 1)
|
| 252 |
+
})
|
| 253 |
+
|
| 254 |
+
return jsonify({"candidates": candidates})
|
| 255 |
+
except Exception as e:
|
| 256 |
+
logging.error(f"Error in get_mvp_race: {e}")
|
| 257 |
+
return jsonify({"candidates": [], "error": str(e)})
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
@app.route("/api/championship")
|
| 261 |
+
def get_championship_odds():
|
| 262 |
+
"""Get current championship odds."""
|
| 263 |
+
if not pipeline:
|
| 264 |
+
return jsonify({"teams": [], "error": "Pipeline not ready"})
|
| 265 |
+
|
| 266 |
+
try:
|
| 267 |
+
champ_df = pipeline.get_championship_odds()
|
| 268 |
+
|
| 269 |
+
teams = []
|
| 270 |
+
for idx, row in champ_df.iterrows():
|
| 271 |
+
teams.append({
|
| 272 |
+
"rank": len(teams) + 1,
|
| 273 |
+
"team": row.get("TEAM_ABBREVIATION", row.get("Team", "N/A")),
|
| 274 |
+
"odds": round(float(row.get("champ_probability", row.get("Championship_Odds", 0))) * 100, 1),
|
| 275 |
+
"win_pct": round(float(row.get("W_PCT", 0.5)) * 100, 1)
|
| 276 |
+
})
|
| 277 |
+
|
| 278 |
+
return jsonify({"teams": teams})
|
| 279 |
+
except Exception as e:
|
| 280 |
+
logging.error(f"Error in get_championship_odds: {e}")
|
| 281 |
+
return jsonify({"teams": [], "error": str(e)})
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
@app.route("/api/teams")
|
| 285 |
+
def get_teams():
|
| 286 |
+
"""Get list of all NBA teams."""
|
| 287 |
+
try:
|
| 288 |
+
from src.config import NBA_TEAMS
|
| 289 |
+
teams = [{"id": tid, "abbrev": abbrev} for tid, abbrev in NBA_TEAMS.items()]
|
| 290 |
+
teams.sort(key=lambda x: x["abbrev"])
|
| 291 |
+
return jsonify({"teams": teams})
|
| 292 |
+
except Exception as e:
|
| 293 |
+
return jsonify({"teams": [], "error": str(e)})
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
@app.route("/api/roster/<team_abbrev>")
|
| 297 |
+
def get_team_roster(team_abbrev):
|
| 298 |
+
"""Get projected starting 5 for a team."""
|
| 299 |
+
try:
|
| 300 |
+
from nba_api.stats.endpoints import leaguedashplayerstats
|
| 301 |
+
import time
|
| 302 |
+
|
| 303 |
+
time.sleep(0.6)
|
| 304 |
+
stats = leaguedashplayerstats.LeagueDashPlayerStats(
|
| 305 |
+
season='2025-26',
|
| 306 |
+
per_mode_detailed='PerGame'
|
| 307 |
+
)
|
| 308 |
+
df = stats.get_data_frames()[0]
|
| 309 |
+
|
| 310 |
+
team_abbrev = team_abbrev.upper()
|
| 311 |
+
team_players = df[df['TEAM_ABBREVIATION'] == team_abbrev].copy()
|
| 312 |
+
|
| 313 |
+
if team_players.empty:
|
| 314 |
+
return jsonify({"team": team_abbrev, "starters": []})
|
| 315 |
+
|
| 316 |
+
team_players = team_players.sort_values('MIN', ascending=False)
|
| 317 |
+
|
| 318 |
+
starters = []
|
| 319 |
+
for _, player in team_players.head(5).iterrows():
|
| 320 |
+
starters.append({
|
| 321 |
+
'name': player['PLAYER_NAME'],
|
| 322 |
+
'position': player.get('POSITION', ''),
|
| 323 |
+
'pts': round(float(player['PTS']), 1),
|
| 324 |
+
'reb': round(float(player.get('REB', 0)), 1),
|
| 325 |
+
'ast': round(float(player.get('AST', 0)), 1),
|
| 326 |
+
'min': round(float(player.get('MIN', 0)), 1)
|
| 327 |
+
})
|
| 328 |
+
|
| 329 |
+
return jsonify({"team": team_abbrev, "starters": starters})
|
| 330 |
+
|
| 331 |
+
except Exception as e:
|
| 332 |
+
logging.error(f"Error fetching roster for {team_abbrev}: {e}")
|
| 333 |
+
if pipeline:
|
| 334 |
+
roster = pipeline.get_team_roster(team_abbrev.upper())
|
| 335 |
+
return jsonify({"team": team_abbrev.upper(), "starters": roster})
|
| 336 |
+
return jsonify({"team": team_abbrev, "starters": [], "error": str(e)})
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
if __name__ == "__main__":
    # 7860 is the default port Hugging Face Spaces expects; a PORT env var
    # overrides it for other deployment targets.
    port = int(os.environ.get("PORT", 7860))
    app.run(host="0.0.0.0", port=port, debug=False)
|
src/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Empty init file
|
src/auto_trainer.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Auto Training Scheduler
|
| 3 |
+
===================================================
|
| 4 |
+
Background scheduler that automatically trains the model on new game data.
|
| 5 |
+
Runs within the Streamlit app or as a standalone service.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import logging
|
| 9 |
+
import threading
|
| 10 |
+
import time
|
| 11 |
+
from datetime import datetime, timedelta
|
| 12 |
+
from typing import Optional
|
| 13 |
+
import atexit
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class AutoTrainer:
|
| 19 |
+
"""
|
| 20 |
+
Automatic model training scheduler.
|
| 21 |
+
|
| 22 |
+
Runs background tasks to:
|
| 23 |
+
1. Ingest completed games every hour
|
| 24 |
+
2. Retrain the model daily
|
| 25 |
+
3. Update prediction results after games
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
_instance: Optional['AutoTrainer'] = None
|
| 29 |
+
_lock = threading.Lock()
|
| 30 |
+
|
| 31 |
+
def __new__(cls):
|
| 32 |
+
"""Singleton pattern - only one auto trainer instance."""
|
| 33 |
+
with cls._lock:
|
| 34 |
+
if cls._instance is None:
|
| 35 |
+
cls._instance = super().__new__(cls)
|
| 36 |
+
cls._instance._initialized = False
|
| 37 |
+
return cls._instance
|
| 38 |
+
|
| 39 |
+
    def __init__(self):
        """Initialize scheduler state; re-initialization is a no-op (singleton)."""
        if self._initialized:
            return

        self._initialized = True
        self._running = False
        self._thread: Optional[threading.Thread] = None
        self._stop_event = threading.Event()

        # Track last run times (None means the task has never run this process)
        self._last_ingest = None
        self._last_retrain = None
        self._last_results_check = None

        # Intervals (in seconds)
        self.INGEST_INTERVAL = 3600  # 1 hour
        self.RETRAIN_INTERVAL = 86400  # 24 hours (daily)
        self.RESULTS_CHECK_INTERVAL = 1800  # 30 minutes

        logger.info("AutoTrainer initialized")
|
| 59 |
+
|
| 60 |
+
    def start(self):
        """Start the background training scheduler.

        Spawns a daemon thread running _run_loop; calling start() while
        already running is a no-op. Registers stop() with atexit so the
        thread is signaled and joined at interpreter shutdown.
        """
        if self._running:
            logger.info("AutoTrainer already running")
            return

        self._running = True
        self._stop_event.clear()
        self._thread = threading.Thread(target=self._run_loop, daemon=True)
        self._thread.start()

        # Register cleanup on exit
        # NOTE(review): registered on every (re)start; stop() is idempotent,
        # so duplicate atexit registrations are harmless.
        atexit.register(self.stop)

        logger.info("AutoTrainer started - background training enabled")
|
| 75 |
+
|
| 76 |
+
def stop(self):
|
| 77 |
+
"""Stop the background scheduler."""
|
| 78 |
+
if not self._running:
|
| 79 |
+
return
|
| 80 |
+
|
| 81 |
+
self._running = False
|
| 82 |
+
self._stop_event.set()
|
| 83 |
+
|
| 84 |
+
if self._thread and self._thread.is_alive():
|
| 85 |
+
self._thread.join(timeout=5)
|
| 86 |
+
|
| 87 |
+
logger.info("AutoTrainer stopped")
|
| 88 |
+
|
| 89 |
+
def _run_loop(self):
|
| 90 |
+
"""Main background loop - checks for tasks to run."""
|
| 91 |
+
logger.info("AutoTrainer loop started")
|
| 92 |
+
|
| 93 |
+
while not self._stop_event.is_set():
|
| 94 |
+
try:
|
| 95 |
+
now = datetime.now()
|
| 96 |
+
|
| 97 |
+
# Check and update prediction results (every 30 min)
|
| 98 |
+
if self._should_run(self._last_results_check, self.RESULTS_CHECK_INTERVAL):
|
| 99 |
+
self._check_results()
|
| 100 |
+
self._last_results_check = now
|
| 101 |
+
|
| 102 |
+
# Ingest completed games (every hour)
|
| 103 |
+
if self._should_run(self._last_ingest, self.INGEST_INTERVAL):
|
| 104 |
+
self._ingest_games()
|
| 105 |
+
self._last_ingest = now
|
| 106 |
+
|
| 107 |
+
# Retrain model only after all daily games are complete
|
| 108 |
+
# NBA games typically end by 1 AM ET, so we retrain at 4 AM ET (safe window)
|
| 109 |
+
# 4 AM ET = 1:30 PM IST
|
| 110 |
+
if self._should_run(self._last_retrain, self.RETRAIN_INTERVAL):
|
| 111 |
+
if self._all_daily_games_complete():
|
| 112 |
+
self._retrain_model()
|
| 113 |
+
self._last_retrain = now
|
| 114 |
+
else:
|
| 115 |
+
logger.info("AutoTrainer: Waiting for all games to complete before retrain")
|
| 116 |
+
|
| 117 |
+
except Exception as e:
|
| 118 |
+
logger.error(f"AutoTrainer error: {e}")
|
| 119 |
+
|
| 120 |
+
# Sleep for 5 minutes between checks
|
| 121 |
+
self._stop_event.wait(300)
|
| 122 |
+
|
| 123 |
+
def _all_daily_games_complete(self) -> bool:
|
| 124 |
+
"""Check if all of today's games have completed."""
|
| 125 |
+
try:
|
| 126 |
+
from src.live_data_collector import LiveDataCollector
|
| 127 |
+
collector = LiveDataCollector()
|
| 128 |
+
|
| 129 |
+
# Get live games - if any are still in progress, don't retrain
|
| 130 |
+
live_games = collector.get_live_games()
|
| 131 |
+
if live_games:
|
| 132 |
+
logger.info(f"AutoTrainer: {len(live_games)} games still in progress")
|
| 133 |
+
return False
|
| 134 |
+
|
| 135 |
+
# Get upcoming games - if any haven't started, don't retrain yet
|
| 136 |
+
upcoming = collector.get_upcoming_games()
|
| 137 |
+
if upcoming:
|
| 138 |
+
logger.info(f"AutoTrainer: {len(upcoming)} games haven't started yet")
|
| 139 |
+
return False
|
| 140 |
+
|
| 141 |
+
# All games completed (or no games today)
|
| 142 |
+
return True
|
| 143 |
+
|
| 144 |
+
except Exception as e:
|
| 145 |
+
logger.warning(f"Could not check game status: {e}")
|
| 146 |
+
# Default to checking time - after 4 AM ET (safe window)
|
| 147 |
+
hour = datetime.now().hour
|
| 148 |
+
# 4 AM ET ≈ 1:30 PM IST, 9 AM UTC
|
| 149 |
+
return hour >= 4 or hour < 12 # Between 4 AM and noon
|
| 150 |
+
|
| 151 |
+
def _should_run(self, last_run: Optional[datetime], interval: int) -> bool:
|
| 152 |
+
"""Check if enough time has passed since last run."""
|
| 153 |
+
if last_run is None:
|
| 154 |
+
return True
|
| 155 |
+
return (datetime.now() - last_run).total_seconds() >= interval
|
| 156 |
+
|
| 157 |
+
def _check_results(self):
|
| 158 |
+
"""Check completed games and update prediction results."""
|
| 159 |
+
logger.info("AutoTrainer: Checking prediction results...")
|
| 160 |
+
try:
|
| 161 |
+
from src.prediction_pipeline import PredictionPipeline
|
| 162 |
+
pipeline = PredictionPipeline()
|
| 163 |
+
updated = pipeline.check_prediction_results()
|
| 164 |
+
logger.info(f"AutoTrainer: Updated {len(updated)} prediction results")
|
| 165 |
+
except Exception as e:
|
| 166 |
+
logger.error(f"AutoTrainer: Failed to check results: {e}")
|
| 167 |
+
|
| 168 |
+
def _ingest_games(self):
|
| 169 |
+
"""Ingest completed games into training data."""
|
| 170 |
+
logger.info("AutoTrainer: Ingesting completed games...")
|
| 171 |
+
try:
|
| 172 |
+
from src.continuous_learner import ContinuousLearner
|
| 173 |
+
learner = ContinuousLearner()
|
| 174 |
+
count = learner.ingest_completed_games()
|
| 175 |
+
logger.info(f"AutoTrainer: Ingested {count} new games")
|
| 176 |
+
except Exception as e:
|
| 177 |
+
logger.error(f"AutoTrainer: Failed to ingest games: {e}")
|
| 178 |
+
|
| 179 |
+
def _retrain_model(self):
|
| 180 |
+
"""Full model retrain cycle."""
|
| 181 |
+
logger.info("AutoTrainer: Starting daily model retrain...")
|
| 182 |
+
try:
|
| 183 |
+
from src.continuous_learner import ContinuousLearner
|
| 184 |
+
learner = ContinuousLearner()
|
| 185 |
+
results = learner.run_update_cycle(retrain=True)
|
| 186 |
+
|
| 187 |
+
if results.get("model_retrained"):
|
| 188 |
+
accuracy = results.get("metrics", {}).get("test_accuracy", 0)
|
| 189 |
+
logger.info(f"AutoTrainer: Model retrained! Accuracy: {accuracy:.2%}")
|
| 190 |
+
else:
|
| 191 |
+
logger.info("AutoTrainer: No new data to retrain on")
|
| 192 |
+
|
| 193 |
+
except Exception as e:
|
| 194 |
+
logger.error(f"AutoTrainer: Failed to retrain model: {e}")
|
| 195 |
+
|
| 196 |
+
def get_status(self) -> dict:
|
| 197 |
+
"""Get current auto-trainer status."""
|
| 198 |
+
return {
|
| 199 |
+
"running": self._running,
|
| 200 |
+
"last_ingest": self._last_ingest.isoformat() if self._last_ingest else None,
|
| 201 |
+
"last_retrain": self._last_retrain.isoformat() if self._last_retrain else None,
|
| 202 |
+
"last_results_check": self._last_results_check.isoformat() if self._last_results_check else None,
|
| 203 |
+
"next_ingest_in": self._time_until_next(self._last_ingest, self.INGEST_INTERVAL),
|
| 204 |
+
"next_retrain_in": self._time_until_next(self._last_retrain, self.RETRAIN_INTERVAL),
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
def _time_until_next(self, last_run: Optional[datetime], interval: int) -> str:
|
| 208 |
+
"""Human-readable time until next run."""
|
| 209 |
+
if last_run is None:
|
| 210 |
+
return "Soon"
|
| 211 |
+
|
| 212 |
+
elapsed = (datetime.now() - last_run).total_seconds()
|
| 213 |
+
remaining = max(0, interval - elapsed)
|
| 214 |
+
|
| 215 |
+
if remaining < 60:
|
| 216 |
+
return f"{int(remaining)}s"
|
| 217 |
+
elif remaining < 3600:
|
| 218 |
+
return f"{int(remaining / 60)}m"
|
| 219 |
+
else:
|
| 220 |
+
return f"{int(remaining / 3600)}h {int((remaining % 3600) / 60)}m"
|
| 221 |
+
|
| 222 |
+
def force_ingest(self):
|
| 223 |
+
"""Force an immediate game ingestion."""
|
| 224 |
+
threading.Thread(target=self._ingest_games, daemon=True).start()
|
| 225 |
+
|
| 226 |
+
def force_retrain(self):
|
| 227 |
+
"""Force an immediate model retrain."""
|
| 228 |
+
threading.Thread(target=self._retrain_model, daemon=True).start()
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
# Global instance
# Module-level singleton: importing this module creates the scheduler object
# but does NOT start it — callers must invoke auto_trainer.start() to enable
# background training.
auto_trainer = AutoTrainer()
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
# =============================================================================
# CLI INTERFACE
# =============================================================================
# Usage:
#   python -m src.auto_trainer --start    run the scheduler in the foreground
#   python -m src.auto_trainer --status   print a one-shot status report
if __name__ == "__main__":
    import argparse

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(description="NBA Auto Training Scheduler")
    parser.add_argument("--start", action="store_true", help="Start the scheduler")
    parser.add_argument("--status", action="store_true", help="Show scheduler status")

    args = parser.parse_args()

    if args.status:
        # One-shot report from the (freshly created) singleton, then exit.
        # NOTE(review): this reflects only the current process — it cannot see
        # a scheduler running in another process.
        status = auto_trainer.get_status()
        print("\n=== Auto Trainer Status ===\n")
        print(f"Running: {'Yes ✓' if status['running'] else 'No'}")
        print(f"Last Ingest: {status['last_ingest'] or 'Never'}")
        print(f"Last Retrain: {status['last_retrain'] or 'Never'}")
        print(f"Next Ingest In: {status['next_ingest_in']}")
        print(f"Next Retrain In: {status['next_retrain_in']}")

    elif args.start:
        print("\n=== Starting Auto Trainer ===\n")
        print("Background training enabled!")
        print("- Checks prediction results every 30 minutes")
        print("- Ingests completed games every 1 hour")
        print("- Retrains model every 24 hours")
        print("\nPress Ctrl+C to stop...\n")

        auto_trainer.start()

        try:
            # Keep the main thread alive; the scheduler runs in a daemon
            # thread and would die if this process exited. Print a heartbeat
            # once a minute.
            while True:
                time.sleep(60)
                status = auto_trainer.get_status()
                print(f"[{datetime.now().strftime('%H:%M:%S')}] Running... Next ingest: {status['next_ingest_in']}, Next retrain: {status['next_retrain_in']}")
        except KeyboardInterrupt:
            print("\nStopping...")
            auto_trainer.stop()

    else:
        print("Use --start to begin auto training or --status to check status")
|
src/config.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Configuration
|
| 3 |
+
=========================================
|
| 4 |
+
Central configuration for data collection, model training, and predictions.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
|
| 10 |
+
|
| 11 |
+
# =============================================================================
# PATHS
# =============================================================================
# All paths are resolved relative to the repository root (the parent of this
# file's src/ directory), so the project runs from any working directory.
PROJECT_ROOT = Path(__file__).parent.parent
DATA_DIR = PROJECT_ROOT / "data"
RAW_DATA_DIR = DATA_DIR / "raw"              # raw game data (e.g. all_games.parquet)
PROCESSED_DATA_DIR = DATA_DIR / "processed"  # engineered datasets & learning checkpoints
API_CACHE_DIR = DATA_DIR / "api_data"        # presumably cached API responses — verify against CacheManager
MODELS_DIR = PROJECT_ROOT / "models"         # trained model artifacts

# Create directories if they don't exist (import-time side effect: importing
# this module materializes the data/model directory tree).
for dir_path in [RAW_DATA_DIR, PROCESSED_DATA_DIR, API_CACHE_DIR, MODELS_DIR]:
    dir_path.mkdir(parents=True, exist_ok=True)
|
| 24 |
+
|
| 25 |
+
# =============================================================================
# SEASONS
# =============================================================================
# Extended dataset: 23 years (2003-2026) for comprehensive training
INITIAL_SEASON_START = 2003
INITIAL_SEASON_END = 2026  # Current 2025-26 season
FULL_SEASON_START = 2003  # Full dataset starts from 2003


def get_season_strings(start_year: int = INITIAL_SEASON_START,
                       end_year: int = INITIAL_SEASON_END) -> List[str]:
    """Generate season strings like '2003-04', '2004-05', etc.

    One string per season whose opening year is in [start_year, end_year);
    the suffix is the last two digits of the closing year.
    """
    seasons: List[str] = []
    for opening_year in range(start_year, end_year):
        closing_suffix = str(opening_year + 1)[-2:]
        seasons.append(f"{opening_year}-{closing_suffix}")
    return seasons


SEASON_STRINGS = get_season_strings()
|
| 39 |
+
|
| 40 |
+
# =============================================================================
|
| 41 |
+
# CHROMADB CONFIGURATION
|
| 42 |
+
# =============================================================================
|
| 43 |
+
@dataclass
class ChromaDBConfig:
    """Configuration for ChromaDB prediction tracking.

    Each field can be overridden via an environment variable
    (CHROMADB_TENANT, CHROMADB_DATABASE, CHROMADB_API_KEY,
    CHROMADB_COLLECTION); the literal values below are kept only as
    backward-compatible fallbacks.
    """
    # SECURITY: an API key and tenant id were previously committed here in
    # plain text (and remain below as fallbacks so existing deployments keep
    # working). The key should be rotated and supplied via CHROMADB_API_KEY.
    tenant: str = field(default_factory=lambda: os.getenv(
        "CHROMADB_TENANT", "70e82e68-9fa7-4224-9975-d49d355f6328"))
    database: str = field(default_factory=lambda: os.getenv(
        "CHROMADB_DATABASE", "NBA_ML"))
    api_key: str = field(default_factory=lambda: os.getenv(
        "CHROMADB_API_KEY", "ck-2bXunZK4X3BFSPHtwLG2Ki9xr5r6ZPxzADESDperHweT"))
    collection_name: str = field(default_factory=lambda: os.getenv(
        "CHROMADB_COLLECTION", "predictions"))

CHROMADB_CONFIG = ChromaDBConfig()
|
| 52 |
+
|
| 53 |
+
# =============================================================================
# LIVE DATA CONFIGURATION
# =============================================================================
# Polling cadence for live scores — presumably consumed by the live data
# collector and/or frontend; verify against callers.
LIVE_REFRESH_INTERVAL = 15  # Seconds between live score refreshes
|
| 57 |
+
|
| 58 |
+
# =============================================================================
|
| 59 |
+
# API CONFIGURATION
|
| 60 |
+
# =============================================================================
|
| 61 |
+
@dataclass
class APIConfig:
    """Configuration for NBA API requests with robustness features.

    NOTE(review): consumers live in src/data_collector.py (not shown here).
    Field names suggest exponential retry backoff starting at
    `initial_backoff`, multiplied by `backoff_multiplier` per attempt and
    capped at `max_backoff` — confirm against the collector's retry loop.
    """
    base_delay: float = 0.6  # Base delay between requests (seconds)
    max_retries: int = 3  # Maximum retry attempts
    initial_backoff: float = 2.0  # Initial backoff in seconds
    max_backoff: float = 60.0  # Maximum backoff in seconds
    backoff_multiplier: float = 2.0  # Exponential backoff multiplier
    timeout: int = 30  # Request timeout in seconds

API_CONFIG = APIConfig()
|
| 72 |
+
|
| 73 |
+
# =============================================================================
|
| 74 |
+
# ELO CONFIGURATION
|
| 75 |
+
# =============================================================================
|
| 76 |
+
@dataclass
class ELOConfig:
    """Configuration for ELO rating calculations.

    NOTE(review): the exact update formula lives in the feature-engineering
    code (not shown here); confirm how season_regression is applied there.
    """
    initial_rating: float = 1500.0  # Conventional ELO starting rating for every team
    k_factor: float = 20.0  # How much ratings change per game
    home_advantage: float = 100.0  # ELO points added for home team
    season_regression: float = 0.85  # Regress to mean at season start (85% = only 15% carryover)

ELO_CONFIG = ELOConfig()
|
| 85 |
+
|
| 86 |
+
# =============================================================================
|
| 87 |
+
# FEATURE CONFIGURATION
|
| 88 |
+
# =============================================================================
|
| 89 |
+
@dataclass
class FeatureConfig:
    """Configuration for feature engineering."""
    # Window lengths (in games) for rolling team-form statistics.
    rolling_windows: List[int] = field(default_factory=lambda: [5, 10, 20])
    min_games_for_features: int = 5  # Minimum games before generating features

FEATURE_CONFIG = FeatureConfig()
|
| 96 |
+
|
| 97 |
+
# =============================================================================
|
| 98 |
+
# MODEL CONFIGURATION
|
| 99 |
+
# =============================================================================
|
| 100 |
+
@dataclass
class ModelConfig:
    """Configuration for model training.

    NOTE(review): test/val seasons presumably drive a chronological
    train/validation/test split — confirm in the preprocessing code.
    """
    test_seasons: List[str] = field(default_factory=lambda: ["2024-25"])
    val_seasons: List[str] = field(default_factory=lambda: ["2023-24"])
    random_state: int = 42

    # XGBoost defaults
    xgb_params: dict = field(default_factory=lambda: {
        "n_estimators": 500,
        "max_depth": 6,
        "learning_rate": 0.05,
        "subsample": 0.8,
        "colsample_bytree": 0.8,
        "random_state": 42
    })

    # LightGBM defaults (mirrors the XGBoost settings; verbose=-1 silences
    # LightGBM's per-iteration console output)
    lgb_params: dict = field(default_factory=lambda: {
        "n_estimators": 500,
        "max_depth": 6,
        "learning_rate": 0.05,
        "subsample": 0.8,
        "colsample_bytree": 0.8,
        "random_state": 42,
        "verbose": -1
    })

MODEL_CONFIG = ModelConfig()
|
| 129 |
+
|
| 130 |
+
# =============================================================================
# TEAM MAPPINGS
# =============================================================================
# NBA Team IDs (for reference)
# Maps numeric team IDs (NBA stats API style) to three-letter abbreviations.
NBA_TEAMS = {
    1610612737: "ATL", 1610612738: "BOS", 1610612739: "CLE", 1610612740: "NOP",
    1610612741: "CHI", 1610612742: "DAL", 1610612743: "DEN", 1610612744: "GSW",
    1610612745: "HOU", 1610612746: "LAC", 1610612747: "LAL", 1610612748: "MIA",
    1610612749: "MIL", 1610612750: "MIN", 1610612751: "BKN", 1610612752: "NYK",
    1610612753: "ORL", 1610612754: "IND", 1610612755: "PHI", 1610612756: "PHX",
    1610612757: "POR", 1610612758: "SAC", 1610612759: "SAS", 1610612760: "OKC",
    1610612761: "TOR", 1610612762: "UTA", 1610612763: "MEM", 1610612764: "WAS",
    1610612765: "DET", 1610612766: "CHA"
}

# =============================================================================
# INJURY STATUS WEIGHTS
# =============================================================================
# Weight of a player's injury designation (1.0 = fully unavailable, 0.0 = no
# impact). NOTE(review): consumed by the injury collector / feature pipeline
# (not visible here) — verify how the weight is applied.
INJURY_IMPACT = {
    "Out": 1.0,  # Full impact - player not available
    "Doubtful": 0.8,
    "Questionable": 0.5,
    "Probable": 0.2,
    "Available": 0.0
}
|
src/continuous_learner.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Continuous Learner
|
| 3 |
+
==============================================
|
| 4 |
+
Handles incremental model updates with new game data.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from datetime import datetime, timedelta
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Dict, List, Optional
|
| 11 |
+
import time
|
| 12 |
+
|
| 13 |
+
import pandas as pd
|
| 14 |
+
import numpy as np
|
| 15 |
+
|
| 16 |
+
from src.config import (
|
| 17 |
+
RAW_DATA_DIR,
|
| 18 |
+
PROCESSED_DATA_DIR,
|
| 19 |
+
MODELS_DIR,
|
| 20 |
+
SEASON_STRINGS,
|
| 21 |
+
API_CONFIG
|
| 22 |
+
)
|
| 23 |
+
from src.data_collector import GameDataCollector, TeamDataCollector, PlayerDataCollector, CacheManager
|
| 24 |
+
from src.feature_engineering import FeatureGenerator
|
| 25 |
+
from src.preprocessing import DataPreprocessor
|
| 26 |
+
from src.models.game_predictor import GamePredictor, train_game_predictor
|
| 27 |
+
from src.live_data_collector import LiveDataCollector
|
| 28 |
+
from src.prediction_tracker import PredictionTracker
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class ContinuousLearner:
    """
    Handles model updates with new game data.

    Workflow:
    1. Ingest completed games from live API
    2. Update raw data files
    3. Re-run feature engineering
    4. Retrain model (incremental or full)
    """

    def __init__(self):
        self.game_collector = GameDataCollector()
        self.live_collector = LiveDataCollector()
        self.prediction_tracker = PredictionTracker()
        self.feature_gen = FeatureGenerator()
        self.cache = CacheManager()

        # Checkpoint file for tracking last ingested game
        self.checkpoint_file = PROCESSED_DATA_DIR / "continuous_learning_checkpoint.json"

    def _load_checkpoint(self) -> Dict:
        """Load checkpoint of last processed game.

        Returns an empty default checkpoint if none has been saved yet.
        """
        import json
        if self.checkpoint_file.exists():
            with open(self.checkpoint_file, 'r') as f:
                return json.load(f)
        return {"last_game_date": None, "last_game_ids": []}

    def _save_checkpoint(self, checkpoint: Dict):
        """Persist the checkpoint after processing."""
        import json
        with open(self.checkpoint_file, 'w') as f:
            json.dump(checkpoint, f, indent=2)

    def ingest_completed_games(self) -> int:
        """
        Fetch completed games from live API and add to training data.

        Also updates the prediction tracker with each game's final result,
        and advances the checkpoint so games are not ingested twice.

        Returns:
            Number of new games ingested
        """
        logger.info("Checking for completed games to ingest...")

        # Get completed games from today
        final_games = self.live_collector.get_final_games()

        if not final_games:
            logger.info("No completed games to ingest")
            return 0

        # Load checkpoint
        checkpoint = self._load_checkpoint()
        processed_ids = set(checkpoint.get("last_game_ids", []))

        # Filter to new games only
        new_games = [g for g in final_games if g["game_id"] not in processed_ids]

        if not new_games:
            logger.info("All completed games already processed")
            return 0

        logger.info(f"Found {len(new_games)} new completed games")

        # Update prediction tracker with results
        for game in new_games:
            winner = game["home_team"] if game["home_score"] > game["away_score"] else game["away_team"]
            self.prediction_tracker.update_result(
                game_id=game["game_id"],
                actual_winner=winner,
                home_score=game["home_score"],
                away_score=game["away_score"]
            )
            processed_ids.add(game["game_id"])

        # Append new games to raw data
        self._append_games_to_raw_data(new_games)

        # Update checkpoint.
        # BUG FIX: the original used list(processed_ids)[-100] (an index, not
        # a slice), which raised IndexError with fewer than 100 ids and would
        # otherwise store a single id string instead of a list. Slice to keep
        # the last 100 IDs. NOTE: set iteration order is arbitrary, so which
        # 100 survive is best-effort; the ids are only used for de-duplication.
        checkpoint = {
            "last_game_date": datetime.now().isoformat(),
            "last_game_ids": list(processed_ids)[-100:]  # Keep last 100 IDs
        }
        self._save_checkpoint(checkpoint)

        logger.info(f"Ingested {len(new_games)} new games")
        return len(new_games)

    def _append_games_to_raw_data(self, games: List[Dict]):
        """Append new game rows to the raw all_games.parquet file.

        De-duplicates on GAME_ID, keeping the newest row. Errors are logged
        (best-effort) rather than raised so one bad write never aborts the
        ingest cycle.
        """
        try:
            # Load existing games
            games_file = RAW_DATA_DIR / "all_games.parquet"
            if games_file.exists():
                existing_df = pd.read_parquet(games_file)
            else:
                existing_df = pd.DataFrame()

            # Convert new games to DataFrame
            new_rows = []
            for game in games:
                new_rows.append({
                    "GAME_ID": game["game_id"],
                    "GAME_DATE": game.get("game_date", ""),
                    "HOME_TEAM_ID": game["home_team_id"],
                    "VISITOR_TEAM_ID": game["away_team_id"],
                    "HOME_TEAM_ABBREVIATION": game["home_team"],
                    "VISITOR_TEAM_ABBREVIATION": game["away_team"],
                    "PTS_home": game["home_score"],
                    "PTS_away": game["away_score"],
                    "HOME_TEAM_WINS": 1 if game["home_score"] > game["away_score"] else 0,
                    "SEASON_ID": self._get_current_season_id(),
                })

            new_df = pd.DataFrame(new_rows)

            # Append and deduplicate (keep="last" lets a re-ingested game
            # overwrite its earlier row)
            combined_df = pd.concat([existing_df, new_df], ignore_index=True)
            combined_df = combined_df.drop_duplicates(subset=["GAME_ID"], keep="last")

            # Save
            combined_df.to_parquet(games_file, index=False)
            logger.info(f"Updated raw games data: {len(combined_df)} total games")

        except Exception as e:
            logger.error(f"Failed to append games to raw data: {e}")

    def _get_current_season_id(self) -> str:
        """Get current NBA season ID.

        Seasons start in October, so Jan-Sep belongs to the previous year's
        season. The '2' prefix matches the NBA regular-season id format.
        """
        now = datetime.now()
        year = now.year if now.month >= 10 else now.year - 1
        return f"2{year}"  # e.g., "22024" for 2024-25 season

    def update_features(self) -> bool:
        """
        Re-run feature engineering with updated data.

        Returns:
            True if successful
        """
        logger.info("Updating features with new data...")

        try:
            # Re-run feature generation from scratch so rolling stats include
            # the newly ingested games
            self.feature_gen.process_all_data(force_regenerate=True)
            logger.info("Feature update complete")
            return True

        except Exception as e:
            logger.error(f"Failed to update features: {e}")
            return False

    def retrain_model(self, incremental: bool = True) -> Dict:
        """
        Retrain the game prediction model.

        Args:
            incremental: If True, use warm start from existing model.
                         If False, train from scratch.
                         NOTE(review): currently unused — train_game_predictor
                         always trains on the full dataset; kept for interface
                         stability until warm-start support lands.

        Returns:
            Dict with training metrics, or {"error": ...} on failure
        """
        logger.info(f"Retraining model (incremental={incremental})...")

        try:
            import joblib

            # Load dataset
            dataset_path = PROCESSED_DATA_DIR / "game_dataset.joblib"
            if not dataset_path.exists():
                logger.error("Dataset not found. Run preprocessing first.")
                return {"error": "Dataset not found"}

            dataset = joblib.load(dataset_path)

            # Train model
            metrics = train_game_predictor(dataset)

            logger.info(f"Model retrained. Accuracy: {metrics.get('test_accuracy', 0):.2%}")
            return metrics

        except Exception as e:
            logger.error(f"Failed to retrain model: {e}")
            return {"error": str(e)}

    def run_update_cycle(self, retrain: bool = True) -> Dict:
        """
        Full update cycle: ingest -> features -> retrain.

        Short-circuits early (returning partial results) when there are no
        new games or the feature update fails.

        Args:
            retrain: Whether to retrain model after updating data

        Returns:
            Dict with cycle results (games_ingested, features_updated,
            model_retrained, metrics)
        """
        logger.info("Starting continuous learning update cycle...")

        results = {
            "timestamp": datetime.now().isoformat(),
            "games_ingested": 0,
            "features_updated": False,
            "model_retrained": False,
            "metrics": {}
        }

        # Step 1: Ingest completed games
        games_ingested = self.ingest_completed_games()
        results["games_ingested"] = games_ingested

        if games_ingested == 0:
            logger.info("No new games to process, skipping update")
            return results

        # Step 2: Update features
        features_updated = self.update_features()
        results["features_updated"] = features_updated

        if not features_updated:
            logger.warning("Feature update failed, skipping retrain")
            return results

        # Step 3: Retrain model (if requested)
        if retrain:
            # Rebuild dataset so the training split sees the new features
            preprocessor = DataPreprocessor()
            preprocessor.build_dataset()

            # Retrain
            metrics = self.retrain_model(incremental=True)
            results["model_retrained"] = "error" not in metrics
            results["metrics"] = metrics

        logger.info("Update cycle complete")
        return results
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
# =============================================================================
# CLI INTERFACE
# =============================================================================
# Usage:
#   python -m src.continuous_learner --ingest                 data only
#   python -m src.continuous_learner --update                 full cycle
#   python -m src.continuous_learner --update --no-retrain    skip retraining
if __name__ == "__main__":
    import argparse

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(description="NBA Continuous Learning System")
    parser.add_argument("--ingest", action="store_true", help="Ingest completed games only")
    parser.add_argument("--update", action="store_true", help="Full update cycle")
    parser.add_argument("--no-retrain", action="store_true", help="Skip model retraining")

    args = parser.parse_args()

    learner = ContinuousLearner()

    if args.ingest:
        # Data-only mode: pull finished games into the raw dataset without
        # touching features or the model.
        print("\n=== Ingesting Completed Games ===\n")
        count = learner.ingest_completed_games()
        print(f"Ingested {count} new games")

    elif args.update:
        # Full pipeline: ingest -> feature regeneration -> (optional) retrain.
        print("\n=== Running Update Cycle ===\n")
        results = learner.run_update_cycle(retrain=not args.no_retrain)
        print(f"Games ingested: {results['games_ingested']}")
        print(f"Features updated: {results['features_updated']}")
        print(f"Model retrained: {results['model_retrained']}")
        if results['metrics']:
            print(f"Test accuracy: {results['metrics'].get('test_accuracy', 'N/A')}")

    else:
        print("Use --ingest or --update")
        print("\nUsage:")
        print("  python -m src.continuous_learner --ingest")
        print("  python -m src.continuous_learner --update")
        print("  python -m src.continuous_learner --update --no-retrain")
|
src/data_collector.py
ADDED
|
@@ -0,0 +1,649 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Comprehensive Data Collector
|
| 3 |
+
========================================================
|
| 4 |
+
Full data collection from NBA API with all available endpoints:
|
| 5 |
+
- Games, Team Stats, Player Stats (basic)
|
| 6 |
+
- Advanced Metrics, Clutch Stats, Hustle Stats
|
| 7 |
+
- Box Scores, Standings, Play Types
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import time
|
| 11 |
+
import json
|
| 12 |
+
import pandas as pd
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from datetime import datetime
|
| 15 |
+
from typing import Optional, List, Dict, Any
|
| 16 |
+
from tqdm import tqdm
|
| 17 |
+
import logging
|
| 18 |
+
|
| 19 |
+
from nba_api.stats.endpoints import (
|
| 20 |
+
# Basic endpoints
|
| 21 |
+
leaguegamefinder,
|
| 22 |
+
leaguestandings,
|
| 23 |
+
leaguedashteamstats,
|
| 24 |
+
leaguedashplayerstats,
|
| 25 |
+
playergamelog,
|
| 26 |
+
boxscoretraditionalv2,
|
| 27 |
+
boxscoreadvancedv2,
|
| 28 |
+
leagueleaders,
|
| 29 |
+
# Advanced endpoints
|
| 30 |
+
teamestimatedmetrics,
|
| 31 |
+
playerestimatedmetrics,
|
| 32 |
+
leaguedashteamclutch,
|
| 33 |
+
leaguedashplayerclutch,
|
| 34 |
+
leaguehustlestatsteam,
|
| 35 |
+
leaguehustlestatsplayer,
|
| 36 |
+
leaguedashptteamdefend,
|
| 37 |
+
leaguedashptstats,
|
| 38 |
+
leaguestandingsv3,
|
| 39 |
+
teamyearbyyearstats,
|
| 40 |
+
# Box score variants
|
| 41 |
+
boxscoremiscv2,
|
| 42 |
+
boxscorescoringv2,
|
| 43 |
+
boxscoreusagev2,
|
| 44 |
+
# Shooting
|
| 45 |
+
leaguedashteamptshot,
|
| 46 |
+
leaguedashplayerptshot,
|
| 47 |
+
)
|
| 48 |
+
from nba_api.stats.static import teams, players
|
| 49 |
+
|
| 50 |
+
from src.config import (
|
| 51 |
+
API_CONFIG,
|
| 52 |
+
SEASON_STRINGS,
|
| 53 |
+
API_CACHE_DIR,
|
| 54 |
+
RAW_DATA_DIR,
|
| 55 |
+
NBA_TEAMS
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
# Setup logging
|
| 59 |
+
logging.basicConfig(level=logging.INFO)
|
| 60 |
+
logger = logging.getLogger(__name__)
|
| 61 |
+
|
| 62 |
+
# =============================================================================
|
| 63 |
+
# RETRY DECORATOR WITH EXPONENTIAL BACKOFF
|
| 64 |
+
# =============================================================================
|
| 65 |
+
def retry_with_backoff(func):
    """Decorator to retry API calls with exponential backoff.

    Sleeps ``API_CONFIG.base_delay`` before every attempt (simple rate
    limiting for the NBA stats API), then retries up to
    ``API_CONFIG.max_retries`` extra times, growing the wait by
    ``API_CONFIG.backoff_multiplier`` and capping it at
    ``API_CONFIG.max_backoff``.  Re-raises the last exception once all
    attempts are exhausted.
    """
    from functools import wraps  # local import: keeps the decorator self-contained

    @wraps(func)  # preserve __name__/__doc__ of the wrapped fetcher for logging/debugging
    def wrapper(*args, **kwargs):
        backoff = API_CONFIG.initial_backoff
        last_exception = None

        for attempt in range(API_CONFIG.max_retries + 1):
            try:
                # Fixed pre-call delay to stay under the API rate limit.
                time.sleep(API_CONFIG.base_delay)
                return func(*args, **kwargs)
            except Exception as e:
                last_exception = e
                if attempt < API_CONFIG.max_retries:
                    logger.warning(f"Attempt {attempt + 1} failed: {e}. Retrying in {backoff}s...")
                    time.sleep(backoff)
                    backoff = min(backoff * API_CONFIG.backoff_multiplier, API_CONFIG.max_backoff)
                else:
                    logger.error(f"All {API_CONFIG.max_retries + 1} attempts failed for {func.__name__}")

        raise last_exception
    return wrapper
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# =============================================================================
|
| 89 |
+
# CACHE MANAGER
|
| 90 |
+
# =============================================================================
|
| 91 |
+
class CacheManager:
    """Manages caching of API responses with per-endpoint, per-season storage."""

    def __init__(self, cache_dir: Path = API_CACHE_DIR):
        # Parquet files live directly in cache_dir; progress is tracked in a
        # sibling JSON checkpoint so interrupted runs can resume.
        self.cache_dir = cache_dir
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self.checkpoint_file = cache_dir / "checkpoint.json"

    def get_cache_path(self, endpoint: str, season: str, entity_id: Optional[str] = None) -> Path:
        """Build the parquet path for an endpoint/season (optionally per entity)."""
        suffix = f"_{entity_id}" if entity_id else ""
        return self.cache_dir / f"{endpoint}_{season}{suffix}.parquet"

    def is_cached(self, endpoint: str, season: str, entity_id: Optional[str] = None) -> bool:
        """True if a cache file already exists for this endpoint/season."""
        return self.get_cache_path(endpoint, season, entity_id).exists()

    def load_cached(self, endpoint: str, season: str, entity_id: Optional[str] = None) -> Optional[pd.DataFrame]:
        """Load a cached frame, or None when nothing has been cached yet."""
        path = self.get_cache_path(endpoint, season, entity_id)
        return pd.read_parquet(path) if path.exists() else None

    def save_to_cache(self, df: pd.DataFrame, endpoint: str, season: str, entity_id: Optional[str] = None):
        """Write a frame to its cache slot and log the row count."""
        target = self.get_cache_path(endpoint, season, entity_id)
        df.to_parquet(target, index=False)
        logger.info(f"Cached {len(df)} rows to {target.name}")

    def load_checkpoint(self) -> Dict[str, Any]:
        """Read the resume checkpoint, falling back to an empty template."""
        if not self.checkpoint_file.exists():
            return {"completed_seasons": [], "last_endpoint": None, "last_season": None}
        with open(self.checkpoint_file, 'r') as f:
            return json.load(f)

    def save_checkpoint(self, checkpoint: Dict[str, Any]):
        """Persist the checkpoint as pretty-printed JSON."""
        with open(self.checkpoint_file, 'w') as f:
            json.dump(checkpoint, f, indent=2)

    def mark_season_complete(self, endpoint: str, season: str):
        """Record that an endpoint/season pair finished collecting."""
        checkpoint = self.load_checkpoint()
        key = f"{endpoint}_{season}"
        if key not in checkpoint["completed_seasons"]:
            checkpoint["completed_seasons"].append(key)
        checkpoint["last_endpoint"] = endpoint
        checkpoint["last_season"] = season
        self.save_checkpoint(checkpoint)

    def is_season_complete(self, endpoint: str, season: str) -> bool:
        """True if this endpoint/season was previously marked complete."""
        return f"{endpoint}_{season}" in self.load_checkpoint()["completed_seasons"]
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# =============================================================================
|
| 143 |
+
# GAME DATA COLLECTOR
|
| 144 |
+
# =============================================================================
|
| 145 |
+
class GameDataCollector:
    """Collects game-level data via the LeagueGameFinder endpoint."""

    def __init__(self):
        self.cache = CacheManager()

    @retry_with_backoff
    def _fetch_season_games(self, season: str) -> pd.DataFrame:
        """Fetch all NBA (league id "00") games for one season from the API."""
        games = leaguegamefinder.LeagueGameFinder(
            season_nullable=season,
            league_id_nullable="00"
        )
        return games.get_data_frames()[0]

    def get_season_games(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Return one season's games, served from cache unless force_refresh.

        A successful fetch is cached and the season is marked complete in the
        resume checkpoint.
        """
        if not force_refresh and self.cache.is_cached("games", season):
            logger.info(f"Loading cached games for {season}")
            return self.cache.load_cached("games", season)

        logger.info(f"Fetching games for {season} from API...")
        df = self._fetch_season_games(season)
        self.cache.save_to_cache(df, "games", season)
        self.cache.mark_season_complete("games", season)
        return df

    def collect_all_seasons(self, seasons: List[str] = None, force_refresh: bool = False) -> pd.DataFrame:
        """Collect games for all seasons (default SEASON_STRINGS), save and
        return one combined DataFrame (all_games.parquet)."""
        if seasons is None:
            seasons = SEASON_STRINGS

        all_games = []
        for season in tqdm(seasons, desc="Collecting games"):
            if not force_refresh and self.cache.is_season_complete("games", season):
                df = self.cache.load_cached("games", season)
                # Bug fix: the checkpoint can be stale (cache file deleted),
                # in which case load_cached returns None and pd.concat below
                # would fail.  Re-fetch instead of appending None.
                if df is None:
                    df = self.get_season_games(season, force_refresh)
            else:
                df = self.get_season_games(season, force_refresh)
            all_games.append(df)

        combined = pd.concat(all_games, ignore_index=True)
        combined.to_parquet(RAW_DATA_DIR / "all_games.parquet", index=False)
        logger.info(f"Saved {len(combined)} total games to all_games.parquet")
        return combined
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
# =============================================================================
|
| 189 |
+
# TEAM DATA COLLECTOR (ENHANCED)
|
| 190 |
+
# =============================================================================
|
| 191 |
+
class TeamDataCollector:
    """Collects comprehensive team statistics.

    Covers six data families per season: basic per-game stats, estimated
    advanced metrics, clutch splits, hustle stats, tracked defense, and
    standings.  Every family is cached per endpoint/season; optional
    endpoints degrade to an empty DataFrame instead of aborting the run.
    """

    def __init__(self):
        self.cache = CacheManager()

    # -- raw API fetchers -----------------------------------------------------

    @retry_with_backoff
    def _fetch_team_stats(self, season: str) -> pd.DataFrame:
        """Basic per-game team stats; propagates the error if retries fail."""
        stats = leaguedashteamstats.LeagueDashTeamStats(
            season=season,
            per_mode_detailed="PerGame"
        )
        return stats.get_data_frames()[0]

    @retry_with_backoff
    def _fetch_team_advanced(self, season: str) -> pd.DataFrame:
        """Fetch advanced team metrics: NET_RTG, PACE, PIE, TS%, eFG%"""
        try:
            stats = teamestimatedmetrics.TeamEstimatedMetrics(season=season)
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"TeamEstimatedMetrics failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_team_clutch(self, season: str) -> pd.DataFrame:
        """Fetch clutch stats: performance in close games"""
        try:
            stats = leaguedashteamclutch.LeagueDashTeamClutch(
                season=season,
                clutch_time="Last 5 Minutes",
                point_diff=5
            )
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Team clutch stats failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_team_hustle(self, season: str) -> pd.DataFrame:
        """Fetch hustle stats: deflections, loose balls, charges"""
        try:
            stats = leaguehustlestatsteam.LeagueHustleStatsTeam(season=season)
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Team hustle stats failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_team_defense(self, season: str) -> pd.DataFrame:
        """Fetch defensive stats"""
        try:
            stats = leaguedashptteamdefend.LeagueDashPtTeamDefend(
                season=season,
                defense_category="Overall"
            )
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Team defense stats failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_team_shooting(self, season: str) -> pd.DataFrame:
        """Fetch team shooting stats"""
        try:
            stats = leaguedashteamptshot.LeagueDashTeamPtShot(season=season)
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Team shooting stats failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_standings(self, season: str) -> pd.DataFrame:
        """Fetch standings with win streaks"""
        try:
            standings = leaguestandingsv3.LeagueStandingsV3(
                season=season,
                league_id="00"
            )
            return standings.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Standings failed for {season}: {e}")
            return pd.DataFrame()

    # -- cached accessors -----------------------------------------------------

    def _load_or_fetch(self, endpoint: str, season: str, fetcher, fetch_msg: str,
                       force_refresh: bool, cache_hit_msg: Optional[str] = None,
                       cache_empty: bool = False) -> pd.DataFrame:
        """Shared cache-or-fetch logic behind every public accessor.

        fetcher: callable(season) -> DataFrame.
        cache_hit_msg: optional message to log when serving from cache.
        cache_empty: if True, cache the result even when the frame is empty
            (matches the original unconditional caching of basic team stats).
        """
        if not force_refresh and self.cache.is_cached(endpoint, season):
            if cache_hit_msg:
                logger.info(cache_hit_msg)
            return self.cache.load_cached(endpoint, season)

        logger.info(fetch_msg)
        df = fetcher(season)
        if cache_empty or not df.empty:
            self.cache.save_to_cache(df, endpoint, season)
        return df

    def get_team_stats(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Per-game team stats for one season (cached)."""
        return self._load_or_fetch(
            "team_stats", season, self._fetch_team_stats,
            f"Fetching team stats for {season}...", force_refresh,
            cache_hit_msg=f"Loading cached team stats for {season}",
            cache_empty=True,
        )

    def get_team_advanced(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Advanced team metrics for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "team_advanced", season, self._fetch_team_advanced,
            f"Fetching team advanced metrics for {season}...", force_refresh,
        )

    def get_team_clutch(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Clutch-time team stats for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "team_clutch", season, self._fetch_team_clutch,
            f"Fetching team clutch stats for {season}...", force_refresh,
        )

    def get_team_hustle(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Hustle team stats for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "team_hustle", season, self._fetch_team_hustle,
            f"Fetching team hustle stats for {season}...", force_refresh,
        )

    def get_team_defense(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Tracked defensive team stats for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "team_defense", season, self._fetch_team_defense,
            f"Fetching team defense stats for {season}...", force_refresh,
        )

    def get_standings(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """League standings for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "standings", season, self._fetch_standings,
            f"Fetching standings for {season}...", force_refresh,
        )

    def collect_all_seasons(self, seasons: List[str] = None, force_refresh: bool = False) -> Dict[str, pd.DataFrame]:
        """Collect every team data family for all seasons.

        Saves one combined ``all_<family>.parquet`` per family and returns a
        dict mapping family name -> list of per-season DataFrames (matching
        the original return shape).
        """
        if seasons is None:
            seasons = SEASON_STRINGS

        getters = [
            ("team_stats", self.get_team_stats),
            ("team_advanced", self.get_team_advanced),
            ("team_clutch", self.get_team_clutch),
            ("team_hustle", self.get_team_hustle),
            ("team_defense", self.get_team_defense),
            ("standings", self.get_standings),
        ]
        results = {key: [] for key, _ in getters}

        for season in tqdm(seasons, desc="Collecting team data"):
            for key, getter in getters:
                df = getter(season, force_refresh)
                # Basic stats are appended unconditionally (original behavior);
                # optional endpoints skip empty frames.
                if key == "team_stats" or not df.empty:
                    df["SEASON"] = season
                    results[key].append(df)

        # Save all combined data
        for key, data in results.items():
            if data:
                combined = pd.concat(data, ignore_index=True)
                combined.to_parquet(RAW_DATA_DIR / f"all_{key}.parquet", index=False)
                logger.info(f"Saved {len(combined)} rows to all_{key}.parquet")

        return results
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
# =============================================================================
|
| 395 |
+
# PLAYER DATA COLLECTOR (ENHANCED)
|
| 396 |
+
# =============================================================================
|
| 397 |
+
class PlayerDataCollector:
    """Collects comprehensive player statistics.

    Covers four data families per season: basic per-game stats, estimated
    advanced metrics, clutch splits, and hustle stats.  Every family is
    cached per endpoint/season; optional endpoints degrade to an empty
    DataFrame instead of aborting the run.
    """

    def __init__(self):
        self.cache = CacheManager()

    # -- raw API fetchers -----------------------------------------------------

    @retry_with_backoff
    def _fetch_player_stats(self, season: str) -> pd.DataFrame:
        """Basic per-game player stats; propagates the error if retries fail."""
        stats = leaguedashplayerstats.LeagueDashPlayerStats(
            season=season,
            per_mode_detailed="PerGame"
        )
        return stats.get_data_frames()[0]

    @retry_with_backoff
    def _fetch_player_advanced(self, season: str) -> pd.DataFrame:
        """Fetch advanced player metrics: PER, USG%, TS%, eFG%"""
        try:
            stats = playerestimatedmetrics.PlayerEstimatedMetrics(season=season)
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"PlayerEstimatedMetrics failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_player_clutch(self, season: str) -> pd.DataFrame:
        """Fetch player clutch stats"""
        try:
            stats = leaguedashplayerclutch.LeagueDashPlayerClutch(
                season=season,
                clutch_time="Last 5 Minutes",
                point_diff=5
            )
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Player clutch stats failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_player_hustle(self, season: str) -> pd.DataFrame:
        """Fetch player hustle stats"""
        try:
            stats = leaguehustlestatsplayer.LeagueHustleStatsPlayer(season=season)
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Player hustle stats failed for {season}: {e}")
            return pd.DataFrame()

    @retry_with_backoff
    def _fetch_player_shooting(self, season: str) -> pd.DataFrame:
        """Fetch player shooting stats"""
        # NOTE(review): currently not wired into a cached accessor or
        # collect_all_seasons — kept for parity with the original code.
        try:
            stats = leaguedashplayerptshot.LeagueDashPlayerPtShot(season=season)
            return stats.get_data_frames()[0]
        except Exception as e:
            logger.warning(f"Player shooting stats failed for {season}: {e}")
            return pd.DataFrame()

    # -- cached accessors -----------------------------------------------------

    def _load_or_fetch(self, endpoint: str, season: str, fetcher, fetch_msg: str,
                       force_refresh: bool, cache_hit_msg: Optional[str] = None,
                       cache_empty: bool = False) -> pd.DataFrame:
        """Shared cache-or-fetch logic behind every public accessor.

        fetcher: callable(season) -> DataFrame.
        cache_hit_msg: optional message to log when serving from cache.
        cache_empty: if True, cache the result even when the frame is empty
            (matches the original unconditional caching of basic stats).
        """
        if not force_refresh and self.cache.is_cached(endpoint, season):
            if cache_hit_msg:
                logger.info(cache_hit_msg)
            return self.cache.load_cached(endpoint, season)

        logger.info(fetch_msg)
        df = fetcher(season)
        if cache_empty or not df.empty:
            self.cache.save_to_cache(df, endpoint, season)
        return df

    def get_player_stats(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Per-game player stats for one season (cached)."""
        return self._load_or_fetch(
            "player_stats", season, self._fetch_player_stats,
            f"Fetching player stats for {season}...", force_refresh,
            cache_hit_msg=f"Loading cached player stats for {season}",
            cache_empty=True,
        )

    def get_player_advanced(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Advanced player metrics for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "player_advanced", season, self._fetch_player_advanced,
            f"Fetching player advanced metrics for {season}...", force_refresh,
        )

    def get_player_clutch(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Clutch-time player stats for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "player_clutch", season, self._fetch_player_clutch,
            f"Fetching player clutch stats for {season}...", force_refresh,
        )

    def get_player_hustle(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Hustle player stats for one season (cached; may be empty)."""
        return self._load_or_fetch(
            "player_hustle", season, self._fetch_player_hustle,
            f"Fetching player hustle stats for {season}...", force_refresh,
        )

    def collect_all_seasons(self, seasons: List[str] = None, force_refresh: bool = False) -> Dict[str, pd.DataFrame]:
        """Collect every player data family for all seasons.

        Saves one combined ``all_<family>.parquet`` per family and returns a
        dict mapping family name -> list of per-season DataFrames (matching
        the original return shape).
        """
        if seasons is None:
            seasons = SEASON_STRINGS

        getters = [
            ("player_stats", self.get_player_stats),
            ("player_advanced", self.get_player_advanced),
            ("player_clutch", self.get_player_clutch),
            ("player_hustle", self.get_player_hustle),
        ]
        results = {key: [] for key, _ in getters}

        for season in tqdm(seasons, desc="Collecting player data"):
            for key, getter in getters:
                df = getter(season, force_refresh)
                # Basic stats are appended unconditionally (original behavior);
                # optional endpoints skip empty frames.
                if key == "player_stats" or not df.empty:
                    df["SEASON"] = season
                    results[key].append(df)

        # Save all combined data
        for key, data in results.items():
            if data:
                combined = pd.concat(data, ignore_index=True)
                combined.to_parquet(RAW_DATA_DIR / f"all_{key}.parquet", index=False)
                logger.info(f"Saved {len(combined)} rows to all_{key}.parquet")

        return results
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
# =============================================================================
|
| 541 |
+
# LEAGUE LEADERS COLLECTOR
|
| 542 |
+
# =============================================================================
|
| 543 |
+
class LeagueLeadersCollector:
    """Collects league leaders data."""

    def __init__(self):
        self.cache = CacheManager()

    @retry_with_backoff
    def _fetch_leaders(self, season: str, stat_category: str = "PTS") -> pd.DataFrame:
        """Fetch one season's leaderboard; PTS is the default category."""
        response = leagueleaders.LeagueLeaders(
            season=season,
            stat_category_abbreviation=stat_category
        )
        return response.get_data_frames()[0]

    def get_leaders(self, season: str, force_refresh: bool = False) -> pd.DataFrame:
        """Return the season's leaders, from cache unless force_refresh."""
        if self.cache.is_cached("leaders", season) and not force_refresh:
            return self.cache.load_cached("leaders", season)

        logger.info(f"Fetching league leaders for {season}...")
        leaders_df = self._fetch_leaders(season)
        self.cache.save_to_cache(leaders_df, "leaders", season)
        return leaders_df
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
# =============================================================================
|
| 568 |
+
# MASTER COLLECTOR
|
| 569 |
+
# =============================================================================
|
| 570 |
+
class NBADataCollector:
    """Master collector that orchestrates ALL data collection."""

    def __init__(self):
        self.games = GameDataCollector()
        self.teams = TeamDataCollector()
        self.players = PlayerDataCollector()
        self.leaders = LeagueLeadersCollector()
        self.cache = CacheManager()

    @staticmethod
    def _banner(title: str, leading_newline: bool = False):
        """Log a section header framed by '=' rules (identical log output)."""
        logger.info(("\n" + "=" * 50) if leading_newline else "=" * 50)
        logger.info(title)
        logger.info("=" * 50)

    def collect_all(self, seasons: List[str] = None, force_refresh: bool = False):
        """Collect ALL data for specified seasons."""
        if seasons is None:
            seasons = SEASON_STRINGS

        logger.info(f"Starting COMPREHENSIVE data collection for {len(seasons)} seasons...")
        logger.info(f"Seasons: {seasons[0]} to {seasons[-1]}")
        logger.info("This will take several hours. Data is cached, so it can resume if interrupted.\n")

        # Phase 1: games
        self._banner("=== PHASE 1: Collecting Games ===")
        self.games.collect_all_seasons(seasons, force_refresh)

        # Phase 2: team stats (all types)
        self._banner("=== PHASE 2: Collecting Team Stats (6 data types) ===", leading_newline=True)
        self.teams.collect_all_seasons(seasons, force_refresh)

        # Phase 3: player stats (all types)
        self._banner("=== PHASE 3: Collecting Player Stats (4 data types) ===", leading_newline=True)
        self.players.collect_all_seasons(seasons, force_refresh)

        self._banner("=== DATA COLLECTION COMPLETE ===", leading_newline=True)
        logger.info(f"Data saved to: {RAW_DATA_DIR}")

        # Summarize every parquet produced, with sizes in MB.
        parquet_files = list(RAW_DATA_DIR.glob("*.parquet"))
        logger.info(f"\nGenerated {len(parquet_files)} data files:")
        for f in parquet_files:
            size_mb = f.stat().st_size / (1024 * 1024)
            logger.info(f" - {f.name} ({size_mb:.2f} MB)")
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
# =============================================================================
|
| 621 |
+
# CLI INTERFACE
|
| 622 |
+
# =============================================================================
|
| 623 |
+
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser(description="NBA Comprehensive Data Collector")
    cli.add_argument("--seasons", nargs="+", help="Specific seasons to collect (e.g., 2023-24)")
    cli.add_argument("--force", action="store_true", help="Force refresh, ignore cache")
    cli.add_argument("--games-only", action="store_true", help="Only collect games")
    cli.add_argument("--teams-only", action="store_true", help="Only collect team stats")
    cli.add_argument("--players-only", action="store_true", help="Only collect player stats")
    cli.add_argument("--test", action="store_true", help="Test with single season")
    opts = cli.parse_args()

    collector = NBADataCollector()

    if opts.test:
        # Smoke-test the full pipeline against a single recent season.
        print("Running in test mode (single season 2024-25)...")
        collector.collect_all(["2024-25"], opts.force)
        print("Test complete!")
    elif opts.games_only:
        collector.games.collect_all_seasons(opts.seasons, opts.force)
    elif opts.teams_only:
        collector.teams.collect_all_seasons(opts.seasons, opts.force)
    elif opts.players_only:
        collector.players.collect_all_seasons(opts.seasons, opts.force)
    else:
        collector.collect_all(opts.seasons, opts.force)
|
src/feature_engineering.py
ADDED
|
@@ -0,0 +1,695 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Comprehensive Feature Engineering
|
| 3 |
+
=============================================================
|
| 4 |
+
Time-aware feature generation using ALL available stats:
|
| 5 |
+
- ELO ratings
|
| 6 |
+
- Era normalization (Z-score by season)
|
| 7 |
+
- Rolling averages (basic + advanced)
|
| 8 |
+
- Clutch performance
|
| 9 |
+
- Hustle metrics
|
| 10 |
+
- Defensive ratings
|
| 11 |
+
- Data leakage prevention
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import pandas as pd
|
| 15 |
+
import numpy as np
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from typing import Dict, List, Optional, Tuple
|
| 18 |
+
from datetime import datetime
|
| 19 |
+
import logging
|
| 20 |
+
|
| 21 |
+
from src.config import (
|
| 22 |
+
ELO_CONFIG,
|
| 23 |
+
FEATURE_CONFIG,
|
| 24 |
+
RAW_DATA_DIR,
|
| 25 |
+
PROCESSED_DATA_DIR,
|
| 26 |
+
NBA_TEAMS
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
# =============================================================================
# ALL STAT COLUMNS BY CATEGORY
# =============================================================================
# Column names as they appear in the collected parquet files; used by
# StatLoader/FeatureGenerator to pick which columns become features.

# Traditional per-game box-score columns.
BASIC_STATS = ["PTS", "AST", "REB", "STL", "BLK", "TOV", "FGM", "FGA", "FG_PCT",
               "FG3M", "FG3A", "FG3_PCT", "FTM", "FTA", "FT_PCT", "OREB", "DREB"]

# "Estimated" advanced team metrics (E_-prefixed family: ratings, pace,
# rebound/turnover/shooting percentages).
ADVANCED_STATS = ["E_OFF_RATING", "E_DEF_RATING", "E_NET_RATING", "E_PACE",
                  "E_AST_RATIO", "E_OREB_PCT", "E_DREB_PCT", "E_REB_PCT",
                  "E_TM_TOV_PCT", "E_EFG_PCT", "E_TS_PCT"]

# Clutch-time (close-and-late) performance columns.
CLUTCH_STATS = ["CLUTCH_PTS", "CLUTCH_FG_PCT", "CLUTCH_FG3_PCT", "CLUTCH_PLUS_MINUS"]

# Effort/hustle tracking columns.
HUSTLE_STATS = ["DEFLECTIONS", "LOOSE_BALLS_RECOVERED", "CHARGES_DRAWN",
                "CONTESTED_SHOTS", "SCREEN_ASSISTS"]
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# =============================================================================
|
| 48 |
+
# ELO RATING SYSTEM
|
| 49 |
+
# =============================================================================
|
| 50 |
+
class ELOCalculator:
    """
    Maintains per-team ELO ratings.

    ELO is extremely predictive in sports - can add +3-5% accuracy.
    Ratings are zero-sum: a winner's gain equals the loser's loss.
    """

    def __init__(self, config=ELO_CONFIG):
        # All tunables come from the shared ELO_CONFIG object.
        self.initial_rating = config.initial_rating
        self.k_factor = config.k_factor
        self.home_advantage = config.home_advantage
        self.season_regression = config.season_regression
        self.ratings: Dict[int, float] = {}

    def reset_ratings(self):
        """Forget all ratings; unseen teams restart at the initial rating."""
        self.ratings = {}

    def get_rating(self, team_id: int) -> float:
        """Return the current rating, seeding unseen teams with the initial rating."""
        return self.ratings.setdefault(team_id, self.initial_rating)

    def regress_to_mean(self):
        """Pull every rating part-way toward the current league mean (season rollover)."""
        if self.ratings:
            league_mean = np.mean(list(self.ratings.values()))
        else:
            league_mean = self.initial_rating
        blend = self.season_regression
        for tid, rating in self.ratings.items():
            # Weighted blend: `blend` toward the mean, the rest kept.
            self.ratings[tid] = blend * league_mean + (1 - blend) * rating

    def expected_win_probability(self, team_rating: float, opponent_rating: float,
                                 is_home: bool = False) -> float:
        """Standard logistic ELO expectation, with a home-court rating bonus."""
        edge = team_rating - opponent_rating
        if is_home:
            edge += self.home_advantage
        # Classic ELO curve: 400 rating points ~ 10x odds.
        return 1.0 / (1.0 + 10 ** (-edge / 400))

    def update_ratings(self, team_id: int, opponent_id: int,
                       won: bool, is_home: bool = False) -> Tuple[float, float]:
        """Apply a zero-sum K-factor update after a game.

        Returns the (team, opponent) ratings after the update.
        """
        r_team = self.get_rating(team_id)
        r_opp = self.get_rating(opponent_id)

        outcome = 1.0 if won else 0.0
        shift = self.k_factor * (outcome - self.expected_win_probability(r_team, r_opp, is_home))

        self.ratings[team_id] = r_team + shift
        self.ratings[opponent_id] = r_opp - shift
        return self.ratings[team_id], self.ratings[opponent_id]

    def calculate_game_features(self, team_id: int, opponent_id: int,
                                is_home: bool) -> Dict[str, float]:
        """Build the pre-game ELO feature dict for one matchup."""
        r_team = self.get_rating(team_id)
        r_opp = self.get_rating(opponent_id)
        return {
            "team_elo": r_team,
            "opponent_elo": r_opp,
            "elo_diff": r_team - r_opp,
            "elo_win_prob": self.expected_win_probability(r_team, r_opp, is_home),
            "home_elo_boost": self.home_advantage if is_home else 0
        }
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# =============================================================================
|
| 115 |
+
# ERA NORMALIZATION
|
| 116 |
+
# =============================================================================
|
| 117 |
+
class EraNormalizer:
    """Z-score normalization within season to handle era differences.

    Fit records each column's per-season (mean, std); transform appends
    ``<col>_zscore`` columns computed against that season's distribution.
    """

    def __init__(self):
        # season -> {column -> (mean, std)}; std is clamped to 1.0 when not > 0
        self.season_stats: Dict[str, Dict[str, Tuple[float, float]]] = {}

    def fit_season(self, df: pd.DataFrame, season: str, stat_columns: List[str]):
        """Record mean/std for each requested column present in *df*."""
        stats = self.season_stats[season] = {}
        for name in stat_columns:
            if name not in df.columns:
                continue
            mu = df[name].mean()
            sigma = df[name].std()
            # Guard against zero (or NaN) spread so transform never divides by zero
            stats[name] = (mu, sigma if sigma > 0 else 1.0)

    def transform(self, df: pd.DataFrame, season: str, stat_columns: List[str]) -> pd.DataFrame:
        """Return a copy of *df* with ``<col>_zscore`` columns for fitted stats.

        Unknown seasons are returned unchanged (still a copy).
        """
        out = df.copy()
        fitted = self.season_stats.get(season)
        if fitted is None:
            return out

        for name in stat_columns:
            if name in out.columns and name in fitted:
                mu, sigma = fitted[name]
                out[f"{name}_zscore"] = (out[name] - mu) / sigma

        return out
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
# =============================================================================
|
| 145 |
+
# COMPREHENSIVE STAT LOADER
|
| 146 |
+
# =============================================================================
|
| 147 |
+
class StatLoader:
    """Loads and merges all collected stats for a team/player.

    Parquet files under RAW_DATA_DIR are read lazily on first use and cached
    on the instance. Missing or unreadable files degrade to empty DataFrames
    so feature generation can proceed with whatever data is available.
    """

    def __init__(self):
        # Lazily-populated caches; each becomes a DataFrame (possibly empty)
        # after load_all_stats() runs.
        self.team_stats = None
        self.team_advanced = None
        self.team_clutch = None
        self.team_hustle = None
        self.team_defense = None
        self.player_stats = None
        self.player_advanced = None
        self._loaded = False

    def _read_stat_file(self, filename: str, label: str) -> pd.DataFrame:
        """Read one parquet file from RAW_DATA_DIR, empty DataFrame on failure.

        Replaces the previous bare ``except:`` clauses, which also swallowed
        KeyboardInterrupt/SystemExit and hid the failure reason entirely.
        """
        try:
            df = pd.read_parquet(RAW_DATA_DIR / filename)
            logger.info(f"  Loaded {label}: {len(df)} rows")
            return df
        except Exception as exc:
            # Best-effort by design: log and continue with an empty frame.
            logger.warning(f"  Could not load {label} ({filename}): {exc}")
            return pd.DataFrame()

    def load_all_stats(self):
        """Load all available stat files (idempotent)."""
        if self._loaded:
            return

        logger.info("Loading all stat files...")

        # Team-level files
        self.team_stats = self._read_stat_file("all_team_stats.parquet", "team_stats")
        self.team_advanced = self._read_stat_file("all_team_advanced.parquet", "team_advanced")
        self.team_clutch = self._read_stat_file("all_team_clutch.parquet", "team_clutch")
        self.team_hustle = self._read_stat_file("all_team_hustle.parquet", "team_hustle")
        self.team_defense = self._read_stat_file("all_team_defense.parquet", "team_defense")

        # Player-level files
        self.player_stats = self._read_stat_file("all_player_stats.parquet", "player_stats")
        self.player_advanced = self._read_stat_file("all_player_advanced.parquet", "player_advanced")

        self._loaded = True

    def get_team_season_stats(self, team_id: int, season: str) -> Dict[str, float]:
        """Get all stats for a team in a season.

        Returns a flat ``team_<STAT>`` feature dict; categories whose file is
        missing, or where the team/season row is absent, are simply omitted.
        """
        self.load_all_stats()

        features = {}

        # Basic team stats
        if not self.team_stats.empty:
            mask = (self.team_stats["TEAM_ID"] == team_id) & (self.team_stats["SEASON"] == season)
            row = self.team_stats[mask]
            if not row.empty:
                row = row.iloc[0]
                for col in BASIC_STATS:
                    if col in row.index:
                        features[f"team_{col}"] = row[col]

        # Advanced metrics
        if not self.team_advanced.empty:
            mask = (self.team_advanced["TEAM_ID"] == team_id) & (self.team_advanced["SEASON"] == season)
            row = self.team_advanced[mask]
            if not row.empty:
                row = row.iloc[0]
                for col in ADVANCED_STATS:
                    if col in row.index:
                        features[f"team_{col}"] = row[col]

        # Clutch stats
        if not self.team_clutch.empty:
            mask = (self.team_clutch["TEAM_ID"] == team_id) & (self.team_clutch["SEASON"] == season)
            row = self.team_clutch[mask]
            if not row.empty:
                row = row.iloc[0]
                features["team_clutch_pts"] = row.get("PTS", 0)
                features["team_clutch_fg_pct"] = row.get("FG_PCT", 0)
                features["team_clutch_plus_minus"] = row.get("PLUS_MINUS", 0)

        # Hustle stats
        if not self.team_hustle.empty:
            mask = (self.team_hustle["TEAM_ID"] == team_id) & (self.team_hustle["SEASON"] == season)
            row = self.team_hustle[mask]
            if not row.empty:
                row = row.iloc[0]
                for col in ["DEFLECTIONS", "LOOSE_BALLS_RECOVERED", "CHARGES_DRAWN",
                            "CONTESTED_SHOTS_2PT", "CONTESTED_SHOTS_3PT"]:
                    if col in row.index:
                        features[f"team_{col.lower()}"] = row[col]

        return features

    def get_team_top_players_stats(self, team_id: int, season: str, top_n: int = 5) -> Dict[str, float]:
        """Get aggregated stats for top N players (by minutes) on a team.

        Also emits ``star_concentration``: the top scorer's share of the
        top-N players' combined points.
        """
        self.load_all_stats()

        features = {}

        if self.player_stats.empty:
            return features

        # Get team's players for the season
        mask = (self.player_stats["TEAM_ID"] == team_id) & (self.player_stats["SEASON"] == season)
        team_players = self.player_stats[mask].copy()

        if team_players.empty:
            return features

        # Sort by minutes and get top players
        if "MIN" in team_players.columns:
            team_players = team_players.sort_values("MIN", ascending=False).head(top_n)

        # Aggregate stats (0 when the column is absent from the data)
        features["top_players_avg_pts"] = team_players["PTS"].mean() if "PTS" in team_players.columns else 0
        features["top_players_avg_ast"] = team_players["AST"].mean() if "AST" in team_players.columns else 0
        features["top_players_avg_reb"] = team_players["REB"].mean() if "REB" in team_players.columns else 0
        features["top_players_avg_stl"] = team_players["STL"].mean() if "STL" in team_players.columns else 0
        features["top_players_avg_blk"] = team_players["BLK"].mean() if "BLK" in team_players.columns else 0

        # Star player concentration (how much does top player score vs team)
        if "PTS" in team_players.columns and len(team_players) > 0:
            top_scorer_pts = team_players["PTS"].max()
            total_pts = team_players["PTS"].sum()
            features["star_concentration"] = top_scorer_pts / total_pts if total_pts > 0 else 0

        return features
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
# =============================================================================
|
| 299 |
+
# COMPREHENSIVE FEATURE GENERATOR
|
| 300 |
+
# =============================================================================
|
| 301 |
+
class FeatureGenerator:
    """Generates ALL features with strict data leakage prevention.

    Every per-game feature is computed only from games strictly before the
    game's date; season-level lookups go through StatLoader, and matchup
    strength through a shared ELOCalculator instance.
    """

    def __init__(self, config=FEATURE_CONFIG):
        # rolling_windows: list of look-back window sizes (in games)
        # min_games: minimum history required before trusting a rolling average
        self.rolling_windows = config.rolling_windows
        self.min_games = config.min_games_for_features
        self.elo = ELOCalculator()
        self.normalizer = EraNormalizer()
        self.stat_loader = StatLoader()

    # League-average fills for cold-start handling (typical NBA averages)
    # Class-level constant shared by all instances; values are hand-picked
    # approximations, not derived from the collected data.
    LEAGUE_AVERAGES = {
        "PTS": 112.0,
        "AST": 25.0,
        "REB": 44.0,
        "FG_PCT": 0.465,
        "FG3_PCT": 0.360,
        "FT_PCT": 0.780,
        "PLUS_MINUS": 0.0,
        "STL": 7.5,
        "BLK": 5.0,
        "DREB": 34.0,
    }

    def calculate_rolling_stats(self, team_games: pd.DataFrame,
                                current_date: datetime,
                                stat_columns: List[str]) -> Dict[str, float]:
        """
        Calculate rolling averages (time-aware) with cold-start handling.

        For early-season games with insufficient history, uses league-average
        fills instead of NaN to maintain prediction quality.

        Emits one ``<col>_last<window>`` feature per (column, window) pair.
        """
        # Only games strictly before current_date -> no leakage
        past_games = team_games[pd.to_datetime(team_games["GAME_DATE"]) < current_date]
        past_games = past_games.sort_values("GAME_DATE", ascending=False)

        features = {}
        games_available = len(past_games)

        for window in self.rolling_windows:
            recent_games = past_games.head(window)

            if len(recent_games) < self.min_games:
                # Cold-start: Use league averages instead of NaN
                for col in stat_columns:
                    league_avg = self.LEAGUE_AVERAGES.get(col, 0)

                    if games_available > 0 and col in past_games.columns:
                        # Blend available data with league average
                        # Weight: available_games / min_games
                        blend_weight = games_available / self.min_games
                        team_avg = past_games.head(games_available)[col].mean()
                        features[f"{col}_last{window}"] = (
                            blend_weight * team_avg +
                            (1 - blend_weight) * league_avg
                        )
                    else:
                        # No history (or column missing): pure league average
                        features[f"{col}_last{window}"] = league_avg
            else:
                for col in stat_columns:
                    if col in recent_games.columns:
                        features[f"{col}_last{window}"] = recent_games[col].mean()
                    else:
                        features[f"{col}_last{window}"] = self.LEAGUE_AVERAGES.get(col, 0)

        return features

    def calculate_defensive_stats(self, team_games: pd.DataFrame,
                                  current_date: datetime) -> Dict[str, float]:
        """Calculate defensive rolling stats over the last 10 games.

        Returns an empty dict when fewer than 3 prior games exist.
        """
        past_games = team_games[pd.to_datetime(team_games["GAME_DATE"]) < current_date]
        past_games = past_games.sort_values("GAME_DATE", ascending=False).head(10)

        features = {}

        if len(past_games) >= 3:
            for col in ["STL", "BLK", "DREB"]:
                if col in past_games.columns:
                    features[f"{col}_last10"] = past_games[col].mean()

            # Points allowed (opponent points)
            # This would need opponent data, so we estimate from +/-
            # (own points minus point differential = opponent points)
            if "PLUS_MINUS" in past_games.columns and "PTS" in past_games.columns:
                features["pts_allowed_last10"] = past_games["PTS"].mean() - past_games["PLUS_MINUS"].mean()

        return features

    def calculate_season_stats(self, team_games: pd.DataFrame,
                               current_date: datetime,
                               stat_columns: List[str]) -> Dict[str, float]:
        """Calculate season-to-date stats (time-aware).

        ``win_pct_season`` defaults to a neutral 0.5 when no games exist.
        """
        past_games = team_games[pd.to_datetime(team_games["GAME_DATE"]) < current_date]

        features = {}
        for col in stat_columns:
            if col in past_games.columns:
                features[f"{col}_season_avg"] = past_games[col].mean()

        # Win percentage
        if "WL" in past_games.columns:
            wins = (past_games["WL"] == "W").sum()
            total = len(past_games)
            features["win_pct_season"] = wins / total if total > 0 else 0.5
            features["games_played"] = total

        return features

    def calculate_momentum(self, team_games: pd.DataFrame,
                           current_date: datetime) -> Dict[str, float]:
        """Calculate momentum features (streaks, recent form).

        Only emitted once at least 5 (resp. 10) prior games exist.
        """
        past_games = team_games[pd.to_datetime(team_games["GAME_DATE"]) < current_date]
        past_games = past_games.sort_values("GAME_DATE", ascending=False)

        features = {}

        if len(past_games) >= 5:
            last5 = past_games.head(5)

            # Win streak indicators over the last 5 games
            wins_last5 = (last5["WL"] == "W").sum() if "WL" in last5.columns else 0
            features["wins_last5"] = wins_last5
            features["hot_streak"] = 1 if wins_last5 >= 4 else 0
            features["cold_streak"] = 1 if wins_last5 <= 1 else 0

            # Point differential trend
            if "PLUS_MINUS" in last5.columns:
                features["plus_minus_last5"] = last5["PLUS_MINUS"].mean()

        if len(past_games) >= 10:
            last10 = past_games.head(10)
            wins_last10 = (last10["WL"] == "W").sum() if "WL" in last10.columns else 0
            features["wins_last10"] = wins_last10

        return features

    def calculate_rest_fatigue(self, team_games: pd.DataFrame,
                               current_date: datetime) -> Dict[str, float]:
        """Calculate rest and fatigue features.

        With no prior games (e.g. season opener) assumes 3 days rest.
        """
        past_games = team_games[pd.to_datetime(team_games["GAME_DATE"]) < current_date]
        past_games = past_games.sort_values("GAME_DATE", ascending=False)

        features = {}

        if len(past_games) > 0:
            last_game = pd.to_datetime(past_games["GAME_DATE"].iloc[0])
            days_rest = (current_date - last_game).days
            features["days_rest"] = days_rest
            features["back_to_back"] = 1 if days_rest == 1 else 0
            features["well_rested"] = 1 if days_rest >= 3 else 0
        else:
            # No history: neutral "well rested" defaults
            features["days_rest"] = 3
            features["back_to_back"] = 0
            features["well_rested"] = 1

        # Games in last 7 days (fatigue)
        week_ago = current_date - pd.Timedelta(days=7)
        recent_games = past_games[pd.to_datetime(past_games["GAME_DATE"]) >= week_ago]
        features["games_last_week"] = len(recent_games)

        return features

    def calculate_form_index(self, team_games: pd.DataFrame,
                             current_date: datetime) -> Dict[str, float]:
        """
        Calculate exponentially-weighted form index for fast regime-change detection.

        Recent games are weighted more heavily than older games, allowing the model
        to quickly adapt when a team's performance regime changes (e.g., after
        major trades, injuries, or coaching changes).

        ``form_index`` is on a 0-1 scale (weighted win rate); 0.5 is neutral.
        """
        past_games = team_games[pd.to_datetime(team_games["GAME_DATE"]) < current_date]
        past_games = past_games.sort_values("GAME_DATE", ascending=False).head(10)

        features = {}

        if len(past_games) < 3:
            features["form_index"] = 0.5  # Neutral for cold start
            features["form_trend"] = 0.0
            return features

        # Exponential weights: most recent game has ~2x weight of 5th game
        # decay_rate=0.15 means game 5 has weight e^(-0.15*4) ≈ 0.55 vs 1.0 for game 1
        weights = np.exp(-np.arange(len(past_games)) * 0.15)
        weights = weights / weights.sum()  # Normalize to sum to 1

        # Win-based form index (0-1 scale)
        if "WL" in past_games.columns:
            wins = (past_games["WL"] == "W").astype(float).values
            form_index = (wins * weights).sum()
            features["form_index"] = form_index

            # Form trend: compare last 3 vs previous 3
            if len(past_games) >= 6:
                recent_3_wins = (past_games.head(3)["WL"] == "W").mean()
                prev_3_wins = (past_games.iloc[3:6]["WL"] == "W").mean()
                features["form_trend"] = recent_3_wins - prev_3_wins
            else:
                features["form_trend"] = 0.0
        else:
            features["form_index"] = 0.5
            features["form_trend"] = 0.0

        # Point differential form (exponentially weighted)
        if "PLUS_MINUS" in past_games.columns:
            pm_values = past_games["PLUS_MINUS"].fillna(0).values
            features["form_plus_minus"] = (pm_values * weights).sum()

        return features

    def generate_game_features(self, games_df: pd.DataFrame,
                               game_row: pd.Series,
                               season: Optional[str] = None) -> Dict[str, float]:
        """Generate ALL features for a single game prediction.

        Args:
            games_df: Full game log (all teams) used for history lookups.
            game_row: One row of that log (the game to featurize).
            season: Season string like "2023-24" for season-level stat lookups;
                when None those lookups are skipped.
        """
        game_date = pd.to_datetime(game_row["GAME_DATE"])
        team_id = game_row["TEAM_ID"]
        matchup = game_row.get("MATCHUP", "")
        # MATCHUP contains "@" for away games ("LAL @ BOS") -- assumed format,
        # so an empty matchup defaults to home.
        is_home = "@" not in matchup

        # Get opponent ID: last MATCHUP token is the opponent abbreviation
        opponent_abbrev = matchup.split(" ")[-1] if matchup else ""
        opponent_id = next(
            (tid for tid, abbrev in NBA_TEAMS.items() if abbrev == opponent_abbrev),
            None
        )

        # Get team's past games (strictly before this game -> no leakage)
        team_games = games_df[
            (games_df["TEAM_ID"] == team_id) &
            (pd.to_datetime(games_df["GAME_DATE"]) < game_date)
        ]

        # Start with basic features
        features = {"is_home": 1 if is_home else 0}

        # ELO features (skipped when the opponent can't be resolved)
        if opponent_id:
            elo_features = self.elo.calculate_game_features(team_id, opponent_id, is_home)
            features.update(elo_features)

        # Rolling stats (basic)
        basic_cols = ["PTS", "AST", "REB", "FG_PCT", "FG3_PCT", "FT_PCT", "PLUS_MINUS"]
        rolling_features = self.calculate_rolling_stats(team_games, game_date, basic_cols)
        features.update(rolling_features)

        # Defensive stats
        def_features = self.calculate_defensive_stats(team_games, game_date)
        features.update(def_features)

        # Season-to-date stats
        season_features = self.calculate_season_stats(team_games, game_date, basic_cols)
        features.update(season_features)

        # Momentum features
        momentum_features = self.calculate_momentum(team_games, game_date)
        features.update(momentum_features)

        # Rest/fatigue features
        rest_features = self.calculate_rest_fatigue(team_games, game_date)
        features.update(rest_features)

        # Form index (exponentially-weighted recent performance)
        form_features = self.calculate_form_index(team_games, game_date)
        features.update(form_features)

        # Season-level team stats (advanced, clutch, hustle)
        if season:
            team_season_stats = self.stat_loader.get_team_season_stats(team_id, season)
            features.update(team_season_stats)

            # Top players stats
            player_features = self.stat_loader.get_team_top_players_stats(team_id, season)
            features.update(player_features)

        return features
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
# =============================================================================
|
| 578 |
+
# BATCH PROCESSOR
|
| 579 |
+
# =============================================================================
|
| 580 |
+
def process_all_games(games_df: pd.DataFrame,
                      output_path: Optional[Path] = None) -> pd.DataFrame:
    """Process ALL games with comprehensive features.

    Iterates chronologically so each game's features use only prior games
    (no data leakage), regresses ELO toward the mean at season boundaries,
    and updates ELO after each completed game.

    Args:
        games_df: Long-format game log with one row per (game, team).
        output_path: Optional parquet destination for the feature table.

    Returns:
        DataFrame with one feature row per input game row.
    """
    logger.info(f"Processing {len(games_df)} games with COMPREHENSIVE features...")

    # Chronological order is required for correct ELO evolution
    games_df = games_df.sort_values("GAME_DATE").copy()

    generator = FeatureGenerator()
    all_features = []
    current_season = None

    from tqdm import tqdm

    def _opponent_id_from_matchup(matchup: str) -> Optional[int]:
        # MATCHUP looks like "LAL vs. BOS" / "LAL @ BOS"; the last token is
        # the opponent abbreviation. Returns None when it can't be resolved.
        abbrev = matchup.split(" ")[-1] if matchup else ""
        return next((tid for tid, ab in NBA_TEAMS.items() if ab == abbrev), None)

    for idx, row in tqdm(games_df.iterrows(), total=len(games_df), desc="Processing games"):
        season = row.get("SEASON_ID", "")

        # Parse SEASON_ID (e.g. "22023") into a "2023-24" season string for stat lookup
        if isinstance(season, str) and len(season) >= 5:
            year = season[1:5]
            season_str = f"{year}-{str(int(year)+1)[-2:]}"
        else:
            season_str = None

        # Regress ELO at season change
        if season != current_season:
            if current_season is not None:
                generator.elo.regress_to_mean()
            current_season = season

        # Generate features (pre-game state only)
        features = generator.generate_game_features(games_df, row, season_str)
        features["GAME_ID"] = row["GAME_ID"]
        features["TEAM_ID"] = row["TEAM_ID"]
        features["GAME_DATE"] = row["GAME_DATE"]
        features["SEASON_ID"] = row.get("SEASON_ID", "")
        features["WL"] = row.get("WL", None)

        all_features.append(features)

        # Update ELO after the game, once the result is known.
        # Fix: compare opponent_elo against None instead of truthiness -- a
        # legitimate 0.0 rating would previously have skipped the update.
        if row.get("WL") and features.get("opponent_elo") is not None:
            opponent_id = _opponent_id_from_matchup(row.get("MATCHUP", ""))
            if opponent_id:
                won = row["WL"] == "W"
                is_home = "@" not in row.get("MATCHUP", "")
                generator.elo.update_ratings(row["TEAM_ID"], opponent_id, won, is_home)

    result_df = pd.DataFrame(all_features)

    if output_path:
        # Fix: create the parent of the actual destination; the old code made
        # PROCESSED_DATA_DIR even when output_path pointed somewhere else.
        Path(output_path).parent.mkdir(parents=True, exist_ok=True)
        result_df.to_parquet(output_path, index=False)
        logger.info(f"Saved features to {output_path}")

    return result_df
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
# =============================================================================
|
| 642 |
+
# CLI INTERFACE
|
| 643 |
+
# =============================================================================
|
| 644 |
+
if __name__ == "__main__":
    import argparse

    # CLI entry point: quick ELO sanity check and/or full feature generation.
    arg_parser = argparse.ArgumentParser(description="Comprehensive Feature Engineering")
    arg_parser.add_argument("--test", action="store_true", help="Run tests only")
    arg_parser.add_argument("--process", action="store_true", help="Process collected data")
    cli_args = arg_parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    # With no flags at all, default to the ELO demo (same as --test).
    if cli_args.test or not cli_args.process:
        print("Testing ELO Calculator...")
        elo = ELOCalculator()

        lal_rating = elo.get_rating(1610612747)
        bos_rating = elo.get_rating(1610612738)
        print(f"Initial ratings - LAL: {lal_rating}, BOS: {bos_rating}")

        elo.update_ratings(1610612747, 1610612738, won=True, is_home=True)
        print(f"After LAL home win - LAL: {elo.get_rating(1610612747):.1f}, BOS: {elo.get_rating(1610612738):.1f}")

        features = elo.calculate_game_features(1610612747, 1610612738, is_home=True)
        print(f"\nGame features: {features}")

    if cli_args.process:
        print("\n=== Processing Collected Data with COMPREHENSIVE Features ===")

        games_path = RAW_DATA_DIR / "all_games.parquet"
        output_path = PROCESSED_DATA_DIR / "game_features.parquet"

        # Collected data is a prerequisite for feature generation.
        if not games_path.exists():
            print(f"ERROR: Games data not found at {games_path}")
            print("Run 'python -m src.data_collector' first to collect data.")
            exit(1)

        print(f"Loading games from {games_path}...")
        games_df = pd.read_parquet(games_path)
        print(f"Loaded {len(games_df)} games")

        print("\nGenerating COMPREHENSIVE features (this may take a while)...")
        print("Features include: ELO, rolling stats, defense, momentum, rest, advanced metrics, clutch, hustle...")

        result_df = process_all_games(games_df, output_path)

        print(f"\n✅ Features saved to: {output_path}")
        print(f"   Total rows: {len(result_df)}")
        print(f"   Total features: {len(result_df.columns)}")
        print(f"\nFeature columns ({len(result_df.columns)} total):")
        for col in sorted(result_df.columns):
            print(f"  - {col}")
|
src/injury_collector.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Injury Collector
|
| 3 |
+
============================================
|
| 4 |
+
Real-time injury data integration using nbainjuries package.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from datetime import datetime, timedelta
|
| 10 |
+
from typing import Optional, Dict, List
|
| 11 |
+
import json
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
# Try to import nbainjuries, fall back to manual fetching if not available
|
| 15 |
+
try:
|
| 16 |
+
from nbainjuries import injuries
|
| 17 |
+
HAS_NBA_INJURIES = True
|
| 18 |
+
except ImportError:
|
| 19 |
+
HAS_NBA_INJURIES = False
|
| 20 |
+
logging.warning("nbainjuries package not installed. Using fallback injury data.")
|
| 21 |
+
|
| 22 |
+
from src.config import API_CACHE_DIR, INJURY_IMPACT
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
# =============================================================================
|
| 27 |
+
# INJURY CACHE
|
| 28 |
+
# =============================================================================
|
| 29 |
+
class InjuryCache:
    """On-disk cache for injury data with a configurable refresh interval.

    A corrupt or unreadable cache file is treated as a cache miss instead of
    raising, so injury collection degrades gracefully.
    """

    def __init__(self, cache_dir: Path = API_CACHE_DIR, cache_hours: float = 3.0):
        """
        Args:
            cache_dir: Directory that holds ``injury_cache.json``.
            cache_hours: Validity window for cached data, in hours.
        """
        self.cache_dir = cache_dir
        self.cache_file = cache_dir / "injury_cache.json"
        self.cache_hours = cache_hours

    def is_cache_valid(self) -> bool:
        """Check if cache exists, is readable, and is within the refresh window."""
        cache_data = self.load()
        if cache_data is None:
            return False

        try:
            cached_time = datetime.fromisoformat(cache_data.get("timestamp", "2000-01-01"))
        except (TypeError, ValueError):
            # Malformed timestamp -> treat as stale.
            return False
        return datetime.now() - cached_time < timedelta(hours=self.cache_hours)

    def load(self) -> Optional[Dict]:
        """Load cached injury data, or None if missing/corrupt."""
        if not self.cache_file.exists():
            return None

        try:
            with open(self.cache_file, 'r') as f:
                return json.load(f)
        except (json.JSONDecodeError, OSError):
            # A truncated/corrupt cache file should behave like a cache miss,
            # not crash the prediction pipeline.
            return None

    def save(self, data: Dict):
        """Save injury data to cache (creates the cache directory if needed)."""
        cache_data = {
            "timestamp": datetime.now().isoformat(),
            "injuries": data
        }
        # First run may happen before the cache directory exists.
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        with open(self.cache_file, 'w') as f:
            json.dump(cache_data, f, indent=2)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# =============================================================================
|
| 67 |
+
# INJURY COLLECTOR
|
| 68 |
+
# =============================================================================
|
| 69 |
+
class InjuryCollector:
    """Collects and processes injury data for predictions.

    Lookup order on every fetch: in-memory cache (5 min TTL) -> on-disk
    cache (``cache_hours`` TTL) -> live ``nbainjuries`` fetch -> empty frame.
    """

    def __init__(self, cache_hours: float = 3.0):
        """
        Args:
            cache_hours: Validity window for the on-disk injury cache.
        """
        self.cache = InjuryCache(cache_hours=cache_hours)
        self._injuries_df = None          # in-memory copy of the last fetched frame
        self._last_fetch_time = None      # datetime of the last successful fetch
        self._memory_cache_ttl = 300      # 5 minutes in-memory cache

    def fetch_injuries(self, force_refresh: bool = False) -> pd.DataFrame:
        """
        Fetch current injury data.

        Returns DataFrame with columns:
        - player_name: Player's full name
        - team: Team abbreviation
        - status: Out, Questionable, Doubtful, Probable, Available
        - injury: Injury description
        - date: Report date
        """
        # Use in-memory cache if fresh (for fast repeated calls)
        if not force_refresh and self._injuries_df is not None and self._last_fetch_time:
            elapsed = (datetime.now() - self._last_fetch_time).total_seconds()
            if elapsed < self._memory_cache_ttl:
                return self._injuries_df

        # Check file cache next
        if not force_refresh and self.cache.is_cache_valid():
            cache_data = self.cache.load()
            if cache_data and "injuries" in cache_data:
                self._injuries_df = pd.DataFrame(cache_data["injuries"])
                self._last_fetch_time = datetime.now()
                return self._injuries_df

        # Fetch fresh data only if needed
        if HAS_NBA_INJURIES:
            try:
                injury_data = injuries.get_injuries()
                df = pd.DataFrame(injury_data)
                self.cache.save(df.to_dict('records'))
                self._injuries_df = df
                self._last_fetch_time = datetime.now()
                return df
            except Exception as e:
                # Injury data is best-effort: never crash a prediction on a
                # feed failure, but do record why we fell back.
                logger.warning(f"Injury fetch failed, falling back to empty data: {e}")

        # Fall back to an empty (correctly-typed) frame without logging spam
        self._injuries_df = self._get_empty_injuries_df()
        self._last_fetch_time = datetime.now()
        return self._injuries_df

    def _get_empty_injuries_df(self) -> pd.DataFrame:
        """Return empty injuries DataFrame with proper schema."""
        return pd.DataFrame(columns=[
            "player_name", "team", "status", "injury", "date"
        ])

    def get_team_injuries(self, team_abbrev: str) -> pd.DataFrame:
        """Get injuries for a specific team (empty frame if no data)."""
        df = self.fetch_injuries()
        if df.empty:
            return df
        return df[df["team"] == team_abbrev]

    def calculate_injury_impact(self, team_abbrev: str,
                                player_usage: Optional[Dict[str, float]] = None) -> float:
        """
        Calculate total injury impact for a team.

        Args:
            team_abbrev: Team abbreviation (e.g., "LAL")
            player_usage: Dict mapping player names to usage rates (0-1)
                If None, uses equal weighting

        Returns:
            Injury impact score (0 = no injuries, higher = more impact, capped at 1.0)
        """
        team_injuries = self.get_team_injuries(team_abbrev)

        if team_injuries.empty:
            return 0.0

        total_impact = 0.0
        for _, injury in team_injuries.iterrows():
            status = injury.get("status", "Available")
            base_impact = INJURY_IMPACT.get(status, 0.0)

            # Weight by player usage if provided; .get() tolerates schema drift
            player_name = injury.get("player_name")
            if player_usage and player_name in player_usage:
                player_weight = player_usage[player_name]
            else:
                # Default: assume equal importance for all injured players
                player_weight = 0.2

            total_impact += base_impact * player_weight

        return min(total_impact, 1.0)  # Cap at 1.0

    def get_injury_summary(self, team_abbrev: str) -> Dict:
        """Get a summary of team injuries for display (counts per status + roster)."""
        team_injuries = self.get_team_injuries(team_abbrev)

        summary = {
            "total_injuries": len(team_injuries),
            "out": 0,
            "questionable": 0,
            "doubtful": 0,
            "probable": 0,
            "players": []
        }

        if team_injuries.empty:
            return summary

        for _, injury in team_injuries.iterrows():
            # Substring match tolerates variants like "Out (knee)"
            status = injury.get("status", "").lower()
            if "out" in status:
                summary["out"] += 1
            elif "questionable" in status:
                summary["questionable"] += 1
            elif "doubtful" in status:
                summary["doubtful"] += 1
            elif "probable" in status:
                summary["probable"] += 1

            summary["players"].append({
                "name": injury.get("player_name", "Unknown"),
                "status": injury.get("status", "Unknown"),
                "injury": injury.get("injury", "Unknown")
            })

        return summary
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# =============================================================================
|
| 208 |
+
# CLI INTERFACE
|
| 209 |
+
# =============================================================================
|
| 210 |
+
if __name__ == "__main__":
    # Manual smoke test: fetch the current report and show a sample.
    injury_collector = InjuryCollector()

    print("Fetching current NBA injuries...")
    report = injury_collector.fetch_injuries()

    if report.empty:
        print("No injury data available")
    else:
        print(f"\nFound {len(report)} injury reports")
        print("\nSample injuries:")
        print(report.head(10))

        # Test impact calculation
        print("\nInjury impact for LAL:", injury_collector.calculate_injury_impact("LAL"))
|
src/live_data_collector.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Live Data Collector
|
| 3 |
+
===============================================
|
| 4 |
+
Real-time data collection from NBA Live API endpoints.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from datetime import datetime, timezone
|
| 9 |
+
from typing import Dict, List, Optional
|
| 10 |
+
import time
|
| 11 |
+
|
| 12 |
+
from nba_api.live.nba.endpoints import scoreboard, boxscore
|
| 13 |
+
|
| 14 |
+
from src.config import API_CONFIG, NBA_TEAMS
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class LiveDataCollector:
    """
    Collects live game data from NBA API.

    Uses nba_api.live endpoints:
    - scoreboard.ScoreBoard() for today's games with live scores
    - boxscore.BoxScore(game_id) for detailed game box scores

    Scoreboard responses are cached in memory for a few seconds so repeated
    callers (e.g. a polling web endpoint) don't hammer the NBA API.
    """

    # Numeric gameStatus codes from the live API -> readable status strings
    GAME_STATUS_MAP = {
        1: "NOT_STARTED",
        2: "IN_PROGRESS",
        3: "FINAL"
    }

    def __init__(self):
        self._last_scoreboard_fetch = None  # datetime of last successful fetch
        self._cached_scoreboard = None      # last fetched list of game dicts
        self._cache_ttl = 10  # Seconds to cache scoreboard

    def get_live_scoreboard(self, force_refresh: bool = False) -> List[Dict]:
        """
        Get today's games with live scores.

        Returns list of games with:
        - game_id, game_code
        - home_team, away_team (tricodes)
        - home_score, away_score
        - status (NOT_STARTED, IN_PROGRESS, FINAL)
        - period, clock
        - home_record, away_record

        On API failure, returns the last cached list (possibly stale) or [].
        """
        # Use cache if available and fresh
        if not force_refresh and self._cached_scoreboard:
            if self._last_scoreboard_fetch:
                elapsed = (datetime.now() - self._last_scoreboard_fetch).total_seconds()
                if elapsed < self._cache_ttl:
                    return self._cached_scoreboard

        try:
            sb = scoreboard.ScoreBoard()
            games_data = sb.games.get_dict()

            games_list = []
            for game in games_data:
                game_status = game.get("gameStatus", 1)
                home_team = game.get("homeTeam", {})
                away_team = game.get("awayTeam", {})

                # Parse periods for quarter scores
                home_periods = [p.get("score", 0) for p in home_team.get("periods", [])]
                away_periods = [p.get("score", 0) for p in away_team.get("periods", [])]

                games_list.append({
                    "game_id": game.get("gameId", ""),
                    "game_code": game.get("gameCode", ""),
                    # gameEt is an ISO-ish datetime string; first 10 chars = date
                    "game_date": game.get("gameEt", "")[:10] if game.get("gameEt") else "",
                    "game_time_utc": game.get("gameTimeUTC", ""),
                    "game_time_et": game.get("gameEt", ""),

                    # Teams
                    "home_team": home_team.get("teamTricode", ""),
                    "away_team": away_team.get("teamTricode", ""),
                    "home_team_id": home_team.get("teamId", 0),
                    "away_team_id": away_team.get("teamId", 0),
                    "home_team_name": home_team.get("teamName", ""),
                    "away_team_name": away_team.get("teamName", ""),

                    # Scores
                    "home_score": home_team.get("score", 0),
                    "away_score": away_team.get("score", 0),
                    "home_periods": home_periods,
                    "away_periods": away_periods,

                    # Status
                    "status": self.GAME_STATUS_MAP.get(game_status, "UNKNOWN"),
                    "status_text": game.get("gameStatusText", ""),
                    "period": game.get("period", 0),
                    "clock": game.get("gameClock", ""),

                    # Records
                    "home_wins": home_team.get("wins", 0),
                    "home_losses": home_team.get("losses", 0),
                    "away_wins": away_team.get("wins", 0),
                    "away_losses": away_team.get("losses", 0),
                    "home_record": f"{home_team.get('wins', 0)}-{home_team.get('losses', 0)}",
                    "away_record": f"{away_team.get('wins', 0)}-{away_team.get('losses', 0)}",

                    # Leaders (for in-progress/final games)
                    "home_leader": game.get("gameLeaders", {}).get("homeLeaders", {}),
                    "away_leader": game.get("gameLeaders", {}).get("awayLeaders", {}),
                })

            # Update cache
            self._cached_scoreboard = games_list
            self._last_scoreboard_fetch = datetime.now()

            logger.info(f"Fetched {len(games_list)} games from Live Scoreboard")
            return games_list

        except Exception as e:
            # NOTE(review): no retry/backoff here — a transient error serves
            # the previous (possibly stale) scoreboard or an empty list.
            logger.error(f"Failed to fetch live scoreboard: {e}")
            return self._cached_scoreboard or []

    def get_game_boxscore(self, game_id: str) -> Optional[Dict]:
        """
        Get detailed box score for a specific game.

        Returns:
            Dict with game details, team stats, player stats; None on failure
        """
        try:
            box = boxscore.BoxScore(game_id)
            game_data = box.game.get_dict()

            return {
                "game_id": game_data.get("gameId", game_id),
                "game_status": game_data.get("gameStatus", 1),
                "game_status_text": game_data.get("gameStatusText", ""),
                "period": game_data.get("period", 0),
                "clock": game_data.get("gameClock", ""),

                # Raw team dicts straight from the live API (include player stats)
                "home_team": game_data.get("homeTeam", {}),
                "away_team": game_data.get("awayTeam", {}),

                "arena": game_data.get("arena", {}),
            }

        except Exception as e:
            logger.error(f"Failed to fetch boxscore for {game_id}: {e}")
            return None

    def get_game_status(self, game_id: str) -> str:
        """
        Get current status for a specific game.

        Returns:
            'NOT_STARTED', 'IN_PROGRESS', or 'FINAL' ('UNKNOWN' if not found)
        """
        games = self.get_live_scoreboard()
        for game in games:
            if game["game_id"] == game_id:
                return game["status"]
        return "UNKNOWN"

    def get_winner(self, game_id: str) -> Optional[str]:
        """
        Get the winner of a completed game.

        Returns:
            Team tricode of winner, or None if game not finished
        """
        games = self.get_live_scoreboard()
        for game in games:
            if game["game_id"] == game_id:
                if game["status"] == "FINAL":
                    # NBA games cannot end tied, so a simple comparison suffices
                    if game["home_score"] > game["away_score"]:
                        return game["home_team"]
                    else:
                        return game["away_team"]
        return None

    def get_games_by_status(self, status: str) -> List[Dict]:
        """
        Filter games by status.

        Args:
            status: 'NOT_STARTED', 'IN_PROGRESS', or 'FINAL'
        """
        games = self.get_live_scoreboard()
        return [g for g in games if g["status"] == status]

    def get_live_games(self) -> List[Dict]:
        """Get all currently in-progress games."""
        return self.get_games_by_status("IN_PROGRESS")

    def get_final_games(self) -> List[Dict]:
        """Get all completed games from today."""
        return self.get_games_by_status("FINAL")

    def get_upcoming_games(self) -> List[Dict]:
        """Get all not-yet-started games from today."""
        return self.get_games_by_status("NOT_STARTED")

    def format_game_summary(self, game: Dict) -> str:
        """Format a game into a readable one-line summary string."""
        status = game["status"]
        away = game["away_team"]
        home = game["home_team"]

        if status == "NOT_STARTED":
            return f"{away} @ {home} - {game['status_text']}"
        elif status == "IN_PROGRESS":
            return f"{away} {game['away_score']} @ {home} {game['home_score']} - {game['status_text']}"
        else:  # FINAL
            return f"{away} {game['away_score']} @ {home} {game['home_score']} - FINAL"
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
# =============================================================================
|
| 218 |
+
# CLI INTERFACE
|
| 219 |
+
# =============================================================================
|
| 220 |
+
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    # Manual smoke test: print today's slate with live scores.
    live_collector = LiveDataCollector()

    print("\n=== Today's NBA Games ===\n")
    todays_games = live_collector.get_live_scoreboard()

    if todays_games:
        for g in todays_games:
            print(live_collector.format_game_summary(g))
            if g["status"] == "IN_PROGRESS":
                print(f"  Q{g['period']} {g['clock']}")
            print()
    else:
        print("No games scheduled for today")
|
src/models/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Empty init file
|
src/models/championship_predictor.py
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Championship Predictor
|
| 3 |
+
==================================================
|
| 4 |
+
Model to predict NBA Finals winner with playoff experience features.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Dict, List, Optional
|
| 11 |
+
import xgboost as xgb
|
| 12 |
+
import joblib
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
from src.config import MODELS_DIR, NBA_TEAMS
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
# =============================================================================
|
| 20 |
+
# PLAYOFF EXPERIENCE INDEX
|
| 21 |
+
# =============================================================================
|
| 22 |
+
class PlayoffExperienceCalculator:
    """
    Calculates playoff experience index for teams.
    Teams with playoff experience perform better in postseason.
    """

    # Historical playoff appearances (last 5 years weight)
    PLAYOFF_HISTORY = {
        "BOS": {"appearances": 5, "finals": 2, "championships": 1},
        "MIA": {"appearances": 4, "finals": 2, "championships": 0},
        "DEN": {"appearances": 4, "finals": 1, "championships": 1},
        "GSW": {"appearances": 5, "finals": 3, "championships": 2},
        "PHX": {"appearances": 3, "finals": 1, "championships": 0},
        "MIL": {"appearances": 5, "finals": 1, "championships": 1},
        "LAL": {"appearances": 4, "finals": 1, "championships": 1},
        "DAL": {"appearances": 3, "finals": 1, "championships": 0},
        "CLE": {"appearances": 3, "finals": 0, "championships": 0},
        "OKC": {"appearances": 2, "finals": 0, "championships": 0},
    }

    def calculate_experience_index(self, team_abbrev: str,
                                   core_continuity: float = 0.8) -> float:
        """
        Calculate playoff experience index.

        Args:
            team_abbrev: Team abbreviation
            core_continuity: How much of the playoff core remains (0-1)

        Returns:
            Experience index (0-1 scale)
        """
        # Teams without recent playoff history score 0 across the board.
        record = self.PLAYOFF_HISTORY.get(
            team_abbrev,
            {"appearances": 0, "finals": 0, "championships": 0},
        )

        # (normalized share, weight) per component:
        # appearances out of 5 years, finals out of 3, titles out of 2.
        components = (
            (record["appearances"] / 5, 0.3),
            (record["finals"] / 3, 0.4),
            (record["championships"] / 2, 0.3),
        )
        raw_score = sum(share * weight for share, weight in components)

        # Discount by how much of that playoff core is still on the roster,
        # then clamp to the 0-1 scale.
        return min(raw_score * core_continuity, 1.0)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
# =============================================================================
# CHAMPIONSHIP PREDICTOR
# =============================================================================
class ChampionshipPredictor:
    """
    Ranks NBA teams by championship likelihood.

    Blends regular-season metrics (win %, net rating, Elo) with a
    playoff-experience index into a heuristic score, then normalizes the
    scores into per-team probabilities. A trainable model slot exists, but
    the current prediction path is purely formula-based.
    """

    def __init__(self):
        # Placeholder for a future learned model; predictions currently come
        # from the heuristic in predict_probabilities().
        self.model = None
        self.experience_calc = PlayoffExperienceCalculator()
        self.feature_columns = None
        self.trained = False

    def calculate_team_strength(self, team_stats: Dict) -> float:
        """Return a weighted strength rating built from basic team metrics."""
        return (
            team_stats.get("win_pct", 0.5) * 30 +
            team_stats.get("net_rating", 0) * 0.5 +
            team_stats.get("elo", 1500) / 100 +
            team_stats.get("playoff_experience", 0) * 10
        )

    def calculate_injury_sensitivity(self, team_stats: Dict) -> float:
        """
        Estimate how dependent a team is on its top scorer.

        A larger share of team points from one player means the title odds
        are more fragile to a single injury (high sensitivity = risky bet).
        """
        star_ppg = team_stats.get("top_scorer_ppg", 25)
        total_ppg = team_stats.get("team_ppg", 110)
        # Fall back to a typical 0.3 concentration if team scoring is unknown.
        return star_ppg / total_ppg if total_ppg > 0 else 0.3

    def prepare_features(self, team_df: pd.DataFrame) -> pd.DataFrame:
        """Return a copy of team_df with experience and strength columns added."""
        frame = team_df.copy()

        # Playoff experience index per franchise abbreviation.
        frame["playoff_experience"] = frame["TEAM_ABBREVIATION"].apply(
            self.experience_calc.calculate_experience_index
        )

        def _row_strength(row):
            # Adapt a DataFrame row into the dict calculate_team_strength expects.
            return self.calculate_team_strength({
                "win_pct": row.get("W_PCT", 0.5),
                "net_rating": row.get("NET_RATING", 0),
                "elo": row.get("ELO", 1500),
                "playoff_experience": row.get("playoff_experience", 0),
            })

        frame["strength_rating"] = frame.apply(_row_strength, axis=1)
        return frame

    def predict_probabilities(self, team_df: pd.DataFrame) -> pd.DataFrame:
        """
        Predict championship probability for each team.

        Uses a formula-based score (no trained model needed) and normalizes
        the scores so they sum to 1 across teams.
        """
        frame = self.prepare_features(team_df)

        # Raw heuristic championship score per team.
        frame["champ_score"] = (
            frame.get("W_PCT", 0.5) * 40 +
            frame.get("NET_RATING", 0) * 2 +
            frame["playoff_experience"] * 20 +
            frame["strength_rating"] * 0.5
        )

        # Normalize to probabilities (uniform fallback when the total is <= 0).
        score_sum = frame["champ_score"].sum()
        frame["champ_probability"] = (
            frame["champ_score"] / score_sum if score_sum > 0 else 1 / len(frame)
        )

        frame = frame.sort_values("champ_probability", ascending=False)
        return frame[["TEAM_ABBREVIATION", "W_PCT", "playoff_experience",
                      "strength_rating", "champ_probability"]]

    def get_top_contenders(self, team_df: pd.DataFrame, top_n: int = 8) -> pd.DataFrame:
        """Return the top_n teams by championship probability."""
        return self.predict_probabilities(team_df).head(top_n)

    def simulate_playoff_bracket(self, team_df: pd.DataFrame) -> Dict:
        """
        Run a simple playoff bracket simulation.

        NOTE: the conference split is a simplification (top 8 vs next 8 by
        probability), not the real East/West standings.
        """
        rankings = self.predict_probabilities(team_df)

        playoff_field = rankings.head(16)
        east_teams = playoff_field.head(8)  # Simplified split
        west_teams = playoff_field.tail(8)

        return {
            "east_champion": east_teams.iloc[0]["TEAM_ABBREVIATION"],
            "west_champion": west_teams.iloc[0]["TEAM_ABBREVIATION"],
            "finals_winner": rankings.iloc[0]["TEAM_ABBREVIATION"],
            "champion_probability": rankings.iloc[0]["champ_probability"],
        }

    def save(self, path: Path = None):
        """Persist the (optional) model and metadata to disk via joblib."""
        if path is None:
            path = MODELS_DIR / "championship_predictor.joblib"

        joblib.dump({
            "model": self.model,
            "feature_columns": self.feature_columns,
            "trained": self.trained,
        }, path)
        logger.info(f"Saved championship model to {path}")

    def load(self, path: Path = None):
        """Restore the model and metadata previously written by save()."""
        if path is None:
            path = MODELS_DIR / "championship_predictor.joblib"

        payload = joblib.load(path)
        self.model = payload["model"]
        self.feature_columns = payload["feature_columns"]
        self.trained = payload["trained"]
+
|
| 215 |
+
# =============================================================================
# CLI INTERFACE
# =============================================================================
if __name__ == "__main__":
    print("Testing Championship Predictor...")

    # Small synthetic standings table used purely as a smoke test.
    sample_teams = pd.DataFrame({
        "TEAM_ABBREVIATION": ["BOS", "DEN", "MIL", "PHX", "GSW", "MIA", "LAL", "DAL"],
        "W_PCT": [0.68, 0.65, 0.63, 0.60, 0.58, 0.55, 0.52, 0.50],
        "NET_RATING": [8.5, 6.2, 5.1, 4.0, 3.5, 2.0, 1.5, 0.5],
    })

    predictor = ChampionshipPredictor()

    print("\nChampionship Probabilities:")
    contenders = predictor.get_top_contenders(sample_teams)
    print(contenders.to_string(index=False))

    print("\nPlayoff Bracket Simulation:")
    bracket = predictor.simulate_playoff_bracket(sample_teams)
    for key, value in bracket.items():
        print(f" {key}: {value}")
|
src/models/game_predictor.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Game Predictor
|
| 3 |
+
==========================================
|
| 4 |
+
XGBoost + LightGBM ensemble for game win prediction.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Dict, List, Tuple, Optional
|
| 11 |
+
from sklearn.metrics import accuracy_score, brier_score_loss, log_loss
|
| 12 |
+
from sklearn.calibration import calibration_curve
|
| 13 |
+
import xgboost as xgb
|
| 14 |
+
import lightgbm as lgb
|
| 15 |
+
import joblib
|
| 16 |
+
import logging
|
| 17 |
+
|
| 18 |
+
from src.config import MODEL_CONFIG, MODELS_DIR
|
| 19 |
+
# Import preprocessing so pickle can find DataPreprocessor class
|
| 20 |
+
from src.preprocessing import DataPreprocessor, GameDatasetBuilder
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
# =============================================================================
# GAME PREDICTOR MODEL
# =============================================================================
class GamePredictor:
    """
    Ensemble model for predicting game outcomes.

    Combines XGBoost and LightGBM classifiers by a weighted average of
    their predicted probabilities.

    Typical lifecycle:
        train() -> predict_proba()/predict()/predict_with_confidence()
        -> evaluate() -> save()/load()
    """

    def __init__(self,
                 xgb_weight: float = 0.5,
                 lgb_weight: float = 0.5):
        # Ensemble mixing weights; they should sum to 1 for calibrated output.
        self.xgb_weight = xgb_weight
        self.lgb_weight = lgb_weight

        self.xgb_model = None
        self.lgb_model = None
        self.feature_columns = None
        self.trained = False

    def train(self, X_train: np.ndarray, y_train: np.ndarray,
              X_val: np.ndarray = None, y_val: np.ndarray = None,
              feature_columns: List[str] = None):
        """
        Train both XGBoost and LightGBM models.

        Args:
            X_train, y_train: training features/labels.
            X_val, y_val: optional validation split used as eval_set.
            feature_columns: feature names, kept for importance reporting.
        """
        self.feature_columns = feature_columns

        logger.info("Training XGBoost model...")
        self.xgb_model = xgb.XGBClassifier(**MODEL_CONFIG.xgb_params)

        if X_val is not None:
            self.xgb_model.fit(
                X_train, y_train,
                eval_set=[(X_val, y_val)],
                verbose=False
            )
        else:
            self.xgb_model.fit(X_train, y_train)

        logger.info("Training LightGBM model...")
        self.lgb_model = lgb.LGBMClassifier(**MODEL_CONFIG.lgb_params)

        if X_val is not None:
            self.lgb_model.fit(
                X_train, y_train,
                eval_set=[(X_val, y_val)]
            )
        else:
            self.lgb_model.fit(X_train, y_train)

        self.trained = True
        logger.info("Training complete!")

    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        """
        Predict win probabilities using the ensemble.

        Returns:
            Array of shape (n_samples, 2) with [loss_prob, win_prob].

        Raises:
            ValueError: if called before train()/load().
        """
        if not self.trained:
            raise ValueError("Model not trained. Call train() first.")

        xgb_proba = self.xgb_model.predict_proba(X)
        lgb_proba = self.lgb_model.predict_proba(X)

        # Weighted average of the two base models.
        return self.xgb_weight * xgb_proba + self.lgb_weight * lgb_proba

    def predict(self, X: np.ndarray, threshold: float = 0.5) -> np.ndarray:
        """Predict win/loss (1/0) by thresholding the win probability."""
        proba = self.predict_proba(X)
        return (proba[:, 1] >= threshold).astype(int)

    def predict_with_confidence(self, X: np.ndarray) -> List[Dict]:
        """
        Predict with detailed confidence information.

        Returns one dict per sample: ensemble and per-model win
        probabilities, the absolute model disagreement, a coarse
        confidence label, and the hard WIN/LOSS call.

        Raises:
            ValueError: if called before train()/load().
        """
        if not self.trained:
            raise ValueError("Model not trained. Call train() first.")

        xgb_proba = self.xgb_model.predict_proba(X)[:, 1]
        lgb_proba = self.lgb_model.predict_proba(X)[:, 1]
        # Combine locally instead of re-running both base models through
        # predict_proba() a second time; the math is identical.
        ensemble_proba = self.xgb_weight * xgb_proba + self.lgb_weight * lgb_proba

        results = []
        for i in range(len(X)):
            # Disagreement between base models drives the confidence label.
            disagreement = abs(xgb_proba[i] - lgb_proba[i])

            results.append({
                "win_probability": ensemble_proba[i],
                "xgb_probability": xgb_proba[i],
                "lgb_probability": lgb_proba[i],
                "model_disagreement": disagreement,
                "confidence": "high" if disagreement < 0.1 else ("medium" if disagreement < 0.2 else "low"),
                "prediction": "WIN" if ensemble_proba[i] >= 0.5 else "LOSS"
            })

        return results

    def evaluate(self, X: np.ndarray, y: np.ndarray) -> Dict[str, float]:
        """
        Comprehensive model evaluation.

        Returns:
            Dict with ensemble accuracy, Brier score, log loss, plus
            per-base-model accuracy.
        """
        y_pred = self.predict(X)
        y_proba = self.predict_proba(X)[:, 1]

        metrics = {
            "accuracy": accuracy_score(y, y_pred),
            "brier_score": brier_score_loss(y, y_proba),
            "log_loss": log_loss(y, y_proba)
        }

        # Individual model metrics for comparison with the ensemble.
        metrics["xgb_accuracy"] = accuracy_score(y, self.xgb_model.predict(X))
        metrics["lgb_accuracy"] = accuracy_score(y, self.lgb_model.predict(X))

        return metrics

    def get_feature_importance(self) -> pd.DataFrame:
        """Return per-feature importances from both models, sorted by average."""
        if not self.trained or self.feature_columns is None:
            return pd.DataFrame()

        xgb_importance = self.xgb_model.feature_importances_
        lgb_importance = self.lgb_model.feature_importances_

        df = pd.DataFrame({
            "feature": self.feature_columns,
            "xgb_importance": xgb_importance,
            "lgb_importance": lgb_importance,
            "avg_importance": (xgb_importance + lgb_importance) / 2
        })

        return df.sort_values("avg_importance", ascending=False)

    def explain_prediction(self, X: np.ndarray, top_n: int = 5) -> List[Dict]:
        """
        Explain predictions using global feature importance.

        For each row, returns the top_n most important features together
        with the row's value for each. Returns [] when untrained.
        """
        if not self.trained or self.feature_columns is None:
            return []

        importance = self.get_feature_importance()
        top = importance.head(top_n)
        # Hoist all per-row-invariant lookups out of the loop: importance by
        # feature name, each top feature's column index, and a single batched
        # predict() call instead of one per row.
        importance_by_feature = dict(zip(top["feature"], top["avg_importance"]))
        col_index = {feat: j for j, feat in enumerate(self.feature_columns)}
        predictions = self.predict(X)

        explanations = []
        for i in range(len(X)):
            feature_contributions = [
                {
                    "feature": feat,
                    "value": X[i, col_index[feat]],
                    "importance": imp,
                }
                for feat, imp in importance_by_feature.items()
            ]
            feature_contributions.sort(key=lambda c: c["importance"], reverse=True)

            explanations.append({
                "top_features": feature_contributions[:top_n],
                "prediction": predictions[i]
            })

        return explanations

    def save(self, path: Path = None):
        """Persist both base models, weights, and metadata to disk."""
        if path is None:
            path = MODELS_DIR / "game_predictor.joblib"

        joblib.dump({
            "xgb_model": self.xgb_model,
            "lgb_model": self.lgb_model,
            "xgb_weight": self.xgb_weight,
            "lgb_weight": self.lgb_weight,
            "feature_columns": self.feature_columns,
            "trained": self.trained
        }, path)
        logger.info(f"Saved model to {path}")

    def load(self, path: Path = None):
        """Restore a model previously written by save()."""
        if path is None:
            path = MODELS_DIR / "game_predictor.joblib"

        data = joblib.load(path)
        self.xgb_model = data["xgb_model"]
        self.lgb_model = data["lgb_model"]
        self.xgb_weight = data["xgb_weight"]
        self.lgb_weight = data["lgb_weight"]
        self.feature_columns = data["feature_columns"]
        self.trained = data["trained"]
        logger.info(f"Loaded model from {path}")
| 232 |
+
|
| 233 |
+
# =============================================================================
# TRAINING PIPELINE
# =============================================================================
def train_game_predictor(dataset: Dict) -> GamePredictor:
    """
    Full training pipeline for game predictor.

    Fits the ensemble on the provided splits, logs metrics for the
    train/val/test sets, prints the top feature importances, and saves
    the trained model to disk.
    """
    logger.info("Starting game predictor training...")

    predictor = GamePredictor()
    predictor.train(
        X_train=dataset["X_train"],
        y_train=dataset["y_train"],
        X_val=dataset["X_val"],
        y_val=dataset["y_val"],
        feature_columns=dataset["feature_columns"]
    )

    # Report performance on every split.
    logger.info("\n=== Training Metrics ===")
    train_metrics = predictor.evaluate(dataset["X_train"], dataset["y_train"])
    logger.info(f"Train Accuracy: {train_metrics['accuracy']:.4f}")

    logger.info("\n=== Validation Metrics ===")
    val_metrics = predictor.evaluate(dataset["X_val"], dataset["y_val"])
    logger.info(f"Val Accuracy: {val_metrics['accuracy']:.4f}")
    logger.info(f"Val Brier Score: {val_metrics['brier_score']:.4f}")

    logger.info("\n=== Test Metrics ===")
    test_metrics = predictor.evaluate(dataset["X_test"], dataset["y_test"])
    logger.info(f"Test Accuracy: {test_metrics['accuracy']:.4f}")
    logger.info(f"Test Brier Score: {test_metrics['brier_score']:.4f}")

    # Flag whether the project's accuracy target was reached.
    if test_metrics["accuracy"] >= 0.65:
        logger.info("✓ Target accuracy (>65%) achieved!")
    else:
        logger.warning(f"✗ Below target accuracy. Got {test_metrics['accuracy']:.2%}")

    logger.info("\n=== Top Features ===")
    print(predictor.get_feature_importance().head(10))

    predictor.save()

    return predictor
| 282 |
+
|
| 283 |
+
# =============================================================================
# CLI INTERFACE
# =============================================================================
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Game Predictor Training")
    parser.add_argument("--train", action="store_true", help="Train model")
    parser.add_argument("--evaluate", action="store_true", help="Evaluate existing model")
    args = parser.parse_args()

    if args.train:
        from src.preprocessing import GameDatasetBuilder

        logging.basicConfig(level=logging.INFO)

        print("Loading dataset...")
        dataset_builder = GameDatasetBuilder()

        try:
            dataset = dataset_builder.load_dataset()
            print(f"Loaded dataset with {len(dataset['feature_columns'])} features")
        except FileNotFoundError:
            print("No dataset found. Please run 'python -m src.preprocessing --build' first.")
            exit(1)
        except Exception as e:
            print(f"Error loading dataset: {e}")
            import traceback
            traceback.print_exc()
            exit(1)

        train_game_predictor(dataset)
        print("\nTraining complete!")

    elif args.evaluate:
        predictor = GamePredictor()
        predictor.load()

        from src.preprocessing import GameDatasetBuilder
        dataset = GameDatasetBuilder().load_dataset()

        metrics = predictor.evaluate(dataset["X_test"], dataset["y_test"])
        print("\n=== Test Metrics ===")
        for name, value in metrics.items():
            print(f"{name}: {value:.4f}")
    else:
        print("Use --train to train or --evaluate to evaluate")
|
src/models/mvp_predictor.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - MVP Predictor
|
| 3 |
+
=========================================
|
| 4 |
+
Model to predict MVP based on player performance, team success,
|
| 5 |
+
and historical MVP similarity.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Dict, List, Optional
|
| 12 |
+
from sklearn.metrics.pairwise import cosine_similarity
|
| 13 |
+
from sklearn.preprocessing import StandardScaler
|
| 14 |
+
import xgboost as xgb
|
| 15 |
+
import joblib
|
| 16 |
+
import logging
|
| 17 |
+
|
| 18 |
+
from src.config import MODELS_DIR, RAW_DATA_DIR
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
# =============================================================================
# HISTORICAL MVP PROFILES
# =============================================================================
# Historical MVP seasons (approximate stat lines) used as reference vectors
# when scoring a candidate's similarity to past winners.
HISTORICAL_MVP_PROFILES = {
    "2023-24": {"player": "Nikola Jokic", "ppg": 26.4, "rpg": 12.4, "apg": 9.0, "ws": 17.8, "team_wins": 57},
    "2022-23": {"player": "Joel Embiid", "ppg": 33.1, "rpg": 10.2, "apg": 4.2, "ws": 14.3, "team_wins": 54},
    "2021-22": {"player": "Nikola Jokic", "ppg": 27.1, "rpg": 13.8, "apg": 7.9, "ws": 15.2, "team_wins": 48},
    "2020-21": {"player": "Nikola Jokic", "ppg": 26.4, "rpg": 10.8, "apg": 8.3, "ws": 15.6, "team_wins": 47},
    "2019-20": {"player": "Giannis Antetokounmpo", "ppg": 29.5, "rpg": 13.6, "apg": 5.6, "ws": 14.4, "team_wins": 56},
    "2018-19": {"player": "Giannis Antetokounmpo", "ppg": 27.7, "rpg": 12.5, "apg": 5.9, "ws": 14.4, "team_wins": 60},
    "2017-18": {"player": "James Harden", "ppg": 30.4, "rpg": 5.4, "apg": 8.8, "ws": 15.4, "team_wins": 65},
    "2016-17": {"player": "Russell Westbrook", "ppg": 31.6, "rpg": 10.7, "apg": 10.4, "ws": 13.1, "team_wins": 47},
    "2015-16": {"player": "Stephen Curry", "ppg": 30.1, "rpg": 5.4, "apg": 6.7, "ws": 17.9, "team_wins": 73},
}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# =============================================================================
|
| 40 |
+
# MVP PREDICTOR
|
| 41 |
+
# =============================================================================
|
| 42 |
+
class MVPPredictor:
|
| 43 |
+
"""
|
| 44 |
+
Predicts MVP vote share using gradient boosting with narrative features.
|
| 45 |
+
"""
|
| 46 |
+
|
| 47 |
+
def __init__(self):
|
| 48 |
+
self.model = None
|
| 49 |
+
self.scaler = StandardScaler()
|
| 50 |
+
self.feature_columns = None
|
| 51 |
+
self.trained = False
|
| 52 |
+
|
| 53 |
+
def calculate_mvp_similarity(self, player_stats: Dict) -> float:
|
| 54 |
+
"""
|
| 55 |
+
Calculate cosine similarity to historical MVP profiles.
|
| 56 |
+
Captures voter psychology by finding players who "look like" past MVPs.
|
| 57 |
+
"""
|
| 58 |
+
# Create feature vector for player
|
| 59 |
+
player_vector = np.array([
|
| 60 |
+
player_stats.get("ppg", 0),
|
| 61 |
+
player_stats.get("rpg", 0),
|
| 62 |
+
player_stats.get("apg", 0),
|
| 63 |
+
player_stats.get("ws", 0),
|
| 64 |
+
player_stats.get("team_wins", 0) / 82 # Normalize to 0-1
|
| 65 |
+
]).reshape(1, -1)
|
| 66 |
+
|
| 67 |
+
# Create matrix of historical MVP profiles
|
| 68 |
+
mvp_vectors = []
|
| 69 |
+
for season, profile in HISTORICAL_MVP_PROFILES.items():
|
| 70 |
+
mvp_vectors.append([
|
| 71 |
+
profile["ppg"],
|
| 72 |
+
profile["rpg"],
|
| 73 |
+
profile["apg"],
|
| 74 |
+
profile["ws"],
|
| 75 |
+
profile["team_wins"] / 82
|
| 76 |
+
])
|
| 77 |
+
|
| 78 |
+
mvp_matrix = np.array(mvp_vectors)
|
| 79 |
+
|
| 80 |
+
# Normalize
|
| 81 |
+
if len(mvp_matrix) > 0:
|
| 82 |
+
mvp_matrix_normalized = self.scaler.fit_transform(mvp_matrix)
|
| 83 |
+
player_normalized = self.scaler.transform(player_vector)
|
| 84 |
+
|
| 85 |
+
# Calculate similarity to each MVP season
|
| 86 |
+
similarities = cosine_similarity(player_normalized, mvp_matrix_normalized)[0]
|
| 87 |
+
|
| 88 |
+
# Return max similarity (closest to any MVP)
|
| 89 |
+
return float(np.max(similarities))
|
| 90 |
+
|
| 91 |
+
return 0.0
|
| 92 |
+
|
| 93 |
+
def calculate_narrative_features(self, player_stats: Dict,
|
| 94 |
+
prev_season_stats: Optional[Dict] = None) -> Dict:
|
| 95 |
+
"""
|
| 96 |
+
Calculate narrative momentum features that voters care about.
|
| 97 |
+
"""
|
| 98 |
+
features = {}
|
| 99 |
+
|
| 100 |
+
# Stat improvement year-over-year
|
| 101 |
+
if prev_season_stats:
|
| 102 |
+
features["ppg_improvement"] = player_stats.get("ppg", 0) - prev_season_stats.get("ppg", 0)
|
| 103 |
+
features["rpg_improvement"] = player_stats.get("rpg", 0) - prev_season_stats.get("rpg", 0)
|
| 104 |
+
features["apg_improvement"] = player_stats.get("apg", 0) - prev_season_stats.get("apg", 0)
|
| 105 |
+
else:
|
| 106 |
+
features["ppg_improvement"] = 0
|
| 107 |
+
features["rpg_improvement"] = 0
|
| 108 |
+
features["apg_improvement"] = 0
|
| 109 |
+
|
| 110 |
+
# Team success
|
| 111 |
+
features["team_wins"] = player_stats.get("team_wins", 0)
|
| 112 |
+
features["team_win_pct"] = player_stats.get("team_wins", 41) / 82
|
| 113 |
+
|
| 114 |
+
# Games played (durability matters)
|
| 115 |
+
features["games_played"] = player_stats.get("gp", 0)
|
| 116 |
+
features["games_played_pct"] = player_stats.get("gp", 0) / 82
|
| 117 |
+
|
| 118 |
+
return features
|
| 119 |
+
|
| 120 |
+
def prepare_features(self, player_df: pd.DataFrame) -> pd.DataFrame:
|
| 121 |
+
"""Prepare all features for MVP prediction."""
|
| 122 |
+
features = player_df.copy()
|
| 123 |
+
|
| 124 |
+
# Calculate MVP similarity for each player
|
| 125 |
+
features["mvp_similarity"] = features.apply(
|
| 126 |
+
lambda row: self.calculate_mvp_similarity({
|
| 127 |
+
"ppg": row.get("PTS", 0),
|
| 128 |
+
"rpg": row.get("REB", 0),
|
| 129 |
+
"apg": row.get("AST", 0),
|
| 130 |
+
"ws": row.get("WS", 10), # Default if not available
|
| 131 |
+
"team_wins": row.get("TEAM_WINS", 41)
|
| 132 |
+
}), axis=1
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
return features
|
| 136 |
+
|
| 137 |
+
def train(self, X: np.ndarray, y: np.ndarray, feature_columns: List[str]):
|
| 138 |
+
"""Train the MVP prediction model."""
|
| 139 |
+
self.feature_columns = feature_columns
|
| 140 |
+
|
| 141 |
+
self.model = xgb.XGBRegressor(
|
| 142 |
+
n_estimators=200,
|
| 143 |
+
max_depth=5,
|
| 144 |
+
learning_rate=0.1,
|
| 145 |
+
random_state=42
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
self.model.fit(X, y)
|
| 149 |
+
self.trained = True
|
| 150 |
+
logger.info("MVP model trained")
|
| 151 |
+
|
| 152 |
+
def predict_vote_share(self, X: np.ndarray) -> np.ndarray:
|
| 153 |
+
"""Predict MVP vote share (0-1 scale)."""
|
| 154 |
+
if not self.trained:
|
| 155 |
+
raise ValueError("Model not trained")
|
| 156 |
+
return self.model.predict(X)
|
| 157 |
+
|
| 158 |
+
def rank_candidates(self, player_df: pd.DataFrame, top_n: int = 10) -> pd.DataFrame:
|
| 159 |
+
"""
|
| 160 |
+
Rank MVP candidates and return top N.
|
| 161 |
+
Uses real stats-based scoring formula.
|
| 162 |
+
"""
|
| 163 |
+
df = player_df.copy()
|
| 164 |
+
|
| 165 |
+
# MVP score based on stats available from NBA API
|
| 166 |
+
# Weighted formula considering:
|
| 167 |
+
# - Scoring (30%): Points per game
|
| 168 |
+
# - Playmaking (20%): Assists per game
|
| 169 |
+
# - Rebounding (15%): Rebounds per game
|
| 170 |
+
# - Defense (10%): Steals + Blocks
|
| 171 |
+
# - Efficiency (10%): Plus/Minus and FG%
|
| 172 |
+
# - Team Success (15%): Team win percentage
|
| 173 |
+
|
| 174 |
+
pts = df.get("PTS", pd.Series([0]*len(df))).fillna(0)
|
| 175 |
+
ast = df.get("AST", pd.Series([0]*len(df))).fillna(0)
|
| 176 |
+
reb = df.get("REB", pd.Series([0]*len(df))).fillna(0)
|
| 177 |
+
stl = df.get("STL", pd.Series([0]*len(df))).fillna(0)
|
| 178 |
+
blk = df.get("BLK", pd.Series([0]*len(df))).fillna(0)
|
| 179 |
+
plus_minus = df.get("PLUS_MINUS", pd.Series([0]*len(df))).fillna(0)
|
| 180 |
+
fg_pct = df.get("FG_PCT", pd.Series([0.45]*len(df))).fillna(0.45)
|
| 181 |
+
team_win_pct = df.get("TEAM_WIN_PCT", pd.Series([0.5]*len(df))).fillna(0.5)
|
| 182 |
+
|
| 183 |
+
df["mvp_score"] = (
|
| 184 |
+
pts * 1.0 + # Points (raw weight)
|
| 185 |
+
ast * 2.0 + # Assists (weighted more for playmaking)
|
| 186 |
+
reb * 1.0 + # Rebounds
|
| 187 |
+
(stl + blk) * 1.5 + # Defense
|
| 188 |
+
plus_minus * 0.3 + # Impact metric
|
| 189 |
+
fg_pct * 20 + # Efficiency bonus
|
| 190 |
+
team_win_pct * 30 # Team success (big factor for MVP)
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
# Add MVP similarity if we can calculate it
|
| 194 |
+
if "mvp_similarity" not in df.columns:
|
| 195 |
+
df = self.prepare_features(df)
|
| 196 |
+
|
| 197 |
+
if "mvp_similarity" in df.columns:
|
| 198 |
+
df["mvp_score"] = df["mvp_score"] + df["mvp_similarity"].fillna(0) * 10
|
| 199 |
+
|
| 200 |
+
# Sort and return top candidates
|
| 201 |
+
df = df.sort_values("mvp_score", ascending=False)
|
| 202 |
+
|
| 203 |
+
# Ensure columns exist for return
|
| 204 |
+
if "mvp_similarity" not in df.columns:
|
| 205 |
+
df["mvp_similarity"] = 0.0
|
| 206 |
+
|
| 207 |
+
return df.head(top_n)[["PLAYER_NAME", "PTS", "REB", "AST", "mvp_score", "mvp_similarity"]]
|
| 208 |
+
|
| 209 |
+
def save(self, path: Path = None):
|
| 210 |
+
"""Save model to disk."""
|
| 211 |
+
if path is None:
|
| 212 |
+
path = MODELS_DIR / "mvp_predictor.joblib"
|
| 213 |
+
|
| 214 |
+
joblib.dump({
|
| 215 |
+
"model": self.model,
|
| 216 |
+
"scaler": self.scaler,
|
| 217 |
+
"feature_columns": self.feature_columns,
|
| 218 |
+
"trained": self.trained
|
| 219 |
+
}, path)
|
| 220 |
+
logger.info(f"Saved MVP model to {path}")
|
| 221 |
+
|
| 222 |
+
def load(self, path: Path = None):
    """Restore model state previously written by ``save``.

    Args:
        path: Source file; defaults to MODELS_DIR / "mvp_predictor.joblib".
    """
    source = path if path is not None else MODELS_DIR / "mvp_predictor.joblib"
    payload = joblib.load(source)
    self.model = payload["model"]
    self.scaler = payload["scaler"]
    self.feature_columns = payload["feature_columns"]
    self.trained = payload["trained"]
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# =============================================================================
|
| 235 |
+
# CLI INTERFACE
|
| 236 |
+
# =============================================================================
|
| 237 |
+
if __name__ == "__main__":
    print("Testing MVP Similarity Calculator...")

    predictor = MVPPredictor()

    # Smoke test with a hypothetical MVP-calibre stat line.
    test_stats = {
        "ppg": 28.5,
        "rpg": 12.0,
        "apg": 8.5,
        "ws": 15.0,
        "team_wins": 55,
    }

    similarity = predictor.calculate_mvp_similarity(test_stats)
    print(f"MVP Similarity Score: {similarity:.3f}")

    # Narrative features compare this season against the previous one.
    prev_stats = {"ppg": 25.0, "rpg": 10.0, "apg": 7.0}
    narrative = predictor.calculate_narrative_features(test_stats, prev_stats)
    print(f"Narrative Features: {narrative}")
|
src/prediction_pipeline.py
ADDED
|
@@ -0,0 +1,636 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Prediction Pipeline
|
| 3 |
+
===============================================
|
| 4 |
+
End-to-end pipeline for generating predictions with live data integration.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import numpy as np
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from datetime import datetime, timedelta
|
| 11 |
+
from typing import Dict, List, Optional
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
from nba_api.stats.endpoints import leaguegamefinder
|
| 15 |
+
from nba_api.stats.static import teams
|
| 16 |
+
|
| 17 |
+
from src.config import (
|
| 18 |
+
API_CACHE_DIR,
|
| 19 |
+
MODELS_DIR,
|
| 20 |
+
NBA_TEAMS,
|
| 21 |
+
API_CONFIG
|
| 22 |
+
)
|
| 23 |
+
from src.data_collector import CacheManager, retry_with_backoff
|
| 24 |
+
from src.feature_engineering import FeatureGenerator
|
| 25 |
+
from src.injury_collector import InjuryCollector
|
| 26 |
+
from src.models.game_predictor import GamePredictor
|
| 27 |
+
from src.models.mvp_predictor import MVPPredictor
|
| 28 |
+
from src.models.championship_predictor import ChampionshipPredictor
|
| 29 |
+
from src.preprocessing import DataPreprocessor
|
| 30 |
+
from src.live_data_collector import LiveDataCollector
|
| 31 |
+
from src.prediction_tracker import PredictionTracker
|
| 32 |
+
|
| 33 |
+
logger = logging.getLogger(__name__)
|
| 34 |
+
|
| 35 |
+
# =============================================================================
|
| 36 |
+
# PREDICTION PIPELINE
|
| 37 |
+
# =============================================================================
|
| 38 |
+
class PredictionPipeline:
|
| 39 |
+
"""
|
| 40 |
+
End-to-end prediction pipeline for:
|
| 41 |
+
- Today's games (with live scores)
|
| 42 |
+
- Upcoming games with predictions
|
| 43 |
+
- MVP race
|
| 44 |
+
- Championship odds
|
| 45 |
+
- Prediction tracking and accuracy
|
| 46 |
+
"""
|
| 47 |
+
|
| 48 |
+
def __init__(self):
    """Wire up data collectors, the prediction tracker, and lazy model slots."""
    self.cache = CacheManager()
    self.feature_gen = FeatureGenerator()
    self.injury_collector = InjuryCollector()

    # Live scoreboard access and prediction bookkeeping.
    self.live_collector = LiveDataCollector()
    self.prediction_tracker = PredictionTracker()

    # Models are loaded on first use via the properties below.
    self._game_model = None
    self._mvp_model = None
    self._champ_model = None
    self._preprocessor = None

    # Replay historical results so ELO reflects real team strength.
    self._initialize_elo_from_history()
|
| 65 |
+
|
| 66 |
+
def _initialize_elo_from_history(self):
    """
    Replay all cached historical games in chronological order so the
    ELO ratings reflect actual team strength before any prediction runs.
    """
    try:
        from src.config import API_CACHE_DIR

        games_path = API_CACHE_DIR / "all_games_summary.parquet"
        if not games_path.exists():
            logger.warning("No historical game data found for ELO initialization")
            return

        history = pd.read_parquet(games_path).sort_values("GAME_DATE").copy()

        # Each game appears twice (one row per team); count it only once.
        seen_games = set()
        active_season = None

        for _, game in history.iterrows():
            gid = game["GAME_ID"]
            if gid in seen_games:
                continue
            seen_games.add(gid)

            # Pull ratings toward the mean whenever a new season starts.
            season_id = game.get("SEASON_ID", "")
            if season_id != active_season:
                if active_season is not None:
                    self.feature_gen.elo.regress_to_mean()
                active_season = season_id

            tid = game["TEAM_ID"]
            matchup = game.get("MATCHUP", "")
            outcome = game.get("WL", "")
            if not matchup or not outcome:
                continue

            # "LAL vs. BOS" means LAL is home; "LAL @ BOS" means away.
            home_game = "vs." in matchup
            opp_abbrev = matchup.split(" ")[-1]
            opp_id = next(
                (t for t, a in NBA_TEAMS.items() if a == opp_abbrev),
                None
            )

            if opp_id:
                self.feature_gen.elo.update_ratings(
                    tid, opp_id, outcome == "W", home_game
                )

        logger.info(f"Initialized ELO ratings from {len(seen_games)} games")

        # Spot-check a few well-known teams for sanity in the logs.
        for abbrev in ["LAL", "BOS", "GSW", "MIL", "DEN"]:
            tid = next((t for t, a in NBA_TEAMS.items() if a == abbrev), None)
            if tid:
                rating = self.feature_gen.elo.get_rating(tid)
                logger.info(f" {abbrev}: {rating:.0f}")

    except Exception as e:
        logger.warning(f"Could not initialize ELO from history: {e}")
|
| 135 |
+
|
| 136 |
+
@property
def game_model(self) -> GamePredictor:
    """Lazily load the trained game predictor; fall back to an untrained one.

    Returns:
        The cached GamePredictor instance (loaded from disk if available).
    """
    if self._game_model is None:
        self._game_model = GamePredictor()
        try:
            self._game_model.load()
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            logger.warning("Game model not found, using untrained model")
    return self._game_model
|
| 145 |
+
|
| 146 |
+
@property
def mvp_model(self) -> MVPPredictor:
    """Lazily load the trained MVP predictor; fall back to an untrained one.

    Returns:
        The cached MVPPredictor instance (loaded from disk if available).
    """
    if self._mvp_model is None:
        self._mvp_model = MVPPredictor()
        try:
            self._mvp_model.load()
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            logger.warning("MVP model not found, using untrained model")
    return self._mvp_model
|
| 155 |
+
|
| 156 |
+
@property
def champ_model(self) -> ChampionshipPredictor:
    """Lazily load the championship predictor; fall back to an untrained one.

    Returns:
        The cached ChampionshipPredictor instance (loaded from disk if available).
    """
    if self._champ_model is None:
        self._champ_model = ChampionshipPredictor()
        try:
            self._champ_model.load()
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            logger.warning("Championship model not found, using untrained model")
    return self._champ_model
|
| 165 |
+
|
| 166 |
+
def get_todays_games(self) -> List[Dict]:
    """Today's full scoreboard from the NBA Live API."""
    return self.live_collector.get_live_scoreboard()

def get_live_games(self) -> List[Dict]:
    """Games currently in progress."""
    return self.live_collector.get_live_games()

def get_final_games(self) -> List[Dict]:
    """Games that have gone final today."""
    return self.live_collector.get_final_games()
|
| 177 |
+
|
| 178 |
+
def get_upcoming_games(self, days_ahead: int = 7) -> List[Dict]:
    """
    Get upcoming games using the real NBA schedule.

    Currently limited to today's not-yet-started games from the live
    scoreboard: the NBA stats API does not reliably expose future
    schedules, so multi-day lookahead is not implemented.

    Args:
        days_ahead: Reserved for future multi-day schedule support
            (currently unused, kept for interface compatibility).

    Returns:
        List of game dicts with ids, teams, date/time, and records.
    """
    # Removed unused local imports (`timedelta`, `time`) — neither was
    # referenced anywhere in the body.
    upcoming = []
    base_date = datetime.now()

    # Today's not-started games from the live API.
    for game in self.live_collector.get_upcoming_games():
        upcoming.append({
            "game_id": game["game_id"],
            "date": game["game_date"] or base_date.strftime("%Y-%m-%d"),
            "time": game["status_text"] or "TBD",
            "day_name": base_date.strftime("%A"),
            "home_team": game["home_team"],
            "away_team": game["away_team"],
            "home_record": game.get("home_record", ""),
            "away_record": game.get("away_record", ""),
        })

    return upcoming
|
| 210 |
+
|
| 211 |
+
def get_team_roster(self, team_abbrev: str) -> List[Dict]:
    """Current roster for a team, with projected starters listed first.

    Falls back to a hard-coded roster when the NBA API is unavailable.
    """
    try:
        from nba_api.stats.endpoints import commonteamroster

        team_id = next(
            (tid for tid, abbr in NBA_TEAMS.items() if abbr == team_abbrev),
            None
        )
        if not team_id:
            return self._get_mock_roster(team_abbrev)

        roster = commonteamroster.CommonTeamRoster(team_id=team_id, season="2024-25")
        players_df = roster.get_data_frames()[0]

        players = [
            {
                "name": row.get("PLAYER", "Unknown"),
                "number": row.get("NUM", ""),
                "position": row.get("POSITION", ""),
                "height": row.get("HEIGHT", ""),
                "weight": row.get("WEIGHT", ""),
                "age": row.get("AGE", ""),
            }
            for _, row in players_df.iterrows()
        ]

        # Roster order usually lists starters first, so the top five are
        # treated as the projected starting lineup.
        return players[:5] if len(players) >= 5 else players

    except Exception as e:
        logger.warning(f"Could not fetch roster for {team_abbrev}: {e}")
        return self._get_mock_roster(team_abbrev)
|
| 240 |
+
|
| 241 |
+
def _get_mock_roster(self, team_abbrev: str) -> List[Dict]:
|
| 242 |
+
"""Return mock starters for teams when API fails."""
|
| 243 |
+
# Real NBA starters for 2024-25 season
|
| 244 |
+
rosters = {
|
| 245 |
+
"BOS": [
|
| 246 |
+
{"name": "Jayson Tatum", "position": "SF", "number": "0"},
|
| 247 |
+
{"name": "Jaylen Brown", "position": "SG", "number": "7"},
|
| 248 |
+
{"name": "Derrick White", "position": "PG", "number": "9"},
|
| 249 |
+
{"name": "Kristaps Porzingis", "position": "C", "number": "8"},
|
| 250 |
+
{"name": "Al Horford", "position": "PF", "number": "42"},
|
| 251 |
+
],
|
| 252 |
+
"LAL": [
|
| 253 |
+
{"name": "LeBron James", "position": "SF", "number": "23"},
|
| 254 |
+
{"name": "Anthony Davis", "position": "PF", "number": "3"},
|
| 255 |
+
{"name": "Austin Reaves", "position": "SG", "number": "15"},
|
| 256 |
+
{"name": "D'Angelo Russell", "position": "PG", "number": "1"},
|
| 257 |
+
{"name": "Rui Hachimura", "position": "PF", "number": "28"},
|
| 258 |
+
],
|
| 259 |
+
"GSW": [
|
| 260 |
+
{"name": "Stephen Curry", "position": "PG", "number": "30"},
|
| 261 |
+
{"name": "Klay Thompson", "position": "SG", "number": "11"},
|
| 262 |
+
{"name": "Andrew Wiggins", "position": "SF", "number": "22"},
|
| 263 |
+
{"name": "Draymond Green", "position": "PF", "number": "23"},
|
| 264 |
+
{"name": "Kevon Looney", "position": "C", "number": "5"},
|
| 265 |
+
],
|
| 266 |
+
"MIL": [
|
| 267 |
+
{"name": "Giannis Antetokounmpo", "position": "PF", "number": "34"},
|
| 268 |
+
{"name": "Damian Lillard", "position": "PG", "number": "0"},
|
| 269 |
+
{"name": "Khris Middleton", "position": "SF", "number": "22"},
|
| 270 |
+
{"name": "Brook Lopez", "position": "C", "number": "11"},
|
| 271 |
+
{"name": "Malik Beasley", "position": "SG", "number": "5"},
|
| 272 |
+
],
|
| 273 |
+
"DEN": [
|
| 274 |
+
{"name": "Nikola Jokic", "position": "C", "number": "15"},
|
| 275 |
+
{"name": "Jamal Murray", "position": "PG", "number": "27"},
|
| 276 |
+
{"name": "Michael Porter Jr.", "position": "SF", "number": "1"},
|
| 277 |
+
{"name": "Aaron Gordon", "position": "PF", "number": "50"},
|
| 278 |
+
{"name": "Kentavious Caldwell-Pope", "position": "SG", "number": "5"},
|
| 279 |
+
],
|
| 280 |
+
"OKC": [
|
| 281 |
+
{"name": "Shai Gilgeous-Alexander", "position": "PG", "number": "2"},
|
| 282 |
+
{"name": "Jalen Williams", "position": "SF", "number": "8"},
|
| 283 |
+
{"name": "Chet Holmgren", "position": "C", "number": "7"},
|
| 284 |
+
{"name": "Lu Dort", "position": "SG", "number": "5"},
|
| 285 |
+
{"name": "Josh Giddey", "position": "PG", "number": "3"},
|
| 286 |
+
],
|
| 287 |
+
"PHX": [
|
| 288 |
+
{"name": "Kevin Durant", "position": "SF", "number": "35"},
|
| 289 |
+
{"name": "Devin Booker", "position": "SG", "number": "1"},
|
| 290 |
+
{"name": "Bradley Beal", "position": "PG", "number": "3"},
|
| 291 |
+
{"name": "Jusuf Nurkic", "position": "C", "number": "20"},
|
| 292 |
+
{"name": "Grayson Allen", "position": "SG", "number": "12"},
|
| 293 |
+
],
|
| 294 |
+
"DAL": [
|
| 295 |
+
{"name": "Luka Doncic", "position": "PG", "number": "77"},
|
| 296 |
+
{"name": "Kyrie Irving", "position": "SG", "number": "11"},
|
| 297 |
+
{"name": "Daniel Gafford", "position": "C", "number": "21"},
|
| 298 |
+
{"name": "P.J. Washington", "position": "PF", "number": "25"},
|
| 299 |
+
{"name": "Derrick Jones Jr.", "position": "SF", "number": "55"},
|
| 300 |
+
],
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
# Default roster if team not found
|
| 304 |
+
default = [
|
| 305 |
+
{"name": "Player 1", "position": "PG", "number": "1"},
|
| 306 |
+
{"name": "Player 2", "position": "SG", "number": "2"},
|
| 307 |
+
{"name": "Player 3", "position": "SF", "number": "3"},
|
| 308 |
+
{"name": "Player 4", "position": "PF", "number": "4"},
|
| 309 |
+
{"name": "Player 5", "position": "C", "number": "5"},
|
| 310 |
+
]
|
| 311 |
+
|
| 312 |
+
return rosters.get(team_abbrev, default)
|
| 313 |
+
|
| 314 |
+
def get_team_record(self, team_id: int, season: str = "2024-25") -> Dict:
    """
    Win/loss record for a team in the given season.

    Args:
        team_id: NBA numeric team id.
        season: Season string, e.g. "2024-25".

    Returns:
        Dict with ``wins``, ``losses``, and ``win_pct``; a neutral
        0-0 / 0.5 record is returned when the lookup fails or is empty.
    """
    neutral = {"wins": 0, "losses": 0, "win_pct": 0.5}
    try:
        games = leaguegamefinder.LeagueGameFinder(
            team_id_nullable=team_id,
            season_nullable=season
        ).get_data_frames()[0]

        if games.empty:
            return neutral

        # Cast numpy ints to plain ints so the result serializes cleanly.
        wins = int((games["WL"] == "W").sum())
        losses = int((games["WL"] == "L").sum())
        total = wins + losses

        return {
            "wins": wins,
            "losses": losses,
            "win_pct": wins / total if total > 0 else 0.5,
        }
    except Exception as e:
        # Narrowed from a bare `except:` (which also traps KeyboardInterrupt)
        # and the failure is now logged instead of silently swallowed.
        logger.warning(f"Could not fetch record for team {team_id}: {e}")
        return neutral
|
| 335 |
+
|
| 336 |
+
def predict_game(self, home_team: str, away_team: str) -> Dict:
    """
    Generate a prediction for a single matchup.

    Args:
        home_team: Home side abbreviation (e.g., "LAL").
        away_team: Away side abbreviation (e.g., "BOS").

    Returns:
        Dict with win probabilities, ELO context, injury reports, and a
        list of human-readable explanatory factors. Contains an "error"
        key when an abbreviation is not recognized.
    """
    home_id = next((tid for tid, abbr in NBA_TEAMS.items() if abbr == home_team), None)
    away_id = next((tid for tid, abbr in NBA_TEAMS.items() if abbr == away_team), None)

    if not home_id or not away_id:
        return {"error": "Unknown team"}

    # ELO-based win probability with home-court adjustment.
    elo = self.feature_gen.elo.calculate_game_features(
        home_id, away_id, is_home=True
    )
    home_win_prob = elo["elo_win_prob"]

    # Injury context for both sides.
    home_inj = self.injury_collector.get_injury_summary(home_team)
    away_inj = self.injury_collector.get_injury_summary(away_team)

    result = {
        "home_team": home_team,
        "away_team": away_team,
        "home_win_probability": home_win_prob,
        "away_win_probability": 1 - home_win_prob,
        "predicted_winner": home_team if home_win_prob > 0.5 else away_team,
        "confidence": "high" if abs(home_win_prob - 0.5) > 0.15 else "medium",
        "home_elo": elo["team_elo"],
        "away_elo": elo["opponent_elo"],
        "elo_diff": elo["elo_diff"],
        "home_injuries": home_inj,
        "away_injuries": away_inj,
        "home_injury_impact": self.injury_collector.calculate_injury_impact(home_team),
        "away_injury_impact": self.injury_collector.calculate_injury_impact(away_team),
        "factors": [],
    }

    # Human-readable explanation of what drove the pick.
    factors = result["factors"]
    diff = elo["elo_diff"]
    if diff > 50:
        factors.append(f"{home_team} has higher ELO rating (+{diff:.0f})")
    elif diff < -50:
        factors.append(f"{away_team} has higher ELO rating (+{-diff:.0f})")

    factors.append(f"Home court advantage for {home_team}")

    if home_inj["total_injuries"] > 0:
        factors.append(f"{home_team} has {home_inj['total_injuries']} injuries")
    if away_inj["total_injuries"] > 0:
        factors.append(f"{away_team} has {away_inj['total_injuries']} injuries")

    return result
|
| 398 |
+
|
| 399 |
+
def predict_todays_games(self, save_predictions: bool = True) -> List[Dict]:
    """
    Generate predictions for every game on today's slate.

    Args:
        save_predictions: When True, persist predictions for games that
            have not tipped off yet so accuracy can be tracked later.

    Returns:
        List of prediction dicts enriched with live game metadata.
    """
    games = self.get_todays_games()
    if not games:
        logger.info("No games today")
        return []

    predictions = []
    for game in games:
        home = game.get("home_team", "")
        away = game.get("away_team", "")
        if not (home and away):
            continue

        pred = self.predict_game(home, away)
        pred["game_id"] = game.get("game_id", "")
        pred["game_date"] = game.get("game_date", "")
        pred["game_status"] = game.get("status", "")
        pred["current_home_score"] = game.get("home_score", 0)
        pred["current_away_score"] = game.get("away_score", 0)

        # Only record predictions made before tip-off.
        if save_predictions and game.get("status") == "NOT_STARTED":
            self.save_prediction_for_game(game["game_id"], pred)

        predictions.append(pred)

    return predictions
|
| 432 |
+
|
| 433 |
+
def save_prediction_for_game(self, game_id: str, prediction: Dict) -> bool:
    """Persist a pre-game prediction via the tracker; True on success."""
    return self.prediction_tracker.save_prediction(game_id, prediction)
|
| 436 |
+
|
| 437 |
+
def check_prediction_results(self) -> List[Dict]:
    """
    Reconcile today's completed games with stored predictions.

    Returns:
        The predictions that were successfully updated, each enriched
        with the actual winner and final scores.
    """
    updated = []

    for game in self.get_final_games():
        gid = game["game_id"]
        h_score = game["home_score"]
        a_score = game["away_score"]
        winner = game["home_team"] if h_score > a_score else game["away_team"]

        # Record the outcome against the stored prediction.
        ok = self.prediction_tracker.update_result(
            game_id=gid,
            actual_winner=winner,
            home_score=h_score,
            away_score=a_score
        )
        if not ok:
            continue

        pred = self.prediction_tracker.get_prediction(gid)
        if pred:
            pred["actual_winner"] = winner
            pred["home_score"] = h_score
            pred["away_score"] = a_score
            updated.append(pred)

    return updated
|
| 470 |
+
|
| 471 |
+
def get_accuracy_stats(self) -> Dict:
    """Aggregate model-accuracy statistics from the tracker."""
    return self.prediction_tracker.get_accuracy_stats()

def get_recent_predictions(self, n: int = 20) -> List[Dict]:
    """Most recent predictions, including any recorded results."""
    return self.prediction_tracker.get_recent_predictions(n)

def get_pending_predictions(self) -> List[Dict]:
    """Predictions for games that have not finished yet."""
    return self.prediction_tracker.get_pending_predictions()
|
| 482 |
+
|
| 483 |
+
def get_games_with_predictions(self) -> List[Dict]:
    """
    Today's games enriched with model predictions and live scores.

    For finished games, also records the actual winner and whether the
    prediction was correct; otherwise those fields are None.
    """
    enriched = []

    for game in self.get_todays_games():
        entry = dict(game)  # shallow copy so the source list is untouched

        pred = self.predict_game(game["home_team"], game["away_team"])
        entry["prediction"] = pred

        if game["status"] == "FINAL":
            winner = (
                game["home_team"]
                if game["home_score"] > game["away_score"]
                else game["away_team"]
            )
            entry["actual_winner"] = winner
            entry["prediction_correct"] = pred["predicted_winner"] == winner
        else:
            entry["actual_winner"] = None
            entry["prediction_correct"] = None

        enriched.append(entry)

    return enriched
|
| 510 |
+
|
| 511 |
+
def get_mvp_race(self, player_df: pd.DataFrame = None) -> pd.DataFrame:
    """Current MVP race standings built from live 2025-26 season data.

    Note: ``player_df`` is accepted for interface compatibility but the
    method always pulls fresh stats from the NBA API.

    Returns:
        Top-10 candidates with PTS/REB/AST, an `mvp_score`, and an
        `mvp_similarity` in [0, 1]; empty DataFrame on API failure.
    """
    try:
        from nba_api.stats.endpoints import leaguedashplayerstats, leaguestandings
        import time

        # Rate-limit courtesy pauses around each stats call.
        time.sleep(0.5)
        stats = leaguedashplayerstats.LeagueDashPlayerStats(
            season='2025-26',
            per_mode_detailed='PerGame'
        )
        df = stats.get_data_frames()[0]

        time.sleep(0.5)
        standings = leaguestandings.LeagueStandings(season='2025-26')
        standings_df = standings.get_data_frames()[0]

        # Build a TEAM_ID -> win percentage lookup from the standings.
        team_win_pct = {}
        for _, team_row in standings_df.iterrows():
            tid = team_row.get('TeamID', 0)
            w = team_row.get('WINS', 0)
            l = team_row.get('LOSSES', 0)
            played = w + l
            if played > 0:
                team_win_pct[tid] = w / played

        df['TEAM_WIN_PCT'] = df['TEAM_ID'].map(team_win_pct).fillna(0.5)

        # Restrict to rotation-anchoring players with a real sample size.
        df = df[
            (df['MIN'] >= 25) &
            (df['GP'] >= 15)
        ].copy()

        # Heuristic MVP score — no trained model required.
        df['mvp_score'] = (
            df['PTS'].fillna(0) * 1.0 +                              # Points
            df['AST'].fillna(0) * 2.0 +                              # Assists (playmaking)
            df['REB'].fillna(0) * 1.0 +                              # Rebounds
            (df['STL'].fillna(0) + df['BLK'].fillna(0)) * 1.5 +      # Defense
            df['PLUS_MINUS'].fillna(0) * 0.3 +                       # Impact
            df['FG_PCT'].fillna(0.45) * 20 +                         # Efficiency
            df['TEAM_WIN_PCT'].fillna(0.5) * 30                      # Team success
        )

        # Simplified similarity-to-MVP-profile score in [0, 1].
        df['mvp_similarity'] = (
            (df['PTS'] / 30.0).clip(0, 1) * 0.4 +   # Elite scorer
            (df['REB'] / 12.0).clip(0, 1) * 0.2 +   # Elite rebounder
            (df['AST'] / 10.0).clip(0, 1) * 0.2 +   # Elite playmaker
            df['TEAM_WIN_PCT'] * 0.2                # Winning team
        ).fillna(0)

        df = df.sort_values('mvp_score', ascending=False)
        return df.head(10)[['PLAYER_NAME', 'PTS', 'REB', 'AST', 'mvp_score', 'mvp_similarity']]

    except Exception as e:
        logger.warning(f"Could not fetch real MVP data: {e}")
        # Empty frame with the expected columns so callers can render it.
        return pd.DataFrame({
            'PLAYER_NAME': [],
            'PTS': [],
            'REB': [],
            'AST': [],
            'mvp_score': [],
            'mvp_similarity': []
        })
|
| 585 |
+
|
| 586 |
+
def get_championship_odds(self, team_df: pd.DataFrame = None) -> pd.DataFrame:
    """Championship contenders ranked by the championship model.

    Args:
        team_df: Optional team stats frame; a static mock table is used
            when no real data is supplied.
    """
    if team_df is None:
        # Fallback mock table of contenders.
        team_df = pd.DataFrame({
            "TEAM_ABBREVIATION": ["OKC", "CLE", "BOS", "DEN", "MEM", "HOU", "NYK", "GSW"],
            "W_PCT": [0.70, 0.68, 0.65, 0.62, 0.60, 0.58, 0.55, 0.52],
            "NET_RATING": [9.5, 8.2, 7.5, 6.0, 5.5, 4.0, 3.5, 2.0]
        })

    return self.champ_model.get_top_contenders(team_df)
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
# =============================================================================
|
| 600 |
+
# CLI INTERFACE
|
| 601 |
+
# =============================================================================
|
| 602 |
+
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="NBA Prediction Pipeline")
    parser.add_argument("--test", action="store_true", help="Run test prediction")
    parser.add_argument("--today", action="store_true", help="Predict today's games")
    parser.add_argument("--game", nargs=2, help="Predict single game: HOME AWAY")
    args = parser.parse_args()

    pipeline = PredictionPipeline()

    if args.test:
        # Fixed LAL/BOS smoke test.
        print("Testing prediction pipeline...")
        for key, value in pipeline.predict_game("LAL", "BOS").items():
            print(f"  {key}: {value}")
    elif args.today:
        print("Today's game predictions:")
        for pred in pipeline.predict_todays_games():
            print(f"\n{pred['away_team']} @ {pred['home_team']}")
            print(f"  Predicted winner: {pred['predicted_winner']}")
            print(f"  Win probability: {pred['home_win_probability']:.1%}")
    elif args.game:
        home, away = args.game
        outcome = pipeline.predict_game(home.upper(), away.upper())
        print(f"\n{away.upper()} @ {home.upper()}")
        for key, value in outcome.items():
            print(f"  {key}: {value}")
    else:
        print("Use --test, --today, or --game HOME AWAY")
|
src/prediction_tracker.py
ADDED
|
@@ -0,0 +1,507 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Prediction Tracker
|
| 3 |
+
==============================================
|
| 4 |
+
Tracks predictions and measures accuracy using ChromaDB Cloud with local fallback.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from typing import Dict, List, Optional
|
| 10 |
+
import json
|
| 11 |
+
import hashlib
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
|
| 14 |
+
from src.config import CHROMADB_CONFIG, PROCESSED_DATA_DIR
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class PredictionTracker:
    """
    Tracks predictions and measures accuracy over time.

    Uses ChromaDB Cloud if available, otherwise falls back to local JSON storage.
    Stores predictions before games and updates with results after completion.
    Provides accuracy statistics by team, confidence level, and over time.

    Stored ``is_correct`` convention: -1 = pending, 0 = wrong, 1 = correct.
    """

    def __init__(self):
        """Initialize storage - try ChromaDB Cloud, fallback to local JSON."""
        self.collection = None
        self.client = None
        self._use_local = False
        self._local_file = PROCESSED_DATA_DIR / "predictions_local.json"
        self._local_data: List[Dict] = []

        # Try ChromaDB Cloud first
        try:
            import chromadb

            # CloudClient is the official entry point for Chroma Cloud.
            self.client = chromadb.CloudClient(
                tenant=CHROMADB_CONFIG.tenant,
                database=CHROMADB_CONFIG.database,
                api_key=CHROMADB_CONFIG.api_key,
            )

            # Get or create collection for predictions
            self.collection = self.client.get_or_create_collection(
                name=CHROMADB_CONFIG.collection_name,
                metadata={"description": "NBA game predictions with results"}
            )

            logger.info("Connected to ChromaDB Cloud successfully")

        except Exception as e:
            # Any failure (missing package, auth, network) drops us to JSON.
            logger.warning(f"ChromaDB Cloud unavailable ({e}), using local JSON storage")
            self._use_local = True
            self._load_local_data()

    @staticmethod
    def _empty_stats(error: Optional[str] = None) -> Dict:
        """Return a zeroed accuracy-stats dict, optionally tagged with an error message."""
        stats = {
            "total_predictions": 0,
            "completed_games": 0,
            "correct_predictions": 0,
            "overall_accuracy": 0.0,
            "by_confidence": {},
            "by_team": {},
        }
        if error is not None:
            stats["error"] = error
        return stats

    def _generate_id(self, game_id: str, prediction_date: str) -> str:
        """Generate a unique document ID (md5 as a stable hash key, not for security)."""
        return hashlib.md5(f"{game_id}_{prediction_date}".encode()).hexdigest()

    def _load_local_data(self):
        """Load predictions from the local JSON file; reset to [] on any failure."""
        if self._local_file.exists():
            try:
                with open(self._local_file, 'r') as f:
                    self._local_data = json.load(f)
            except Exception as e:
                logger.error(f"Failed to load local data: {e}")
                self._local_data = []
        else:
            self._local_data = []

    def _save_local_data(self):
        """Persist predictions to the local JSON file (best-effort, errors logged)."""
        try:
            with open(self._local_file, 'w') as f:
                json.dump(self._local_data, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save local data: {e}")

    def _find_local_prediction(self, game_id: str) -> Optional[int]:
        """Return the index of the prediction for game_id in local data, or None."""
        for i, pred in enumerate(self._local_data):
            if pred.get("game_id") == game_id:
                return i
        return None

    def save_prediction(self, game_id: str, prediction: Dict) -> bool:
        """
        Store a prediction before game starts.

        Args:
            game_id: NBA game ID
            prediction: Dict with home_team, away_team, predicted_winner,
                        home_win_prob, confidence, etc.

        Returns:
            True if saved successfully
        """
        now = datetime.now().isoformat()
        doc_id = self._generate_id(game_id, now[:10])

        # Flat, scalar-valued record (ChromaDB metadata must be scalars).
        metadata = {
            "id": doc_id,
            "game_id": game_id,
            "game_date": prediction.get("game_date", now[:10]),
            "home_team": prediction.get("home_team", ""),
            "away_team": prediction.get("away_team", ""),
            "predicted_winner": prediction.get("predicted_winner", ""),
            "home_win_prob": float(prediction.get("home_win_probability", 0.5)),
            "away_win_prob": float(prediction.get("away_win_probability", 0.5)),
            "confidence": prediction.get("confidence", "medium"),
            "home_elo": float(prediction.get("home_elo", 1500)),
            "away_elo": float(prediction.get("away_elo", 1500)),
            "actual_winner": "",  # Empty until game completes
            "is_correct": -1,  # -1 = pending, 0 = wrong, 1 = correct
            "created_at": now,
            "updated_at": now,
        }

        # Local JSON fallback path
        if self._use_local:
            try:
                # Replace an existing record for this game, otherwise append.
                idx = self._find_local_prediction(game_id)
                if idx is not None:
                    self._local_data[idx] = metadata
                else:
                    self._local_data.append(metadata)
                self._save_local_data()
                logger.info(f"Saved prediction for game {game_id} (local)")
                return True
            except Exception as e:
                logger.error(f"Failed to save prediction locally: {e}")
                return False

        # ChromaDB Cloud path
        if not self.collection:
            logger.warning("ChromaDB not available, prediction not saved")
            return False

        try:
            # Document text for semantic search
            doc_text = (
                f"NBA Game: {prediction.get('away_team')} @ {prediction.get('home_team')} "
                f"on {metadata['game_date']}. "
                f"Predicted winner: {metadata['predicted_winner']} "
                f"with {metadata['confidence']} confidence "
                f"({metadata['home_win_prob']:.1%} home win probability)"
            )

            # Upsert (update if exists, insert if not)
            self.collection.upsert(
                ids=[doc_id],
                documents=[doc_text],
                metadatas=[metadata]
            )

            logger.info(f"Saved prediction for game {game_id}")
            return True

        except Exception as e:
            logger.error(f"Failed to save prediction: {e}")
            return False

    def update_result(self, game_id: str, actual_winner: str,
                      home_score: int = 0, away_score: int = 0) -> bool:
        """
        Update prediction with actual game result.

        Args:
            game_id: NBA game ID
            actual_winner: Team tricode of actual winner
            home_score: Final home score
            away_score: Final away score

        Returns:
            True if updated successfully
        """
        # Local JSON fallback path
        if self._use_local:
            try:
                idx = self._find_local_prediction(game_id)
                if idx is None:
                    logger.warning(f"No prediction found for game {game_id}")
                    return False

                pred = self._local_data[idx]
                predicted_winner = pred.get("predicted_winner", "")
                is_correct = 1 if predicted_winner == actual_winner else 0

                pred["actual_winner"] = actual_winner
                pred["is_correct"] = is_correct
                pred["home_score"] = home_score
                pred["away_score"] = away_score
                pred["updated_at"] = datetime.now().isoformat()

                self._local_data[idx] = pred
                self._save_local_data()

                result_text = "CORRECT ✓" if is_correct else "WRONG ✗"
                logger.info(f"Updated result for game {game_id}: {result_text} (local)")
                return True

            except Exception as e:
                logger.error(f"Failed to update result locally: {e}")
                return False

        # ChromaDB Cloud path
        if not self.collection:
            return False

        try:
            # Find the prediction for this game
            results = self.collection.get(
                where={"game_id": game_id},
                include=["metadatas", "documents"]
            )

            if not results["ids"]:
                logger.warning(f"No prediction found for game {game_id}")
                return False

            doc_id = results["ids"][0]
            metadata = results["metadatas"][0]

            # Check if prediction was correct
            predicted_winner = metadata.get("predicted_winner", "")
            is_correct = 1 if predicted_winner == actual_winner else 0

            # Update metadata
            metadata["actual_winner"] = actual_winner
            metadata["is_correct"] = is_correct
            metadata["home_score"] = home_score
            metadata["away_score"] = away_score
            metadata["updated_at"] = datetime.now().isoformat()

            # Refresh the searchable document text with the outcome
            result_text = "CORRECT ✓" if is_correct else "WRONG ✗"
            doc_text = (
                f"NBA Game: {metadata['away_team']} @ {metadata['home_team']}. "
                f"Predicted: {predicted_winner}, Actual: {actual_winner}. "
                f"Result: {result_text}"
            )

            self.collection.update(
                ids=[doc_id],
                documents=[doc_text],
                metadatas=[metadata]
            )

            logger.info(f"Updated result for game {game_id}: {result_text}")
            return True

        except Exception as e:
            logger.error(f"Failed to update result: {e}")
            return False

    def get_prediction(self, game_id: str) -> Optional[Dict]:
        """Get the stored prediction metadata for a specific game, or None."""
        if self._use_local:
            idx = self._find_local_prediction(game_id)
            if idx is not None:
                return self._local_data[idx]
            return None

        if not self.collection:
            return None

        try:
            results = self.collection.get(
                where={"game_id": game_id},
                include=["metadatas"]
            )

            if results["ids"]:
                return results["metadatas"][0]
            return None

        except Exception as e:
            logger.error(f"Failed to get prediction: {e}")
            return None

    def get_recent_predictions(self, n: int = 20) -> List[Dict]:
        """Get the N most recent predictions, newest first by created_at."""
        if self._use_local:
            predictions = sorted(
                self._local_data,
                key=lambda x: x.get("created_at", ""),
                reverse=True
            )
            return predictions[:n]

        if not self.collection:
            return []

        try:
            # Fetch everything, then sort client-side by creation time.
            results = self.collection.get(include=["metadatas"])

            if not results["ids"]:
                return []

            predictions = results["metadatas"]
            predictions.sort(key=lambda x: x.get("created_at", ""), reverse=True)
            return predictions[:n]

        except Exception as e:
            logger.error(f"Failed to get recent predictions: {e}")
            return []

    def _calculate_accuracy_from_predictions(self, predictions: List[Dict]) -> Dict:
        """Compute overall / per-confidence / per-team accuracy from raw records."""
        if not predictions:
            return self._empty_stats()

        # Only games with a recorded result (is_correct >= 0) count toward accuracy.
        completed = [p for p in predictions if p.get("is_correct", -1) >= 0]
        correct = [p for p in completed if p.get("is_correct") == 1]

        # Accuracy bucketed by the confidence label assigned at prediction time.
        confidence_stats = {}
        for conf in ["high", "medium", "low"]:
            conf_preds = [p for p in completed if p.get("confidence") == conf]
            conf_correct = [p for p in conf_preds if p.get("is_correct") == 1]
            if conf_preds:
                confidence_stats[conf] = {
                    "total": len(conf_preds),
                    "correct": len(conf_correct),
                    "accuracy": len(conf_correct) / len(conf_preds)
                }

        # Accuracy bucketed by which team we predicted to win.
        team_stats = {}
        for pred in completed:
            team = pred.get("predicted_winner", "")
            if team not in team_stats:
                team_stats[team] = {"total": 0, "correct": 0}
            team_stats[team]["total"] += 1
            if pred.get("is_correct") == 1:
                team_stats[team]["correct"] += 1

        for team in team_stats:
            total = team_stats[team]["total"]
            team_stats[team]["accuracy"] = team_stats[team]["correct"] / total if total > 0 else 0

        return {
            "total_predictions": len(predictions),
            "completed_games": len(completed),
            "correct_predictions": len(correct),
            "overall_accuracy": len(correct) / len(completed) if completed else 0.0,
            "by_confidence": confidence_stats,
            "by_team": team_stats,
        }

    def get_accuracy_stats(self) -> Dict:
        """
        Calculate comprehensive accuracy statistics.

        Returns:
            Dict with overall accuracy, by confidence, by team
        """
        if self._use_local:
            return self._calculate_accuracy_from_predictions(self._local_data)

        if not self.collection:
            return self._empty_stats()

        try:
            results = self.collection.get(include=["metadatas"])
            # Same computation as the local path — no duplicated logic.
            return self._calculate_accuracy_from_predictions(results["metadatas"])
        except Exception as e:
            logger.error(f"Failed to get accuracy stats: {e}")
            return self._empty_stats(error=str(e))

    def get_pending_predictions(self) -> List[Dict]:
        """Get predictions for games not yet completed (is_correct == -1)."""
        # Local JSON fallback (previously missing: local mode always returned []).
        if self._use_local:
            return [p for p in self._local_data if p.get("is_correct", -1) == -1]

        if not self.collection:
            return []

        try:
            results = self.collection.get(
                where={"is_correct": -1},
                include=["metadatas"]
            )

            return results.get("metadatas", [])

        except Exception as e:
            logger.error(f"Failed to get pending predictions: {e}")
            return []
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
# =============================================================================
|
| 483 |
+
# CLI INTERFACE
|
| 484 |
+
# =============================================================================
|
| 485 |
+
if __name__ == "__main__":
    # Ad-hoc smoke check: print current accuracy and the latest predictions.
    logging.basicConfig(level=logging.INFO)

    tracker = PredictionTracker()
    summary = tracker.get_accuracy_stats()

    print("\n=== Prediction Tracker Stats ===\n")
    print(f"Total Predictions: {summary['total_predictions']}")
    print(f"Completed Games: {summary['completed_games']}")
    print(f"Correct Predictions: {summary['correct_predictions']}")
    print(f"Overall Accuracy: {summary['overall_accuracy']:.1%}")

    if summary['by_confidence']:
        print("\nBy Confidence Level:")
        for level, bucket in summary['by_confidence'].items():
            print(f"  {level.upper()}: {bucket['correct']}/{bucket['total']} ({bucket['accuracy']:.1%})")

    print("\n=== Recent Predictions ===\n")
    for entry in tracker.get_recent_predictions(5):
        outcome = entry.get("is_correct")
        # ✓ correct, ✗ wrong, ⏳ still pending a result.
        status = "✓" if outcome == 1 else "✗" if outcome == 0 else "⏳"
        print(f"{status} {entry.get('away_team')} @ {entry.get('home_team')} - Predicted: {entry.get('predicted_winner')}")
|
src/preprocessing.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Preprocessing
|
| 3 |
+
=========================================
|
| 4 |
+
Data cleaning and transformation with:
|
| 5 |
+
- Time-aware train/val/test splits
|
| 6 |
+
- Dynamic feature detection (uses ALL available features)
|
| 7 |
+
- Missing value handling
|
| 8 |
+
- Feature scaling
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import pandas as pd
|
| 12 |
+
import numpy as np
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import List, Tuple, Optional, Dict
|
| 15 |
+
from sklearn.preprocessing import StandardScaler
|
| 16 |
+
from sklearn.impute import SimpleImputer
|
| 17 |
+
import joblib
|
| 18 |
+
import logging
|
| 19 |
+
|
| 20 |
+
from src.config import MODEL_CONFIG, PROCESSED_DATA_DIR, MODELS_DIR
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
# =============================================================================
|
| 25 |
+
# COLUMNS TO EXCLUDE FROM FEATURES
|
| 26 |
+
# =============================================================================
|
| 27 |
+
# Identifier, label, and free-text columns that must never be used as model
# features: IDs/dates would leak game identity, WL/target is the label itself.
EXCLUDE_COLUMNS = [
    "GAME_ID", "TEAM_ID", "GAME_DATE", "SEASON_ID", "SEASON",
    "WL", "target", "MATCHUP", "TEAM_NAME", "TEAM_ABBREVIATION",
    "PLAYER_ID", "PLAYER_NAME"
]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# =============================================================================
|
| 35 |
+
# SEASON-BASED SPLITTER (NO DATA LEAKAGE)
|
| 36 |
+
# =============================================================================
|
| 37 |
+
class SeasonBasedSplitter:
    """Partitions game rows by season so future seasons never leak into training."""

    def __init__(self,
                 test_seasons: List[str] = None,
                 val_seasons: List[str] = None):
        # Fall back to the configured hold-out seasons when none are supplied.
        self.test_seasons = test_seasons or MODEL_CONFIG.test_seasons
        self.val_seasons = val_seasons or MODEL_CONFIG.val_seasons

    def split(self, df: pd.DataFrame,
              season_column: str = "SEASON") -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
        """Return (train, val, test) copies of df keyed on the season column."""
        # Derive a season label from SEASON_ID when the column is missing.
        if season_column not in df.columns and "SEASON_ID" in df.columns:
            df = df.copy()
            df[season_column] = df["SEASON_ID"].apply(self._parse_season_id)

        in_test = df[season_column].isin(self.test_seasons)
        in_val = df[season_column].isin(self.val_seasons)

        # Everything not reserved for validation/test is training data.
        train_df = df[~(in_test | in_val)].copy()
        val_df = df[in_val].copy()
        test_df = df[in_test].copy()

        logger.info(f"Split: Train={len(train_df)}, Val={len(val_df)}, Test={len(test_df)}")

        return train_df, val_df, test_df

    def _parse_season_id(self, season_id: str) -> str:
        """Convert a 5-char id like '22023' into a '2023-24' season label."""
        if isinstance(season_id, str) and len(season_id) == 5:
            start_year = int(season_id[1:])
            return f"{start_year}-{str(start_year + 1)[-2:]}"
        # Anything else is passed through as a string unchanged.
        return str(season_id)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# =============================================================================
|
| 73 |
+
# DATA PREPROCESSOR
|
| 74 |
+
# =============================================================================
|
| 75 |
+
class DataPreprocessor:
    """Imputes missing values (median) and standardizes feature columns."""

    def __init__(self, feature_columns: List[str] = None):
        self.feature_columns = feature_columns
        self.scaler = StandardScaler()
        self.imputer = SimpleImputer(strategy="median")
        self.fitted = False

    def fit(self, df: pd.DataFrame, feature_columns: List[str] = None):
        """Fit the imputer and scaler on df's feature columns."""
        if feature_columns:
            self.feature_columns = feature_columns

        # Scaler is fit on imputed values so both see the same distribution.
        raw = df[self.feature_columns].values
        self.scaler.fit(self.imputer.fit_transform(raw))

        self.fitted = True
        logger.info(f"Preprocessor fitted on {len(self.feature_columns)} features")

    def transform(self, df: pd.DataFrame) -> np.ndarray:
        """Impute then scale df's feature columns; requires a prior fit()."""
        if not self.fitted:
            raise ValueError("Preprocessor not fitted. Call fit() first.")

        raw = df[self.feature_columns].values
        return self.scaler.transform(self.imputer.transform(raw))

    def fit_transform(self, df: pd.DataFrame, feature_columns: List[str] = None) -> np.ndarray:
        """Convenience wrapper: fit() then transform() on the same frame."""
        self.fit(df, feature_columns)
        return self.transform(df)

    def save(self, path: Path = None):
        """Serialize feature columns, scaler, and imputer with joblib."""
        target = path if path is not None else MODELS_DIR / "preprocessor.joblib"

        joblib.dump({
            "feature_columns": self.feature_columns,
            "scaler": self.scaler,
            "imputer": self.imputer
        }, target)
        logger.info(f"Saved preprocessor to {target}")

    def load(self, path: Path = None):
        """Restore a previously saved preprocessor and mark it fitted."""
        source = path if path is not None else MODELS_DIR / "preprocessor.joblib"

        state = joblib.load(source)
        self.feature_columns = state["feature_columns"]
        self.scaler = state["scaler"]
        self.imputer = state["imputer"]
        self.fitted = True
        logger.info(f"Loaded preprocessor from {source}")
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
# =============================================================================
|
| 133 |
+
# DATASET BUILDER - USES ALL AVAILABLE FEATURES
|
| 134 |
+
# =============================================================================
|
| 135 |
+
class GameDatasetBuilder:
|
| 136 |
+
"""Builds train/val/test datasets using ALL available features."""
|
| 137 |
+
|
| 138 |
+
    def __init__(self):
        """Create the season-based splitter and preprocessor used to build datasets."""
        self.splitter = SeasonBasedSplitter()
        self.preprocessor = DataPreprocessor()
|
| 141 |
+
|
| 142 |
+
def _get_feature_columns(self, df: pd.DataFrame) -> List[str]:
|
| 143 |
+
"""
|
| 144 |
+
Dynamically detect ALL numeric feature columns.
|
| 145 |
+
Excludes ID columns, target, and non-numeric columns.
|
| 146 |
+
"""
|
| 147 |
+
feature_columns = []
|
| 148 |
+
|
| 149 |
+
for col in df.columns:
|
| 150 |
+
# Skip excluded columns
|
| 151 |
+
if col in EXCLUDE_COLUMNS:
|
| 152 |
+
continue
|
| 153 |
+
|
| 154 |
+
# Skip non-numeric columns
|
| 155 |
+
if not pd.api.types.is_numeric_dtype(df[col]):
|
| 156 |
+
continue
|
| 157 |
+
|
| 158 |
+
# Skip columns with all NaN
|
| 159 |
+
if df[col].isna().all():
|
| 160 |
+
continue
|
| 161 |
+
|
| 162 |
+
feature_columns.append(col)
|
| 163 |
+
|
| 164 |
+
return sorted(feature_columns)
|
| 165 |
+
|
| 166 |
+
def build_dataset(self, features_df: pd.DataFrame,
|
| 167 |
+
target_column: str = "WL",
|
| 168 |
+
use_all_features: bool = True) -> Dict:
|
| 169 |
+
"""
|
| 170 |
+
Build complete dataset for training.
|
| 171 |
+
|
| 172 |
+
Args:
|
| 173 |
+
features_df: DataFrame with features
|
| 174 |
+
target_column: Column to predict
|
| 175 |
+
use_all_features: If True, uses ALL available numeric features
|
| 176 |
+
"""
|
| 177 |
+
# Remove rows without target
|
| 178 |
+
df = features_df.dropna(subset=[target_column]).copy()
|
| 179 |
+
|
| 180 |
+
# Convert WL to binary
|
| 181 |
+
df["target"] = (df[target_column] == "W").astype(int)
|
| 182 |
+
|
| 183 |
+
# Split by season
|
| 184 |
+
train_df, val_df, test_df = self.splitter.split(df)
|
| 185 |
+
|
| 186 |
+
# Get feature columns - USE ALL AVAILABLE
|
| 187 |
+
if use_all_features:
|
| 188 |
+
feature_columns = self._get_feature_columns(df)
|
| 189 |
+
logger.info(f"Detected {len(feature_columns)} numeric feature columns")
|
| 190 |
+
else:
|
| 191 |
+
# Fallback to basic features
|
| 192 |
+
feature_columns = [
|
| 193 |
+
"team_elo", "opponent_elo", "elo_diff", "elo_win_prob", "is_home",
|
| 194 |
+
"PTS_last5", "PTS_last10", "AST_last5", "REB_last5",
|
| 195 |
+
"win_pct_season", "days_rest", "back_to_back"
|
| 196 |
+
]
|
| 197 |
+
feature_columns = [c for c in feature_columns if c in df.columns]
|
| 198 |
+
|
| 199 |
+
logger.info(f"\n=== FEATURES USED FOR TRAINING ({len(feature_columns)} total) ===")
|
| 200 |
+
for i, col in enumerate(feature_columns):
|
| 201 |
+
logger.info(f" {i+1:3}. {col}")
|
| 202 |
+
|
| 203 |
+
# Fit preprocessor on training data
|
| 204 |
+
self.preprocessor.fit(train_df, feature_columns)
|
| 205 |
+
|
| 206 |
+
# Transform all splits
|
| 207 |
+
X_train = self.preprocessor.transform(train_df)
|
| 208 |
+
X_val = self.preprocessor.transform(val_df)
|
| 209 |
+
X_test = self.preprocessor.transform(test_df)
|
| 210 |
+
|
| 211 |
+
y_train = train_df["target"].values
|
| 212 |
+
y_val = val_df["target"].values
|
| 213 |
+
y_test = test_df["target"].values
|
| 214 |
+
|
| 215 |
+
logger.info(f"\n=== DATASET SUMMARY ===")
|
| 216 |
+
logger.info(f" Training samples: {len(y_train)}")
|
| 217 |
+
logger.info(f" Validation samples: {len(y_val)}")
|
| 218 |
+
logger.info(f" Test samples: {len(y_test)}")
|
| 219 |
+
logger.info(f" Features: {len(feature_columns)}")
|
| 220 |
+
|
| 221 |
+
return {
|
| 222 |
+
"X_train": X_train, "y_train": y_train,
|
| 223 |
+
"X_val": X_val, "y_val": y_val,
|
| 224 |
+
"X_test": X_test, "y_test": y_test,
|
| 225 |
+
"feature_columns": feature_columns,
|
| 226 |
+
"preprocessor": self.preprocessor,
|
| 227 |
+
"train_df": train_df,
|
| 228 |
+
"val_df": val_df,
|
| 229 |
+
"test_df": test_df
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
def save_dataset(self, dataset: Dict, name: str = "game_dataset"):
|
| 233 |
+
path = PROCESSED_DATA_DIR / f"{name}.joblib"
|
| 234 |
+
joblib.dump(dataset, path)
|
| 235 |
+
logger.info(f"Saved dataset to {path}")
|
| 236 |
+
|
| 237 |
+
def load_dataset(self, name: str = "game_dataset") -> Dict:
|
| 238 |
+
path = PROCESSED_DATA_DIR / f"{name}.joblib"
|
| 239 |
+
return joblib.load(path)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# =============================================================================
|
| 243 |
+
# CLI INTERFACE
|
| 244 |
+
# =============================================================================
|
| 245 |
+
if __name__ == "__main__":
    import argparse
    import sys

    parser = argparse.ArgumentParser(description="Preprocessing")
    parser.add_argument("--build", action="store_true", help="Build dataset from features")
    parser.add_argument("--test", action="store_true", help="Run tests")

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    if args.build:
        print("=== Building Dataset from Features ===")

        features_path = PROCESSED_DATA_DIR / "game_features.parquet"

        if not features_path.exists():
            print(f"ERROR: Features not found at {features_path}")
            print("Run 'python -m src.feature_engineering --process' first.")
            # FIX: use sys.exit instead of the `exit` builtin — `exit` is a
            # site.py convenience for interactive sessions and may be absent
            # (e.g. under `python -S` or some frozen/embedded runtimes).
            sys.exit(1)

        print(f"Loading features from {features_path}...")
        features_df = pd.read_parquet(features_path)
        print(f"Loaded {len(features_df)} rows")

        builder = GameDatasetBuilder()
        dataset = builder.build_dataset(features_df, use_all_features=True)
        builder.save_dataset(dataset)

        print(f"\n✅ Dataset saved!")
        print(f" Training samples: {len(dataset['y_train'])}")
        print(f" Features used: {len(dataset['feature_columns'])}")

    elif args.test:
        # Quick smoke test of the chronological splitter on synthetic data.
        print("Testing Season-Based Splitter...")
        sample_data = pd.DataFrame({
            "SEASON": ["2022-23"] * 100 + ["2023-24"] * 50 + ["2024-25"] * 25,
            "feature1": np.random.randn(175),
            "WL": np.random.choice(["W", "L"], 175)
        })

        splitter = SeasonBasedSplitter()
        train, val, test = splitter.split(sample_data)

        print(f"Train: {len(train)}, Val: {len(val)}, Test: {len(test)}")

    else:
        print("Use --build to build dataset or --test to run tests")
|
src/visualization.py
ADDED
|
@@ -0,0 +1,539 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NBA ML Prediction System - Data Visualization
|
| 3 |
+
==============================================
|
| 4 |
+
Create and save visualizations for analysis and reporting.
|
| 5 |
+
All graphs are saved to the 'graphs' folder.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
import matplotlib.pyplot as plt
|
| 11 |
+
import matplotlib.dates as mdates
|
| 12 |
+
import seaborn as sns
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import Optional, List, Dict
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
import logging
|
| 17 |
+
|
| 18 |
+
from src.config import PROJECT_ROOT, RAW_DATA_DIR, PROCESSED_DATA_DIR
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
# =============================================================================
|
| 23 |
+
# CONFIGURATION
|
| 24 |
+
# =============================================================================
|
| 25 |
+
# Output directory for all generated charts; created eagerly on import.
GRAPHS_DIR = PROJECT_ROOT / "graphs"
GRAPHS_DIR.mkdir(exist_ok=True)

# Style settings
# Global matplotlib theme; applied once at import time for every figure.
plt.style.use('dark_background')
# Shared palette used by every visualizer class in this module.
COLORS = {
    'primary': '#7c3aed',    # purple — main series / bars
    'secondary': '#00d4ff',  # cyan — secondary series
    'success': '#10b981',
    'warning': '#f59e0b',    # used e.g. for the playoff-cutoff line
    'danger': '#ef4444',
    'gradient': ['#7c3aed', '#00d4ff', '#f472b6']
}
|
| 38 |
+
|
| 39 |
+
def save_figure(fig, name: str, dpi: int = 150):
    """Write *fig* as ``<name>.png`` into the graphs folder and close it.

    Args:
        fig: Matplotlib figure to persist.
        name: Base filename (without extension).
        dpi: Output resolution.

    Returns:
        Path of the saved PNG.
    """
    path = GRAPHS_DIR / f"{name}.png"
    fig.savefig(
        path,
        dpi=dpi,
        bbox_inches='tight',
        facecolor='#1a1a2e',
        edgecolor='none',
    )
    logger.info(f"Saved graph to {path}")
    # Close to free figure memory — callers only need the file path.
    plt.close(fig)
    return path
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# =============================================================================
|
| 49 |
+
# TEAM PERFORMANCE VISUALIZATIONS
|
| 50 |
+
# =============================================================================
|
| 51 |
+
class TeamVisualizer:
    """Visualization for team-level statistics."""

    def plot_elo_history(self, elo_history: pd.DataFrame, team_abbrev: str = None) -> Path:
        """
        Plot ELO rating history over time.

        Args:
            elo_history: DataFrame with columns [date, team, elo]
            team_abbrev: If given, plot only this team with a filled area;
                otherwise overlay the first five teams from the groupby.

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(14, 7))

        if team_abbrev:
            data = elo_history[elo_history['team'] == team_abbrev]
            ax.plot(data['date'], data['elo'], color=COLORS['primary'], linewidth=2)
            # Shade between the 1500 baseline and the team's rating.
            ax.fill_between(data['date'], 1500, data['elo'], alpha=0.3, color=COLORS['primary'])
            title = f"{team_abbrev} ELO Rating History"
        else:
            # Plot top 5 teams
            # NOTE(review): "top 5" here is the first five groups in groupby
            # order (alphabetical by team), not the five highest-rated —
            # confirm this is the intended selection.
            for i, (team, data) in enumerate(elo_history.groupby('team')):
                if i < 5:
                    ax.plot(data['date'], data['elo'], label=team, linewidth=2)
            ax.legend(loc='upper left')
            title = "ELO Rating History - Top Teams"

        # 1500 is the league-average baseline used throughout this module.
        ax.axhline(y=1500, color='white', linestyle='--', alpha=0.3, label='Average')
        ax.set_xlabel('Date', fontsize=12)
        ax.set_ylabel('ELO Rating', fontsize=12)
        ax.set_title(title, fontsize=16, fontweight='bold')
        ax.grid(True, alpha=0.2)

        return save_figure(fig, f"elo_history_{team_abbrev or 'all'}")

    def plot_team_comparison(self, team_stats: pd.DataFrame,
                             metrics: List[str] = None) -> Path:
        """
        Radar chart comparing multiple teams.

        Plots the first five rows of ``team_stats``, with each metric
        min-max normalized to [0, 1] across all rows.

        Returns:
            Path to the saved PNG.
        """
        if metrics is None:
            metrics = ['PTS', 'AST', 'REB', 'STL', 'BLK', 'FG_PCT']

        # Normalize metrics to 0-1 scale
        normalized = team_stats[metrics].copy()
        for col in metrics:
            normalized[col] = (normalized[col] - normalized[col].min()) / (normalized[col].max() - normalized[col].min())

        # Create radar chart
        fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))

        # One spoke per metric; repeat the first angle to close the polygon.
        angles = np.linspace(0, 2 * np.pi, len(metrics), endpoint=False).tolist()
        angles += angles[:1]

        for i, (idx, row) in enumerate(team_stats.head(5).iterrows()):
            values = normalized.loc[idx, metrics].tolist()
            values += values[:1]  # close the polygon
            ax.plot(angles, values, linewidth=2, label=row.get('TEAM_ABBREVIATION', f'Team {i+1}'))
            ax.fill(angles, values, alpha=0.1)

        ax.set_xticks(angles[:-1])
        ax.set_xticklabels(metrics)
        ax.legend(loc='upper right', bbox_to_anchor=(1.3, 1))
        ax.set_title('Team Comparison', fontsize=16, fontweight='bold', pad=20)

        return save_figure(fig, "team_comparison_radar")

    def plot_standings(self, standings: pd.DataFrame) -> Path:
        """
        Horizontal bar chart of team standings by win percentage.

        Shows the top 15 teams by W_PCT; the top 8 are drawn in the
        primary color, the rest in the secondary color.

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(12, 10))

        # tail(15) after ascending sort = the 15 best teams, best at the top.
        data = standings.sort_values('W_PCT', ascending=True).tail(15)
        colors = [COLORS['primary'] if i >= 7 else COLORS['secondary']
                  for i in range(len(data))]

        bars = ax.barh(data['TEAM_ABBREVIATION'], data['W_PCT'], color=colors, edgecolor='white', linewidth=0.5)

        # Add playoff line
        ax.axvline(x=0.5, color=COLORS['warning'], linestyle='--', linewidth=2, label='Playoff Cutoff')

        ax.set_xlabel('Win Percentage', fontsize=12)
        ax.set_title('Team Standings', fontsize=16, fontweight='bold')
        ax.set_xlim(0, 1)
        ax.legend()
        ax.grid(axis='x', alpha=0.2)

        # Add value labels
        for bar, val in zip(bars, data['W_PCT']):
            ax.text(val + 0.02, bar.get_y() + bar.get_height()/2,
                    f'{val:.1%}', va='center', fontsize=10)

        return save_figure(fig, "standings")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# =============================================================================
|
| 146 |
+
# GAME PREDICTION VISUALIZATIONS
|
| 147 |
+
# =============================================================================
|
| 148 |
+
class GameVisualizer:
    """Visualization for game predictions and analysis."""

    def plot_prediction_calibration(self, predictions: pd.DataFrame) -> Path:
        """
        Calibration curve - how well do probabilities match actual outcomes.

        Args:
            predictions: DataFrame with columns 'predicted_proba' (float)
                and 'actual' (binary outcome).

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(10, 10))

        # Bin predictions
        # 10 equal-width probability bins over [0, 1].
        bins = np.linspace(0, 1, 11)
        bin_centers = (bins[:-1] + bins[1:]) / 2

        predicted_proba = predictions['predicted_proba']
        actual = predictions['actual']

        bin_indices = np.digitize(predicted_proba, bins) - 1
        # Clamp so a prediction of exactly 1.0 lands in the last bin.
        bin_indices = np.clip(bin_indices, 0, 9)

        # Observed win rate per bin; NaN for empty bins (matplotlib skips them).
        actual_fractions = []
        for i in range(10):
            mask = bin_indices == i
            if mask.sum() > 0:
                actual_fractions.append(actual[mask].mean())
            else:
                actual_fractions.append(np.nan)

        # Perfect calibration line
        ax.plot([0, 1], [0, 1], 'w--', linewidth=2, label='Perfect Calibration')

        # Actual calibration
        ax.plot(bin_centers, actual_fractions, 'o-', color=COLORS['primary'],
                linewidth=3, markersize=10, label='Model Calibration')

        ax.set_xlabel('Predicted Probability', fontsize=12)
        ax.set_ylabel('Actual Win Rate', fontsize=12)
        ax.set_title('Prediction Calibration Curve', fontsize=16, fontweight='bold')
        ax.legend()
        ax.grid(True, alpha=0.2)
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)

        return save_figure(fig, "calibration_curve")

    def plot_accuracy_by_confidence(self, predictions: pd.DataFrame) -> Path:
        """
        How does accuracy change with prediction confidence?

        Left panel: accuracy per confidence bucket; right panel: number of
        predictions per bucket. Expects columns 'predicted_proba',
        'predicted', and 'actual'.

        Returns:
            Path to the saved PNG.
        """
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        # Calculate confidence as distance from 0.5
        predictions = predictions.copy()  # avoid mutating the caller's frame
        predictions['confidence'] = abs(predictions['predicted_proba'] - 0.5)
        predictions['correct'] = predictions['predicted'] == predictions['actual']

        # Bin by confidence
        # 5 buckets over [0, 0.5]; labeled Low..High on the axes below.
        bins = np.linspace(0, 0.5, 6)
        predictions['confidence_bin'] = pd.cut(predictions['confidence'], bins)

        accuracy_by_conf = predictions.groupby('confidence_bin')['correct'].mean()
        count_by_conf = predictions.groupby('confidence_bin').size()

        # Accuracy plot
        ax1.bar(range(len(accuracy_by_conf)), accuracy_by_conf.values,
                color=COLORS['primary'], edgecolor='white')
        ax1.set_xticks(range(len(accuracy_by_conf)))
        ax1.set_xticklabels(['Low', 'Med-Low', 'Medium', 'Med-High', 'High'], rotation=45)
        ax1.set_ylabel('Accuracy', fontsize=12)
        ax1.set_title('Accuracy by Confidence Level', fontsize=14, fontweight='bold')
        ax1.set_ylim(0, 1)
        # Reference line: coin-flip accuracy.
        ax1.axhline(y=0.5, color='white', linestyle='--', alpha=0.3)

        # Count plot
        ax2.bar(range(len(count_by_conf)), count_by_conf.values,
                color=COLORS['secondary'], edgecolor='white')
        ax2.set_xticks(range(len(count_by_conf)))
        ax2.set_xticklabels(['Low', 'Med-Low', 'Medium', 'Med-High', 'High'], rotation=45)
        ax2.set_ylabel('Number of Predictions', fontsize=12)
        ax2.set_title('Prediction Distribution', fontsize=14, fontweight='bold')

        plt.tight_layout()
        return save_figure(fig, "accuracy_by_confidence")

    def plot_feature_importance(self, importance_df: pd.DataFrame, top_n: int = 15) -> Path:
        """
        Bar chart of feature importance.

        Args:
            importance_df: DataFrame with columns 'feature' and
                'avg_importance', assumed pre-sorted with the most
                important features first.
            top_n: How many features to display.

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(12, 8))

        # Re-sort ascending so the largest bar renders at the top of barh.
        data = importance_df.head(top_n).sort_values('avg_importance', ascending=True)

        bars = ax.barh(data['feature'], data['avg_importance'],
                       color=COLORS['primary'], edgecolor='white', linewidth=0.5)

        ax.set_xlabel('Importance Score', fontsize=12)
        ax.set_title('Top Features for Game Prediction', fontsize=16, fontweight='bold')
        ax.grid(axis='x', alpha=0.2)

        return save_figure(fig, "feature_importance")
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
# =============================================================================
|
| 250 |
+
# MVP VISUALIZATIONS
|
| 251 |
+
# =============================================================================
|
| 252 |
+
class MVPVisualizer:
    """Visualization for MVP race analysis."""

    def plot_mvp_race(self, mvp_df: pd.DataFrame) -> Path:
        """
        Horizontal bar chart of MVP race standings.

        Shows the top 10 rows of ``mvp_df`` (columns 'PLAYER_NAME' and
        'mvp_score'), best candidate at the top.

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(12, 8))

        # Ascending sort so the leader renders at the top of the barh chart.
        data = mvp_df.head(10).sort_values('mvp_score', ascending=True)
        colors = plt.cm.Purples(np.linspace(0.3, 0.9, len(data)))

        bars = ax.barh(data['PLAYER_NAME'], data['mvp_score'], color=colors, edgecolor='white')

        ax.set_xlabel('MVP Score', fontsize=12)
        ax.set_title('MVP Race 2024-25', fontsize=16, fontweight='bold')
        ax.grid(axis='x', alpha=0.2)

        # Add value labels
        for bar, val in zip(bars, data['mvp_score']):
            ax.text(val + 0.5, bar.get_y() + bar.get_height()/2,
                    f'{val:.1f}', va='center', fontsize=10)

        return save_figure(fig, "mvp_race")

    def plot_mvp_similarity(self, mvp_df: pd.DataFrame) -> Path:
        """
        Scatter plot of MVP score vs historical similarity.

        Marker size scales with PTS; color encodes mvp_score. The top
        five candidates are labeled by name.

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(10, 8))

        scatter = ax.scatter(mvp_df['mvp_similarity'], mvp_df['mvp_score'],
                             s=mvp_df['PTS'] * 10, c=mvp_df['mvp_score'],
                             cmap='Purples', alpha=0.7, edgecolor='white')

        # Add labels for top candidates
        for idx, row in mvp_df.head(5).iterrows():
            ax.annotate(row['PLAYER_NAME'],
                        (row['mvp_similarity'], row['mvp_score']),
                        xytext=(10, 10), textcoords='offset points',
                        fontsize=10, color='white')

        ax.set_xlabel('Similarity to Historical MVPs', fontsize=12)
        ax.set_ylabel('MVP Score', fontsize=12)
        ax.set_title('MVP Score vs Historical Similarity', fontsize=16, fontweight='bold')
        ax.grid(True, alpha=0.2)

        cbar = plt.colorbar(scatter, ax=ax)
        cbar.set_label('MVP Score', fontsize=10)

        return save_figure(fig, "mvp_similarity_scatter")

    def plot_stat_comparison(self, mvp_df: pd.DataFrame,
                             stats: List[str] = None) -> Path:
        """
        Bar chart comparing stats of top MVP candidates.

        One subplot per stat, covering the top five rows of ``mvp_df``.

        Args:
            mvp_df: MVP candidates, best first.
            stats: Stat columns to plot; defaults to PTS/REB/AST.

        Returns:
            Path to the saved PNG.
        """
        if stats is None:
            stats = ['PTS', 'REB', 'AST']

        fig, axes = plt.subplots(1, len(stats), figsize=(5 * len(stats), 6))
        if len(stats) == 1:
            # plt.subplots returns a bare Axes (not an array) for a single panel.
            axes = [axes]

        top_players = mvp_df.head(5)

        for ax, stat in zip(axes, stats):
            colors = plt.cm.Purples(np.linspace(0.4, 0.9, len(top_players)))
            bars = ax.bar(top_players['PLAYER_NAME'], top_players[stat], color=colors, edgecolor='white')
            ax.set_ylabel(stat, fontsize=12)
            ax.set_title(f'{stat} Comparison', fontsize=14, fontweight='bold')
            ax.tick_params(axis='x', rotation=45)

            # Numeric label above each bar.
            for bar, val in zip(bars, top_players[stat]):
                ax.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.5,
                        f'{val:.1f}', ha='center', fontsize=10)

        plt.tight_layout()
        return save_figure(fig, "mvp_stat_comparison")
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
# =============================================================================
|
| 334 |
+
# CHAMPIONSHIP VISUALIZATIONS
|
| 335 |
+
# =============================================================================
|
| 336 |
+
class ChampionshipVisualizer:
    """Visualization for championship predictions."""

    def plot_championship_odds(self, champ_df: pd.DataFrame) -> Path:
        """
        Pie chart of championship probabilities.

        Uses the top 8 rows of ``champ_df`` (columns 'TEAM_ABBREVIATION'
        and 'champ_probability').

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(10, 10))

        data = champ_df.head(8)
        colors = plt.cm.Purples(np.linspace(0.3, 0.9, len(data)))

        wedges, texts, autotexts = ax.pie(
            data['champ_probability'],
            labels=data['TEAM_ABBREVIATION'],
            autopct='%1.1f%%',
            colors=colors,
            explode=[0.05] * len(data),  # slight separation for every slice
            shadow=True,
            startangle=90
        )

        # Force white text for readability on the dark background.
        for text in texts:
            text.set_fontsize(12)
            text.set_color('white')
        for autotext in autotexts:
            autotext.set_fontsize(10)
            autotext.set_color('white')

        ax.set_title('Championship Probabilities', fontsize=16, fontweight='bold')

        return save_figure(fig, "championship_odds_pie")

    def plot_strength_vs_experience(self, champ_df: pd.DataFrame) -> Path:
        """
        Scatter plot of team strength vs playoff experience.

        Bubble size and color both encode 'champ_probability'; every team
        is labeled by abbreviation.

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(12, 8))

        scatter = ax.scatter(
            champ_df['playoff_experience'],
            champ_df['strength_rating'],
            s=champ_df['champ_probability'] * 3000,  # scale prob -> point area
            c=champ_df['champ_probability'],
            cmap='Purples',
            alpha=0.7,
            edgecolor='white',
            linewidth=2
        )

        # Add labels
        for idx, row in champ_df.iterrows():
            ax.annotate(
                row['TEAM_ABBREVIATION'],
                (row['playoff_experience'], row['strength_rating']),
                xytext=(10, 5), textcoords='offset points',
                fontsize=11, color='white', fontweight='bold'
            )

        ax.set_xlabel('Playoff Experience Index', fontsize=12)
        ax.set_ylabel('Strength Rating', fontsize=12)
        ax.set_title('Championship Contenders: Strength vs Experience', fontsize=16, fontweight='bold')
        ax.grid(True, alpha=0.2)

        cbar = plt.colorbar(scatter, ax=ax)
        cbar.set_label('Championship Probability', fontsize=10)

        return save_figure(fig, "strength_vs_experience")
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
# =============================================================================
|
| 407 |
+
# SEASON ANALYSIS VISUALIZATIONS
|
| 408 |
+
# =============================================================================
|
| 409 |
+
class SeasonVisualizer:
    """Visualization for historical season analysis."""

    def plot_scoring_trends(self, season_data: pd.DataFrame) -> Path:
        """Draw league-average points per game as a filled line chart.

        Args:
            season_data: DataFrame with columns 'season' and 'avg_pts'.

        Returns:
            Path to the saved PNG.
        """
        fig, ax = plt.subplots(figsize=(14, 7))

        seasons = season_data['season']
        avg_pts = season_data['avg_pts']

        ax.plot(seasons, avg_pts,
                color=COLORS['primary'], linewidth=3, marker='o', markersize=8)
        ax.fill_between(seasons, avg_pts, alpha=0.3, color=COLORS['primary'])

        ax.set_xlabel('Season', fontsize=12)
        ax.set_ylabel('Average Points Per Game', fontsize=12)
        ax.set_title('NBA Scoring Trends Over Time', fontsize=16, fontweight='bold')
        ax.tick_params(axis='x', rotation=45)
        ax.grid(True, alpha=0.2)

        return save_figure(fig, "scoring_trends")

    def plot_three_point_revolution(self, season_data: pd.DataFrame) -> Path:
        """Plot 3PA (bars, left axis) against 3P% (line, right axis).

        Args:
            season_data: DataFrame with 'season', 'avg_3pa', 'avg_3pct'.

        Returns:
            Path to the saved PNG.
        """
        fig, ax1 = plt.subplots(figsize=(14, 7))
        # Second y-axis shares the x-axis so the two series overlay.
        ax2 = ax1.twinx()

        seasons = season_data['season']

        ax1.bar(seasons, season_data['avg_3pa'],
                color=COLORS['secondary'], alpha=0.7, label='3-Point Attempts')
        ax2.plot(seasons, season_data['avg_3pct'],
                 color=COLORS['primary'], linewidth=3, marker='o', label='3-Point %')

        ax1.set_xlabel('Season', fontsize=12)
        ax1.set_ylabel('3-Point Attempts', fontsize=12, color=COLORS['secondary'])
        ax2.set_ylabel('3-Point Percentage', fontsize=12, color=COLORS['primary'])
        ax1.set_title('The 3-Point Revolution', fontsize=16, fontweight='bold')
        ax1.tick_params(axis='x', rotation=45)

        # Merge the legends of both axes into one box on the primary axis.
        handles_a, labels_a = ax1.get_legend_handles_labels()
        handles_b, labels_b = ax2.get_legend_handles_labels()
        ax1.legend(handles_a + handles_b, labels_a + labels_b, loc='upper left')

        return save_figure(fig, "three_point_revolution")
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
# =============================================================================
|
| 456 |
+
# MASTER VISUALIZER
|
| 457 |
+
# =============================================================================
|
| 458 |
+
class NBAVisualizer:
    """Master class combining all visualization capabilities."""

    def __init__(self):
        # One sub-visualizer per domain; callers may also use these directly.
        self.team = TeamVisualizer()
        self.game = GameVisualizer()
        self.mvp = MVPVisualizer()
        self.championship = ChampionshipVisualizer()
        self.season = SeasonVisualizer()

    def generate_all_visualizations(self, data: Dict[str, pd.DataFrame]) -> List[Path]:
        """
        Generate all available visualizations from provided data.

        Args:
            data: Dict with keys like 'standings', 'mvp', 'championship', etc.
                Missing keys are simply skipped.

        Returns:
            List of paths to saved graphs
        """
        saved_paths = []

        if 'standings' in data:
            saved_paths.append(self.team.plot_standings(data['standings']))

        if 'mvp' in data:
            saved_paths.append(self.mvp.plot_mvp_race(data['mvp']))
            saved_paths.append(self.mvp.plot_stat_comparison(data['mvp']))

        if 'championship' in data:
            saved_paths.append(self.championship.plot_championship_odds(data['championship']))
            saved_paths.append(self.championship.plot_strength_vs_experience(data['championship']))

        if 'predictions' in data:
            # BUG FIX: previously called self.game.plot_calibration(), which
            # does not exist on GameVisualizer (AttributeError at runtime);
            # the method is named plot_prediction_calibration.
            saved_paths.append(self.game.plot_prediction_calibration(data['predictions']))
            saved_paths.append(self.game.plot_accuracy_by_confidence(data['predictions']))

        logger.info(f"Generated {len(saved_paths)} visualizations")
        return saved_paths
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
# =============================================================================
|
| 500 |
+
# CLI INTERFACE
|
| 501 |
+
# =============================================================================
|
| 502 |
+
if __name__ == "__main__":
    # Standalone demo: renders a handful of charts from hard-coded sample
    # data so the module can be smoke-tested without the data pipeline.
    print(f"Generating sample visualizations to {GRAPHS_DIR}...")

    # Create sample data for testing
    # Columns mirror what MVPVisualizer expects from the real pipeline.
    sample_mvp = pd.DataFrame({
        'PLAYER_NAME': ['Shai Gilgeous-Alexander', 'Nikola Jokic', 'Jayson Tatum',
                        'Luka Doncic', 'Giannis Antetokounmpo'],
        'PTS': [31.5, 26.8, 27.2, 28.5, 30.5],
        'REB': [5.5, 12.5, 8.2, 8.8, 11.5],
        'AST': [6.0, 9.2, 4.8, 8.2, 6.5],
        'mvp_score': [85.2, 82.1, 78.5, 77.2, 76.8],
        'mvp_similarity': [0.92, 0.95, 0.85, 0.88, 0.90]
    })

    # Columns mirror what ChampionshipVisualizer expects.
    sample_champ = pd.DataFrame({
        'TEAM_ABBREVIATION': ['OKC', 'CLE', 'BOS', 'DEN', 'MEM', 'HOU', 'NYK', 'GSW'],
        'W_PCT': [0.70, 0.68, 0.65, 0.62, 0.60, 0.58, 0.55, 0.52],
        'playoff_experience': [0.3, 0.5, 0.8, 0.9, 0.4, 0.2, 0.5, 0.95],
        'strength_rating': [45, 42, 40, 38, 35, 33, 30, 28],
        'champ_probability': [0.18, 0.15, 0.14, 0.12, 0.10, 0.09, 0.08, 0.07]
    })

    viz = NBAVisualizer()

    # Generate sample visualizations
    print("Creating MVP race chart...")
    viz.mvp.plot_mvp_race(sample_mvp)

    print("Creating MVP stat comparison...")
    viz.mvp.plot_stat_comparison(sample_mvp)

    print("Creating championship odds pie chart...")
    viz.championship.plot_championship_odds(sample_champ)

    print("Creating strength vs experience chart...")
    viz.championship.plot_strength_vs_experience(sample_champ)

    print(f"\n✅ Visualizations saved to: {GRAPHS_DIR}")
|
web/.gitignore
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Logs
|
| 2 |
+
logs
|
| 3 |
+
*.log
|
| 4 |
+
npm-debug.log*
|
| 5 |
+
yarn-debug.log*
|
| 6 |
+
yarn-error.log*
|
| 7 |
+
pnpm-debug.log*
|
| 8 |
+
lerna-debug.log*
|
| 9 |
+
|
| 10 |
+
node_modules
|
| 11 |
+
dist
|
| 12 |
+
dist-ssr
|
| 13 |
+
*.local
|
| 14 |
+
|
| 15 |
+
# Editor directories and files
|
| 16 |
+
.vscode/*
|
| 17 |
+
!.vscode/extensions.json
|
| 18 |
+
.idea
|
| 19 |
+
.DS_Store
|
| 20 |
+
*.suo
|
| 21 |
+
*.ntvs*
|
| 22 |
+
*.njsproj
|
| 23 |
+
*.sln
|
| 24 |
+
*.sw?
|
web/README.md
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# React + Vite
|
| 2 |
+
|
| 3 |
+
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
|
| 4 |
+
|
| 5 |
+
Currently, two official plugins are available:
|
| 6 |
+
|
| 7 |
+
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
|
| 8 |
+
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
|
| 9 |
+
|
| 10 |
+
## React Compiler
|
| 11 |
+
|
| 12 |
+
The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
|
| 13 |
+
|
| 14 |
+
## Expanding the ESLint configuration
|
| 15 |
+
|
| 16 |
+
If you are developing a production application, we recommend using TypeScript with type-aware lint rules enabled. Check out the [TS template](https://github.com/vitejs/vite/tree/main/packages/create-vite/template-react-ts) for information on how to integrate TypeScript and [`typescript-eslint`](https://typescript-eslint.io) in your project.
|
web/eslint.config.js
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import js from '@eslint/js'
|
| 2 |
+
import globals from 'globals'
|
| 3 |
+
import reactHooks from 'eslint-plugin-react-hooks'
|
| 4 |
+
import reactRefresh from 'eslint-plugin-react-refresh'
|
| 5 |
+
import { defineConfig, globalIgnores } from 'eslint/config'
|
| 6 |
+
|
| 7 |
+
export default defineConfig([
|
| 8 |
+
globalIgnores(['dist']),
|
| 9 |
+
{
|
| 10 |
+
files: ['**/*.{js,jsx}'],
|
| 11 |
+
extends: [
|
| 12 |
+
js.configs.recommended,
|
| 13 |
+
reactHooks.configs.flat.recommended,
|
| 14 |
+
reactRefresh.configs.vite,
|
| 15 |
+
],
|
| 16 |
+
languageOptions: {
|
| 17 |
+
ecmaVersion: 2020,
|
| 18 |
+
globals: globals.browser,
|
| 19 |
+
parserOptions: {
|
| 20 |
+
ecmaVersion: 'latest',
|
| 21 |
+
ecmaFeatures: { jsx: true },
|
| 22 |
+
sourceType: 'module',
|
| 23 |
+
},
|
| 24 |
+
},
|
| 25 |
+
rules: {
|
| 26 |
+
'no-unused-vars': ['error', { varsIgnorePattern: '^[A-Z_]' }],
|
| 27 |
+
},
|
| 28 |
+
},
|
| 29 |
+
])
|
web/index.html
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!doctype html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8" />
|
| 5 |
+
<link rel="icon" href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'><text y='.9em' font-size='90'>🏀</text></svg>" />
|
| 6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
| 7 |
+
<title>NBA ML Predictor</title>
|
| 8 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
| 9 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
| 10 |
+
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap" rel="stylesheet">
|
| 11 |
+
</head>
|
| 12 |
+
<body>
|
| 13 |
+
<div id="root"></div>
|
| 14 |
+
<script type="module" src="/src/main.jsx"></script>
|
| 15 |
+
</body>
|
| 16 |
+
</html>
|
web/package.json
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "web",
|
| 3 |
+
"private": true,
|
| 4 |
+
"version": "0.0.0",
|
| 5 |
+
"type": "module",
|
| 6 |
+
"scripts": {
|
| 7 |
+
"dev": "vite",
|
| 8 |
+
"build": "vite build",
|
| 9 |
+
"lint": "eslint .",
|
| 10 |
+
"preview": "vite preview"
|
| 11 |
+
},
|
| 12 |
+
"dependencies": {
|
| 13 |
+
"react": "^19.2.0",
|
| 14 |
+
"react-dom": "^19.2.0"
|
| 15 |
+
},
|
| 16 |
+
"devDependencies": {
|
| 17 |
+
"@eslint/js": "^9.39.1",
|
| 18 |
+
"@types/react": "^19.2.5",
|
| 19 |
+
"@types/react-dom": "^19.2.3",
|
| 20 |
+
"@vitejs/plugin-react": "^5.1.1",
|
| 21 |
+
"eslint": "^9.39.1",
|
| 22 |
+
"eslint-plugin-react-hooks": "^7.0.1",
|
| 23 |
+
"eslint-plugin-react-refresh": "^0.4.24",
|
| 24 |
+
"globals": "^16.5.0",
|
| 25 |
+
"vite": "^7.2.4"
|
| 26 |
+
}
|
| 27 |
+
}
|
web/public/vite.svg
ADDED
|
|
web/src/App.css
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#root {
|
| 2 |
+
max-width: 1280px;
|
| 3 |
+
margin: 0 auto;
|
| 4 |
+
padding: 2rem;
|
| 5 |
+
text-align: center;
|
| 6 |
+
}
|
| 7 |
+
|
| 8 |
+
.logo {
|
| 9 |
+
height: 6em;
|
| 10 |
+
padding: 1.5em;
|
| 11 |
+
will-change: filter;
|
| 12 |
+
transition: filter 300ms;
|
| 13 |
+
}
|
| 14 |
+
.logo:hover {
|
| 15 |
+
filter: drop-shadow(0 0 2em #646cffaa);
|
| 16 |
+
}
|
| 17 |
+
.logo.react:hover {
|
| 18 |
+
filter: drop-shadow(0 0 2em #61dafbaa);
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
@keyframes logo-spin {
|
| 22 |
+
from {
|
| 23 |
+
transform: rotate(0deg);
|
| 24 |
+
}
|
| 25 |
+
to {
|
| 26 |
+
transform: rotate(360deg);
|
| 27 |
+
}
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
@media (prefers-reduced-motion: no-preference) {
|
| 31 |
+
a:nth-of-type(2) .logo {
|
| 32 |
+
animation: logo-spin infinite 20s linear;
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
.card {
|
| 37 |
+
padding: 2em;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
.read-the-docs {
|
| 41 |
+
color: #888;
|
| 42 |
+
}
|
web/src/App.jsx
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useState, useEffect } from 'react'
|
| 2 |
+
import './index.css'
|
| 3 |
+
|
| 4 |
+
// Icons
|
| 5 |
+
import {
|
| 6 |
+
IconLive, IconTarget, IconChart, IconTrophy, IconCrown,
|
| 7 |
+
IconVs, IconRefresh
|
| 8 |
+
} from './icons'
|
| 9 |
+
|
| 10 |
+
// Pages
|
| 11 |
+
import LiveGames from './pages/LiveGames'
|
| 12 |
+
import Predictions from './pages/Predictions'
|
| 13 |
+
import Accuracy from './pages/Accuracy'
|
| 14 |
+
import MvpRace from './pages/MvpRace'
|
| 15 |
+
import Championship from './pages/Championship'
|
| 16 |
+
import HeadToHead from './pages/HeadToHead'
|
| 17 |
+
|
| 18 |
+
// Chevron icon for collapse toggle
|
| 19 |
+
function IconChevron({ className = '', direction = 'left' }) {
|
| 20 |
+
return (
|
| 21 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 22 |
+
{direction === 'left' ? (
|
| 23 |
+
<polyline points="15 18 9 12 15 6"></polyline>
|
| 24 |
+
) : (
|
| 25 |
+
<polyline points="9 18 15 12 9 6"></polyline>
|
| 26 |
+
)}
|
| 27 |
+
</svg>
|
| 28 |
+
)
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
function App() {
|
| 32 |
+
const [activePage, setActivePage] = useState('live')
|
| 33 |
+
const [isRefreshing, setIsRefreshing] = useState(false)
|
| 34 |
+
const [sidebarCollapsed, setSidebarCollapsed] = useState(false)
|
| 35 |
+
const [sidebarHovered, setSidebarHovered] = useState(false)
|
| 36 |
+
|
| 37 |
+
const handleRefresh = () => {
|
| 38 |
+
setIsRefreshing(true)
|
| 39 |
+
window.location.reload()
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
const navSections = [
|
| 43 |
+
{
|
| 44 |
+
title: 'Games',
|
| 45 |
+
items: [
|
| 46 |
+
{ id: 'live', name: 'Live Games', icon: IconLive },
|
| 47 |
+
{ id: 'predictions', name: 'Predictions', icon: IconTarget },
|
| 48 |
+
]
|
| 49 |
+
},
|
| 50 |
+
{
|
| 51 |
+
title: 'Analysis',
|
| 52 |
+
items: [
|
| 53 |
+
{ id: 'h2h', name: 'Head to Head', icon: IconVs },
|
| 54 |
+
{ id: 'accuracy', name: 'Model Accuracy', icon: IconChart },
|
| 55 |
+
]
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
title: 'Rankings',
|
| 59 |
+
items: [
|
| 60 |
+
{ id: 'mvp', name: 'MVP Race', icon: IconTrophy },
|
| 61 |
+
{ id: 'championship', name: 'Championship', icon: IconCrown },
|
| 62 |
+
]
|
| 63 |
+
}
|
| 64 |
+
]
|
| 65 |
+
|
| 66 |
+
const renderPage = () => {
|
| 67 |
+
switch (activePage) {
|
| 68 |
+
case 'live': return <LiveGames />
|
| 69 |
+
case 'predictions': return <Predictions />
|
| 70 |
+
case 'accuracy': return <Accuracy />
|
| 71 |
+
case 'mvp': return <MvpRace />
|
| 72 |
+
case 'championship': return <Championship />
|
| 73 |
+
case 'h2h': return <HeadToHead />
|
| 74 |
+
default: return <LiveGames />
|
| 75 |
+
}
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
// Determine if sidebar should be expanded (not collapsed OR being hovered)
|
| 79 |
+
const isExpanded = !sidebarCollapsed || sidebarHovered
|
| 80 |
+
|
| 81 |
+
return (
|
| 82 |
+
<div className={`app-layout ${sidebarCollapsed ? 'sidebar-collapsed' : ''}`}>
|
| 83 |
+
{/* Sidebar */}
|
| 84 |
+
<aside
|
| 85 |
+
className={`sidebar ${sidebarCollapsed ? 'collapsed' : ''} ${sidebarHovered ? 'hovered' : ''}`}
|
| 86 |
+
onMouseEnter={() => sidebarCollapsed && setSidebarHovered(true)}
|
| 87 |
+
onMouseLeave={() => setSidebarHovered(false)}
|
| 88 |
+
>
|
| 89 |
+
<div className="sidebar-header">
|
| 90 |
+
<div className="sidebar-logo">
|
| 91 |
+
<div className="sidebar-logo-icon">
|
| 92 |
+
<img
|
| 93 |
+
src="https://cdn.nba.com/logos/leagues/logo-nba.svg"
|
| 94 |
+
alt="NBA"
|
| 95 |
+
style={{ width: '32px', height: '32px' }}
|
| 96 |
+
/>
|
| 97 |
+
</div>
|
| 98 |
+
<span className="sidebar-logo-text">NBA sage</span>
|
| 99 |
+
</div>
|
| 100 |
+
<button
|
| 101 |
+
className="sidebar-toggle"
|
| 102 |
+
onClick={() => {
|
| 103 |
+
setSidebarCollapsed(!sidebarCollapsed)
|
| 104 |
+
setSidebarHovered(false)
|
| 105 |
+
}}
|
| 106 |
+
title={sidebarCollapsed ? 'Expand sidebar' : 'Collapse sidebar'}
|
| 107 |
+
>
|
| 108 |
+
<IconChevron direction={sidebarCollapsed ? 'right' : 'left'} />
|
| 109 |
+
</button>
|
| 110 |
+
</div>
|
| 111 |
+
|
| 112 |
+
<nav className="sidebar-nav">
|
| 113 |
+
{navSections.map((section) => (
|
| 114 |
+
<div key={section.title} className="nav-section">
|
| 115 |
+
<div className="nav-section-title">{section.title}</div>
|
| 116 |
+
{section.items.map((item) => (
|
| 117 |
+
<div
|
| 118 |
+
key={item.id}
|
| 119 |
+
className={`nav-item ${activePage === item.id ? 'active' : ''}`}
|
| 120 |
+
onClick={() => setActivePage(item.id)}
|
| 121 |
+
title={sidebarCollapsed && !sidebarHovered ? item.name : ''}
|
| 122 |
+
>
|
| 123 |
+
<item.icon className="nav-icon" />
|
| 124 |
+
<span className="nav-text">{item.name}</span>
|
| 125 |
+
</div>
|
| 126 |
+
))}
|
| 127 |
+
</div>
|
| 128 |
+
))}
|
| 129 |
+
</nav>
|
| 130 |
+
|
| 131 |
+
<div className="sidebar-footer">
|
| 132 |
+
<button
|
| 133 |
+
className="btn btn-ghost btn-block"
|
| 134 |
+
onClick={handleRefresh}
|
| 135 |
+
disabled={isRefreshing}
|
| 136 |
+
title={sidebarCollapsed && !sidebarHovered ? 'Refresh Data' : ''}
|
| 137 |
+
>
|
| 138 |
+
<IconRefresh className={`nav-icon ${isRefreshing ? 'spinning' : ''}`} />
|
| 139 |
+
<span className="nav-text">Refresh Data</span>
|
| 140 |
+
</button>
|
| 141 |
+
<div className="sidebar-version">
|
| 142 |
+
NBA sage
|
| 143 |
+
</div>
|
| 144 |
+
</div>
|
| 145 |
+
</aside>
|
| 146 |
+
|
| 147 |
+
{/* Main Content */}
|
| 148 |
+
<main className="main-content">
|
| 149 |
+
{renderPage()}
|
| 150 |
+
</main>
|
| 151 |
+
</div>
|
| 152 |
+
)
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
export default App
|
| 156 |
+
|
web/src/api.js
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* API utilities for NBA ML Predictor
|
| 3 |
+
*/
|
| 4 |
+
|
| 5 |
+
// Use relative URL in production, localhost in development
|
| 6 |
+
const API_BASE = import.meta.env.DEV ? 'http://localhost:8000/api' : '/api';
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
/**
|
| 10 |
+
* Generic fetch wrapper with error handling
|
| 11 |
+
*/
|
| 12 |
+
async function fetchAPI(endpoint, options = {}) {
|
| 13 |
+
try {
|
| 14 |
+
const response = await fetch(`${API_BASE}${endpoint}`, {
|
| 15 |
+
...options,
|
| 16 |
+
headers: {
|
| 17 |
+
'Content-Type': 'application/json',
|
| 18 |
+
...options.headers,
|
| 19 |
+
},
|
| 20 |
+
});
|
| 21 |
+
|
| 22 |
+
if (!response.ok) {
|
| 23 |
+
throw new Error(`API Error: ${response.status}`);
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
return await response.json();
|
| 27 |
+
} catch (error) {
|
| 28 |
+
console.error('API Error:', error);
|
| 29 |
+
throw error;
|
| 30 |
+
}
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
/**
|
| 34 |
+
* Get live games with predictions
|
| 35 |
+
*/
|
| 36 |
+
export async function getLiveGames() {
|
| 37 |
+
return fetchAPI('/games/live');
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
/**
|
| 41 |
+
* Get upcoming games
|
| 42 |
+
*/
|
| 43 |
+
export async function getUpcomingGames(days = 7) {
|
| 44 |
+
return fetchAPI(`/games/upcoming?days=${days}`);
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
/**
|
| 48 |
+
* Get prediction for a single game
|
| 49 |
+
*/
|
| 50 |
+
export async function predictGame(home, away) {
|
| 51 |
+
return fetchAPI(`/predict?home=${home}&away=${away}`);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* Get model accuracy stats
|
| 56 |
+
*/
|
| 57 |
+
export async function getAccuracy() {
|
| 58 |
+
return fetchAPI('/accuracy');
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
/**
|
| 62 |
+
* Get MVP race standings
|
| 63 |
+
*/
|
| 64 |
+
export async function getMvpRace() {
|
| 65 |
+
return fetchAPI('/mvp');
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
/**
|
| 69 |
+
* Get championship odds
|
| 70 |
+
*/
|
| 71 |
+
export async function getChampionshipOdds() {
|
| 72 |
+
return fetchAPI('/championship');
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
/**
|
| 76 |
+
* Get list of all teams
|
| 77 |
+
*/
|
| 78 |
+
export async function getTeams() {
|
| 79 |
+
return fetchAPI('/teams');
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
/**
|
| 83 |
+
* Health check
|
| 84 |
+
*/
|
| 85 |
+
export async function healthCheck() {
|
| 86 |
+
return fetchAPI('/health');
|
| 87 |
+
}
|
web/src/assets/react.svg
ADDED
|
|
web/src/icons.jsx
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* SVG Icons for the application
|
| 3 |
+
* Professional, minimal icons
|
| 4 |
+
*/
|
| 5 |
+
|
| 6 |
+
export function IconLive({ className = '' }) {
|
| 7 |
+
return (
|
| 8 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 9 |
+
<circle cx="12" cy="12" r="3" fill="currentColor" />
|
| 10 |
+
<path d="M16.24 7.76a6 6 0 0 1 0 8.49m-8.48-.01a6 6 0 0 1 0-8.49m11.31-2.82a10 10 0 0 1 0 14.14m-14.14 0a10 10 0 0 1 0-14.14" />
|
| 11 |
+
</svg>
|
| 12 |
+
);
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
export function IconTarget({ className = '' }) {
|
| 16 |
+
return (
|
| 17 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 18 |
+
<circle cx="12" cy="12" r="10" />
|
| 19 |
+
<circle cx="12" cy="12" r="6" />
|
| 20 |
+
<circle cx="12" cy="12" r="2" fill="currentColor" />
|
| 21 |
+
</svg>
|
| 22 |
+
);
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
export function IconChart({ className = '' }) {
|
| 26 |
+
return (
|
| 27 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 28 |
+
<path d="M3 3v18h18" />
|
| 29 |
+
<path d="M18 9l-5 5-4-4-3 3" />
|
| 30 |
+
</svg>
|
| 31 |
+
);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
export function IconTrophy({ className = '' }) {
|
| 35 |
+
return (
|
| 36 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 37 |
+
<path d="M6 9H4.5a2.5 2.5 0 0 1 0-5H6" />
|
| 38 |
+
<path d="M18 9h1.5a2.5 2.5 0 0 0 0-5H18" />
|
| 39 |
+
<path d="M4 22h16" />
|
| 40 |
+
<path d="M10 14.66V17c0 .55-.47.98-.97 1.21C7.85 18.75 7 20.24 7 22" />
|
| 41 |
+
<path d="M14 14.66V17c0 .55.47.98.97 1.21C16.15 18.75 17 20.24 17 22" />
|
| 42 |
+
<path d="M18 2H6v7a6 6 0 0 0 12 0V2Z" />
|
| 43 |
+
</svg>
|
| 44 |
+
);
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
export function IconCrown({ className = '' }) {
|
| 48 |
+
return (
|
| 49 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 50 |
+
<path d="M2 4l3 12h14l3-12-6 7-4-7-4 7-6-7z" />
|
| 51 |
+
<path d="M5 20h14" />
|
| 52 |
+
</svg>
|
| 53 |
+
);
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
export function IconUsers({ className = '' }) {
|
| 57 |
+
return (
|
| 58 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 59 |
+
<path d="M17 21v-2a4 4 0 0 0-4-4H5a4 4 0 0 0-4 4v2" />
|
| 60 |
+
<circle cx="9" cy="7" r="4" />
|
| 61 |
+
<path d="M23 21v-2a4 4 0 0 0-3-3.87" />
|
| 62 |
+
<path d="M16 3.13a4 4 0 0 1 0 7.75" />
|
| 63 |
+
</svg>
|
| 64 |
+
);
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
export function IconPerson({ className = '' }) {
|
| 68 |
+
return (
|
| 69 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 70 |
+
<circle cx="12" cy="8" r="4" />
|
| 71 |
+
<path d="M20 21v-2a4 4 0 0 0-4-4H8a4 4 0 0 0-4 4v2" />
|
| 72 |
+
</svg>
|
| 73 |
+
);
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
export function IconVs({ className = '' }) {
|
| 77 |
+
return (
|
| 78 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 79 |
+
<path d="M6 3l6 9-6 9M18 3l-6 9 6 9" />
|
| 80 |
+
</svg>
|
| 81 |
+
);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
export function IconStandings({ className = '' }) {
|
| 85 |
+
return (
|
| 86 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 87 |
+
<rect x="3" y="3" width="18" height="18" rx="2" />
|
| 88 |
+
<path d="M3 9h18" />
|
| 89 |
+
<path d="M9 3v18" />
|
| 90 |
+
</svg>
|
| 91 |
+
);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
export function IconRefresh({ className = '' }) {
|
| 95 |
+
return (
|
| 96 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 97 |
+
<path d="M21 12a9 9 0 1 1-9-9c2.52 0 4.93 1 6.74 2.74L21 8" />
|
| 98 |
+
<path d="M21 3v5h-5" />
|
| 99 |
+
</svg>
|
| 100 |
+
);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
export function IconSearch({ className = '' }) {
|
| 104 |
+
return (
|
| 105 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 106 |
+
<circle cx="11" cy="11" r="8" />
|
| 107 |
+
<path d="M21 21l-4.35-4.35" />
|
| 108 |
+
</svg>
|
| 109 |
+
);
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
export function IconCheck({ className = '' }) {
|
| 113 |
+
return (
|
| 114 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 115 |
+
<path d="M20 6L9 17l-5-5" />
|
| 116 |
+
</svg>
|
| 117 |
+
);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
export function IconX({ className = '' }) {
|
| 121 |
+
return (
|
| 122 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 123 |
+
<path d="M18 6L6 18M6 6l12 12" />
|
| 124 |
+
</svg>
|
| 125 |
+
);
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
export function IconBasketball({ className = '' }) {
|
| 129 |
+
return (
|
| 130 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 131 |
+
<circle cx="12" cy="12" r="10" />
|
| 132 |
+
<path d="M2 12h20" />
|
| 133 |
+
<path d="M12 2a10 10 0 0 1 0 20" />
|
| 134 |
+
<path d="M12 2a10 10 0 0 0 0 20" />
|
| 135 |
+
<path d="M12 2v20" />
|
| 136 |
+
</svg>
|
| 137 |
+
);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
export function IconCalendar({ className = '' }) {
|
| 141 |
+
return (
|
| 142 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 143 |
+
<rect x="3" y="4" width="18" height="18" rx="2" />
|
| 144 |
+
<path d="M16 2v4M8 2v4M3 10h18" />
|
| 145 |
+
</svg>
|
| 146 |
+
);
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
export function IconClock({ className = '' }) {
|
| 150 |
+
return (
|
| 151 |
+
<svg className={className} viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
| 152 |
+
<circle cx="12" cy="12" r="10" />
|
| 153 |
+
<path d="M12 6v6l4 2" />
|
| 154 |
+
</svg>
|
| 155 |
+
);
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
export default {
|
| 159 |
+
IconLive, IconTarget, IconChart, IconTrophy, IconCrown, IconUsers,
|
| 160 |
+
IconPerson, IconVs, IconStandings, IconRefresh, IconSearch, IconCheck,
|
| 161 |
+
IconX, IconBasketball, IconCalendar, IconClock
|
| 162 |
+
};
|
web/src/index.css
ADDED
|
@@ -0,0 +1,1378 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* ===========================================================================
|
| 2 |
+
NBA ML Predictor - Professional Design System
|
| 3 |
+
Sleek, Modern, Sports-Premium Aesthetic
|
| 4 |
+
=========================================================================== */
|
| 5 |
+
|
| 6 |
+
/* CSS Reset & Box Sizing */
|
| 7 |
+
*,
|
| 8 |
+
*::before,
|
| 9 |
+
*::after {
|
| 10 |
+
box-sizing: border-box;
|
| 11 |
+
margin: 0;
|
| 12 |
+
padding: 0;
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
/* =========================================================================
|
| 16 |
+
Design Tokens
|
| 17 |
+
========================================================================= */
|
| 18 |
+
:root {
|
| 19 |
+
/* Colors - Deep Dark Theme */
|
| 20 |
+
--bg-primary: #000000;
|
| 21 |
+
--bg-secondary: #0d0d0d;
|
| 22 |
+
--bg-tertiary: #141414;
|
| 23 |
+
--bg-card: #0a0a0a;
|
| 24 |
+
--bg-card-hover: #111111;
|
| 25 |
+
--bg-elevated: #1a1a1a;
|
| 26 |
+
|
| 27 |
+
/* Text Hierarchy */
|
| 28 |
+
--text-primary: #ffffff;
|
| 29 |
+
--text-secondary: #b3b3b3;
|
| 30 |
+
--text-muted: #666666;
|
| 31 |
+
--text-dim: #444444;
|
| 32 |
+
|
| 33 |
+
/* Accent Colors - Sophisticated */
|
| 34 |
+
--accent-primary: #00c8ff;
|
| 35 |
+
--accent-secondary: #ff6b35;
|
| 36 |
+
--accent-success: #00d26a;
|
| 37 |
+
--accent-warning: #ffb800;
|
| 38 |
+
--accent-danger: #ff3b3b;
|
| 39 |
+
|
| 40 |
+
/* Subtle Gradients */
|
| 41 |
+
--gradient-primary: linear-gradient(135deg, #00c8ff 0%, #0099cc 100%);
|
| 42 |
+
--gradient-secondary: linear-gradient(135deg, #ff6b35 0%, #cc5529 100%);
|
| 43 |
+
--gradient-subtle: linear-gradient(180deg, rgba(255, 255, 255, 0.03) 0%, rgba(255, 255, 255, 0) 100%);
|
| 44 |
+
|
| 45 |
+
/* Borders & Surfaces */
|
| 46 |
+
--border-subtle: 1px solid rgba(255, 255, 255, 0.06);
|
| 47 |
+
--border-medium: 1px solid rgba(255, 255, 255, 0.1);
|
| 48 |
+
--border-focus: 1px solid var(--accent-primary);
|
| 49 |
+
|
| 50 |
+
/* Shadows */
|
| 51 |
+
--shadow-sm: 0 2px 8px rgba(0, 0, 0, 0.3);
|
| 52 |
+
--shadow-md: 0 4px 16px rgba(0, 0, 0, 0.4);
|
| 53 |
+
--shadow-lg: 0 8px 32px rgba(0, 0, 0, 0.5);
|
| 54 |
+
--shadow-glow: 0 0 20px rgba(0, 200, 255, 0.15);
|
| 55 |
+
|
| 56 |
+
/* Border Radius */
|
| 57 |
+
--radius-xs: 4px;
|
| 58 |
+
--radius-sm: 6px;
|
| 59 |
+
--radius-md: 8px;
|
| 60 |
+
--radius-lg: 12px;
|
| 61 |
+
--radius-xl: 16px;
|
| 62 |
+
|
| 63 |
+
/* Spacing Scale */
|
| 64 |
+
--space-1: 4px;
|
| 65 |
+
--space-2: 8px;
|
| 66 |
+
--space-3: 12px;
|
| 67 |
+
--space-4: 16px;
|
| 68 |
+
--space-5: 20px;
|
| 69 |
+
--space-6: 24px;
|
| 70 |
+
--space-8: 32px;
|
| 71 |
+
--space-10: 40px;
|
| 72 |
+
--space-12: 48px;
|
| 73 |
+
|
| 74 |
+
/* Typography */
|
| 75 |
+
--font-sans: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
|
| 76 |
+
--font-mono: 'SF Mono', 'Monaco', 'Inconsolata', monospace;
|
| 77 |
+
|
| 78 |
+
/* Transitions */
|
| 79 |
+
--transition-fast: 100ms ease;
|
| 80 |
+
--transition-base: 200ms ease;
|
| 81 |
+
--transition-slow: 300ms ease;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
/* =========================================================================
|
| 85 |
+
Base Styles
|
| 86 |
+
========================================================================= */
|
| 87 |
+
html {
|
| 88 |
+
font-size: 14px;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
body {
|
| 92 |
+
font-family: var(--font-sans);
|
| 93 |
+
background: var(--bg-primary);
|
| 94 |
+
color: var(--text-primary);
|
| 95 |
+
line-height: 1.5;
|
| 96 |
+
min-height: 100vh;
|
| 97 |
+
overflow-x: hidden;
|
| 98 |
+
-webkit-font-smoothing: antialiased;
|
| 99 |
+
-moz-osx-font-smoothing: grayscale;
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
#root {
|
| 103 |
+
min-height: 100vh;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
/* =========================================================================
|
| 107 |
+
Typography
|
| 108 |
+
========================================================================= */
|
| 109 |
+
h1,
|
| 110 |
+
h2,
|
| 111 |
+
h3,
|
| 112 |
+
h4,
|
| 113 |
+
h5,
|
| 114 |
+
h6 {
|
| 115 |
+
font-weight: 600;
|
| 116 |
+
line-height: 1.25;
|
| 117 |
+
letter-spacing: -0.025em;
|
| 118 |
+
color: var(--text-primary);
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
h1 {
|
| 122 |
+
font-size: 2rem;
|
| 123 |
+
font-weight: 700;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
h2 {
|
| 127 |
+
font-size: 1.5rem;
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
h3 {
|
| 131 |
+
font-size: 1.125rem;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
h4 {
|
| 135 |
+
font-size: 1rem;
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
.text-gradient {
|
| 139 |
+
background: var(--gradient-primary);
|
| 140 |
+
-webkit-background-clip: text;
|
| 141 |
+
-webkit-text-fill-color: transparent;
|
| 142 |
+
background-clip: text;
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
.text-muted {
|
| 146 |
+
color: var(--text-muted);
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
.text-secondary {
|
| 150 |
+
color: var(--text-secondary);
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
/* =========================================================================
|
| 154 |
+
Layout
|
| 155 |
+
========================================================================= */
|
| 156 |
+
.app-layout {
|
| 157 |
+
display: flex;
|
| 158 |
+
min-height: 100vh;
|
| 159 |
+
transition: all var(--transition-slow);
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
.app-layout.sidebar-collapsed .main-content {
|
| 163 |
+
margin-left: 72px;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
/* Sidebar - Collapsible with Hover Expand */
|
| 167 |
+
.sidebar {
|
| 168 |
+
width: 240px;
|
| 169 |
+
background: var(--bg-secondary);
|
| 170 |
+
border-right: var(--border-subtle);
|
| 171 |
+
position: fixed;
|
| 172 |
+
top: 0;
|
| 173 |
+
left: 0;
|
| 174 |
+
height: 100vh;
|
| 175 |
+
z-index: 100;
|
| 176 |
+
display: flex;
|
| 177 |
+
flex-direction: column;
|
| 178 |
+
padding: var(--space-5);
|
| 179 |
+
transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
|
| 180 |
+
overflow: hidden;
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
/* Collapsed Sidebar */
|
| 184 |
+
.sidebar.collapsed {
|
| 185 |
+
width: 72px;
|
| 186 |
+
padding: var(--space-4) var(--space-3);
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
/* Hover Expansion with Glass Effect */
|
| 190 |
+
.sidebar.collapsed.hovered {
|
| 191 |
+
width: 240px;
|
| 192 |
+
padding: var(--space-5);
|
| 193 |
+
background: rgba(13, 13, 13, 0.95);
|
| 194 |
+
backdrop-filter: blur(20px);
|
| 195 |
+
-webkit-backdrop-filter: blur(20px);
|
| 196 |
+
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.5),
|
| 197 |
+
0 0 0 1px rgba(255, 255, 255, 0.08);
|
| 198 |
+
border-right: 1px solid rgba(255, 255, 255, 0.1);
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
.sidebar-header {
|
| 202 |
+
padding-bottom: var(--space-6);
|
| 203 |
+
border-bottom: var(--border-subtle);
|
| 204 |
+
margin-bottom: var(--space-4);
|
| 205 |
+
display: flex;
|
| 206 |
+
align-items: center;
|
| 207 |
+
justify-content: space-between;
|
| 208 |
+
gap: var(--space-2);
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
.sidebar-logo {
|
| 212 |
+
display: flex;
|
| 213 |
+
align-items: center;
|
| 214 |
+
gap: var(--space-3);
|
| 215 |
+
min-width: 0;
|
| 216 |
+
flex: 1;
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
.sidebar-logo-icon {
|
| 220 |
+
width: 32px;
|
| 221 |
+
height: 32px;
|
| 222 |
+
min-width: 32px;
|
| 223 |
+
background: var(--gradient-primary);
|
| 224 |
+
border-radius: var(--radius-sm);
|
| 225 |
+
display: flex;
|
| 226 |
+
align-items: center;
|
| 227 |
+
justify-content: center;
|
| 228 |
+
transition: transform 0.3s ease, box-shadow 0.3s ease;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
.sidebar:hover .sidebar-logo-icon {
|
| 232 |
+
transform: scale(1.05);
|
| 233 |
+
box-shadow: 0 0 20px rgba(0, 200, 255, 0.3);
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
.sidebar-logo-text {
|
| 237 |
+
font-size: 1.125rem;
|
| 238 |
+
font-weight: 700;
|
| 239 |
+
letter-spacing: -0.02em;
|
| 240 |
+
white-space: nowrap;
|
| 241 |
+
opacity: 1;
|
| 242 |
+
transition: opacity 0.2s ease 0.1s, transform 0.2s ease;
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
.sidebar.collapsed .sidebar-logo-text {
|
| 246 |
+
opacity: 0;
|
| 247 |
+
transform: translateX(-10px);
|
| 248 |
+
transition: opacity 0.1s ease, transform 0.1s ease;
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
.sidebar.collapsed.hovered .sidebar-logo-text {
|
| 252 |
+
opacity: 1;
|
| 253 |
+
transform: translateX(0);
|
| 254 |
+
transition: opacity 0.2s ease 0.1s, transform 0.2s ease 0.1s;
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
/* Sidebar Toggle Button */
|
| 258 |
+
.sidebar-toggle {
|
| 259 |
+
width: 28px;
|
| 260 |
+
height: 28px;
|
| 261 |
+
min-width: 28px;
|
| 262 |
+
border-radius: var(--radius-sm);
|
| 263 |
+
background: var(--bg-tertiary);
|
| 264 |
+
border: var(--border-subtle);
|
| 265 |
+
color: var(--text-secondary);
|
| 266 |
+
cursor: pointer;
|
| 267 |
+
display: flex;
|
| 268 |
+
align-items: center;
|
| 269 |
+
justify-content: center;
|
| 270 |
+
transition: all 0.2s ease;
|
| 271 |
+
opacity: 0;
|
| 272 |
+
transform: scale(0.8);
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
.sidebar:hover .sidebar-toggle {
|
| 276 |
+
opacity: 1;
|
| 277 |
+
transform: scale(1);
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
.sidebar-toggle:hover {
|
| 281 |
+
background: var(--bg-elevated);
|
| 282 |
+
color: var(--accent-primary);
|
| 283 |
+
border-color: var(--accent-primary);
|
| 284 |
+
box-shadow: 0 0 12px rgba(0, 200, 255, 0.2);
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
.sidebar-toggle svg {
|
| 288 |
+
width: 16px;
|
| 289 |
+
height: 16px;
|
| 290 |
+
transition: transform 0.3s ease;
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
.sidebar-toggle:hover svg {
|
| 294 |
+
transform: scale(1.1);
|
| 295 |
+
}
|
| 296 |
+
|
| 297 |
+
.sidebar-nav {
|
| 298 |
+
flex: 1;
|
| 299 |
+
display: flex;
|
| 300 |
+
flex-direction: column;
|
| 301 |
+
gap: var(--space-1);
|
| 302 |
+
overflow-y: auto;
|
| 303 |
+
overflow-x: hidden;
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
.nav-section {
|
| 307 |
+
margin-bottom: var(--space-4);
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
.nav-section-title {
|
| 311 |
+
font-size: 0.6875rem;
|
| 312 |
+
font-weight: 600;
|
| 313 |
+
text-transform: uppercase;
|
| 314 |
+
letter-spacing: 0.05em;
|
| 315 |
+
color: var(--text-dim);
|
| 316 |
+
padding: var(--space-2) var(--space-3);
|
| 317 |
+
margin-bottom: var(--space-1);
|
| 318 |
+
white-space: nowrap;
|
| 319 |
+
opacity: 1;
|
| 320 |
+
transition: opacity 0.2s ease;
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
.sidebar.collapsed .nav-section-title {
|
| 324 |
+
opacity: 0;
|
| 325 |
+
height: 0;
|
| 326 |
+
padding: 0;
|
| 327 |
+
margin: 0;
|
| 328 |
+
overflow: hidden;
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
.sidebar.collapsed.hovered .nav-section-title {
|
| 332 |
+
opacity: 1;
|
| 333 |
+
height: auto;
|
| 334 |
+
padding: var(--space-2) var(--space-3);
|
| 335 |
+
margin-bottom: var(--space-1);
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
.nav-item {
|
| 339 |
+
display: flex;
|
| 340 |
+
align-items: center;
|
| 341 |
+
gap: var(--space-3);
|
| 342 |
+
padding: var(--space-3);
|
| 343 |
+
border-radius: var(--radius-md);
|
| 344 |
+
color: var(--text-secondary);
|
| 345 |
+
cursor: pointer;
|
| 346 |
+
transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1);
|
| 347 |
+
font-weight: 500;
|
| 348 |
+
font-size: 0.875rem;
|
| 349 |
+
position: relative;
|
| 350 |
+
overflow: hidden;
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
/* Hover glow effect */
|
| 354 |
+
.nav-item::before {
|
| 355 |
+
content: '';
|
| 356 |
+
position: absolute;
|
| 357 |
+
top: 0;
|
| 358 |
+
left: 0;
|
| 359 |
+
right: 0;
|
| 360 |
+
bottom: 0;
|
| 361 |
+
background: radial-gradient(circle at left center, rgba(0, 200, 255, 0.1) 0%, transparent 70%);
|
| 362 |
+
opacity: 0;
|
| 363 |
+
transition: opacity 0.3s ease;
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
.nav-item:hover::before {
|
| 367 |
+
opacity: 1;
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
.nav-item:hover {
|
| 371 |
+
background: var(--bg-tertiary);
|
| 372 |
+
color: var(--text-primary);
|
| 373 |
+
transform: translateX(4px);
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
.nav-item.active {
|
| 377 |
+
background: rgba(0, 200, 255, 0.12);
|
| 378 |
+
color: var(--accent-primary);
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
.nav-item.active::after {
|
| 382 |
+
content: '';
|
| 383 |
+
position: absolute;
|
| 384 |
+
left: 0;
|
| 385 |
+
top: 50%;
|
| 386 |
+
transform: translateY(-50%);
|
| 387 |
+
width: 3px;
|
| 388 |
+
height: 20px;
|
| 389 |
+
background: var(--accent-primary);
|
| 390 |
+
border-radius: 0 2px 2px 0;
|
| 391 |
+
box-shadow: 0 0 10px rgba(0, 200, 255, 0.5);
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
.nav-icon {
|
| 395 |
+
width: 20px;
|
| 396 |
+
height: 20px;
|
| 397 |
+
min-width: 20px;
|
| 398 |
+
opacity: 0.7;
|
| 399 |
+
transition: all 0.2s ease;
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
.nav-item:hover .nav-icon,
|
| 403 |
+
.nav-item.active .nav-icon {
|
| 404 |
+
opacity: 1;
|
| 405 |
+
transform: scale(1.1);
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
.nav-item.active .nav-icon {
|
| 409 |
+
filter: drop-shadow(0 0 4px rgba(0, 200, 255, 0.5));
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
.nav-text {
|
| 413 |
+
white-space: nowrap;
|
| 414 |
+
opacity: 1;
|
| 415 |
+
transition: opacity 0.2s ease 0.1s;
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
.sidebar.collapsed .nav-text {
|
| 419 |
+
opacity: 0;
|
| 420 |
+
width: 0;
|
| 421 |
+
overflow: hidden;
|
| 422 |
+
transition: opacity 0.1s ease;
|
| 423 |
+
}
|
| 424 |
+
|
| 425 |
+
.sidebar.collapsed.hovered .nav-text {
|
| 426 |
+
opacity: 1;
|
| 427 |
+
width: auto;
|
| 428 |
+
transition: opacity 0.2s ease 0.15s;
|
| 429 |
+
}
|
| 430 |
+
|
| 431 |
+
/* Center icons when collapsed */
|
| 432 |
+
.sidebar.collapsed .nav-item {
|
| 433 |
+
justify-content: center;
|
| 434 |
+
padding: var(--space-3);
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
.sidebar.collapsed.hovered .nav-item {
|
| 438 |
+
justify-content: flex-start;
|
| 439 |
+
}
|
| 440 |
+
|
| 441 |
+
/* Sidebar Footer */
|
| 442 |
+
.sidebar-footer {
|
| 443 |
+
margin-top: auto;
|
| 444 |
+
padding-top: var(--space-4);
|
| 445 |
+
border-top: var(--border-subtle);
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
.sidebar-footer .btn {
|
| 449 |
+
justify-content: flex-start;
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
.sidebar.collapsed .sidebar-footer .btn {
|
| 453 |
+
justify-content: center;
|
| 454 |
+
padding: var(--space-3);
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
.sidebar.collapsed.hovered .sidebar-footer .btn {
|
| 458 |
+
justify-content: flex-start;
|
| 459 |
+
padding: var(--space-3) var(--space-5);
|
| 460 |
+
}
|
| 461 |
+
|
| 462 |
+
.sidebar-version {
|
| 463 |
+
margin-top: var(--space-3);
|
| 464 |
+
font-size: 0.6875rem;
|
| 465 |
+
color: var(--text-dim);
|
| 466 |
+
text-align: center;
|
| 467 |
+
opacity: 1;
|
| 468 |
+
transition: opacity 0.2s ease;
|
| 469 |
+
}
|
| 470 |
+
|
| 471 |
+
.sidebar.collapsed .sidebar-version {
|
| 472 |
+
opacity: 0;
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
.sidebar.collapsed.hovered .sidebar-version {
|
| 476 |
+
opacity: 1;
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
/* Spinning animation for refresh */
|
| 480 |
+
@keyframes spin {
|
| 481 |
+
from {
|
| 482 |
+
transform: rotate(0deg);
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
to {
|
| 486 |
+
transform: rotate(360deg);
|
| 487 |
+
}
|
| 488 |
+
}
|
| 489 |
+
|
| 490 |
+
.spinning {
|
| 491 |
+
animation: spin 1s linear infinite;
|
| 492 |
+
}
|
| 493 |
+
|
| 494 |
+
/* Main Content */
|
| 495 |
+
.main-content {
|
| 496 |
+
flex: 1;
|
| 497 |
+
margin-left: 240px;
|
| 498 |
+
padding: var(--space-8);
|
| 499 |
+
min-height: 100vh;
|
| 500 |
+
background: var(--bg-primary);
|
| 501 |
+
transition: margin-left 0.3s cubic-bezier(0.4, 0, 0.2, 1);
|
| 502 |
+
}
|
| 503 |
+
|
| 504 |
+
.sidebar-collapsed .main-content {
|
| 505 |
+
margin-left: 72px;
|
| 506 |
+
}
|
| 507 |
+
|
| 508 |
+
/* =========================================================================
|
| 509 |
+
Cards
|
| 510 |
+
========================================================================= */
|
| 511 |
+
.card {
|
| 512 |
+
background: var(--bg-card);
|
| 513 |
+
border: var(--border-subtle);
|
| 514 |
+
border-radius: var(--radius-lg);
|
| 515 |
+
padding: var(--space-5);
|
| 516 |
+
transition: all var(--transition-base);
|
| 517 |
+
}
|
| 518 |
+
|
| 519 |
+
.card:hover {
|
| 520 |
+
background: var(--bg-card-hover);
|
| 521 |
+
border-color: rgba(255, 255, 255, 0.1);
|
| 522 |
+
}
|
| 523 |
+
|
| 524 |
+
.card-header {
|
| 525 |
+
display: flex;
|
| 526 |
+
align-items: center;
|
| 527 |
+
justify-content: space-between;
|
| 528 |
+
margin-bottom: var(--space-4);
|
| 529 |
+
}
|
| 530 |
+
|
| 531 |
+
.card-title {
|
| 532 |
+
font-size: 0.875rem;
|
| 533 |
+
font-weight: 600;
|
| 534 |
+
color: var(--text-secondary);
|
| 535 |
+
}
|
| 536 |
+
|
| 537 |
+
/* =========================================================================
|
| 538 |
+
Team Logo
|
| 539 |
+
========================================================================= */
|
| 540 |
+
.team-logo {
|
| 541 |
+
width: 48px;
|
| 542 |
+
height: 48px;
|
| 543 |
+
object-fit: contain;
|
| 544 |
+
}
|
| 545 |
+
|
| 546 |
+
.team-logo-sm {
|
| 547 |
+
width: 32px;
|
| 548 |
+
height: 32px;
|
| 549 |
+
}
|
| 550 |
+
|
| 551 |
+
.team-logo-lg {
|
| 552 |
+
width: 64px;
|
| 553 |
+
height: 64px;
|
| 554 |
+
}
|
| 555 |
+
|
| 556 |
+
.team-logo-xl {
|
| 557 |
+
width: 80px;
|
| 558 |
+
height: 80px;
|
| 559 |
+
}
|
| 560 |
+
|
| 561 |
+
/* =========================================================================
|
| 562 |
+
Game Card - Professional Style
|
| 563 |
+
========================================================================= */
|
| 564 |
+
.game-card {
|
| 565 |
+
background: var(--bg-card);
|
| 566 |
+
border: var(--border-subtle);
|
| 567 |
+
border-radius: var(--radius-lg);
|
| 568 |
+
padding: var(--space-5);
|
| 569 |
+
margin-bottom: var(--space-4);
|
| 570 |
+
transition: all var(--transition-base);
|
| 571 |
+
}
|
| 572 |
+
|
| 573 |
+
.game-card:hover {
|
| 574 |
+
border-color: rgba(255, 255, 255, 0.12);
|
| 575 |
+
box-shadow: var(--shadow-sm);
|
| 576 |
+
}
|
| 577 |
+
|
| 578 |
+
.game-card.live {
|
| 579 |
+
border-color: var(--accent-danger);
|
| 580 |
+
box-shadow: 0 0 0 1px var(--accent-danger);
|
| 581 |
+
}
|
| 582 |
+
|
| 583 |
+
.game-header {
|
| 584 |
+
display: flex;
|
| 585 |
+
align-items: center;
|
| 586 |
+
justify-content: space-between;
|
| 587 |
+
margin-bottom: var(--space-4);
|
| 588 |
+
padding-bottom: var(--space-3);
|
| 589 |
+
border-bottom: var(--border-subtle);
|
| 590 |
+
}
|
| 591 |
+
|
| 592 |
+
.game-status {
|
| 593 |
+
font-size: 0.75rem;
|
| 594 |
+
font-weight: 600;
|
| 595 |
+
text-transform: uppercase;
|
| 596 |
+
letter-spacing: 0.05em;
|
| 597 |
+
}
|
| 598 |
+
|
| 599 |
+
.game-status.live {
|
| 600 |
+
color: var(--accent-danger);
|
| 601 |
+
display: flex;
|
| 602 |
+
align-items: center;
|
| 603 |
+
gap: var(--space-2);
|
| 604 |
+
}
|
| 605 |
+
|
| 606 |
+
.live-dot {
|
| 607 |
+
width: 6px;
|
| 608 |
+
height: 6px;
|
| 609 |
+
background: var(--accent-danger);
|
| 610 |
+
border-radius: 50%;
|
| 611 |
+
animation: pulse-dot 2s infinite;
|
| 612 |
+
}
|
| 613 |
+
|
| 614 |
+
@keyframes pulse-dot {
|
| 615 |
+
|
| 616 |
+
0%,
|
| 617 |
+
100% {
|
| 618 |
+
opacity: 1;
|
| 619 |
+
transform: scale(1);
|
| 620 |
+
}
|
| 621 |
+
|
| 622 |
+
50% {
|
| 623 |
+
opacity: 0.5;
|
| 624 |
+
transform: scale(0.8);
|
| 625 |
+
}
|
| 626 |
+
}
|
| 627 |
+
|
| 628 |
+
.game-time {
|
| 629 |
+
color: var(--text-muted);
|
| 630 |
+
font-size: 0.75rem;
|
| 631 |
+
}
|
| 632 |
+
|
| 633 |
+
.game-matchup {
|
| 634 |
+
display: grid;
|
| 635 |
+
grid-template-columns: 1fr auto 1fr;
|
| 636 |
+
align-items: center;
|
| 637 |
+
gap: var(--space-6);
|
| 638 |
+
}
|
| 639 |
+
|
| 640 |
+
.team-block {
|
| 641 |
+
display: flex;
|
| 642 |
+
flex-direction: column;
|
| 643 |
+
align-items: center;
|
| 644 |
+
text-align: center;
|
| 645 |
+
}
|
| 646 |
+
|
| 647 |
+
.team-block.away {
|
| 648 |
+
align-items: flex-start;
|
| 649 |
+
text-align: left;
|
| 650 |
+
}
|
| 651 |
+
|
| 652 |
+
.team-block.home {
|
| 653 |
+
align-items: flex-end;
|
| 654 |
+
text-align: right;
|
| 655 |
+
}
|
| 656 |
+
|
| 657 |
+
.team-info {
|
| 658 |
+
display: flex;
|
| 659 |
+
align-items: center;
|
| 660 |
+
gap: var(--space-3);
|
| 661 |
+
}
|
| 662 |
+
|
| 663 |
+
.team-block.home .team-info {
|
| 664 |
+
flex-direction: row-reverse;
|
| 665 |
+
}
|
| 666 |
+
|
| 667 |
+
.team-details {
|
| 668 |
+
display: flex;
|
| 669 |
+
flex-direction: column;
|
| 670 |
+
}
|
| 671 |
+
|
| 672 |
+
.team-name {
|
| 673 |
+
font-size: 1rem;
|
| 674 |
+
font-weight: 600;
|
| 675 |
+
color: var(--text-primary);
|
| 676 |
+
}
|
| 677 |
+
|
| 678 |
+
.team-record {
|
| 679 |
+
font-size: 0.75rem;
|
| 680 |
+
color: var(--text-muted);
|
| 681 |
+
}
|
| 682 |
+
|
| 683 |
+
.team-score {
|
| 684 |
+
font-size: 2rem;
|
| 685 |
+
font-weight: 700;
|
| 686 |
+
color: var(--text-primary);
|
| 687 |
+
margin-top: var(--space-2);
|
| 688 |
+
}
|
| 689 |
+
|
| 690 |
+
.team-probability {
|
| 691 |
+
font-size: 1.5rem;
|
| 692 |
+
font-weight: 700;
|
| 693 |
+
color: var(--accent-primary);
|
| 694 |
+
margin-top: var(--space-2);
|
| 695 |
+
}
|
| 696 |
+
|
| 697 |
+
/* Score/Prediction Divider */
|
| 698 |
+
.game-center {
|
| 699 |
+
display: flex;
|
| 700 |
+
flex-direction: column;
|
| 701 |
+
align-items: center;
|
| 702 |
+
gap: var(--space-2);
|
| 703 |
+
}
|
| 704 |
+
|
| 705 |
+
.vs-divider {
|
| 706 |
+
font-size: 0.875rem;
|
| 707 |
+
color: var(--text-dim);
|
| 708 |
+
font-weight: 500;
|
| 709 |
+
}
|
| 710 |
+
|
| 711 |
+
.prediction-indicator {
|
| 712 |
+
background: var(--bg-elevated);
|
| 713 |
+
border: var(--border-subtle);
|
| 714 |
+
border-radius: var(--radius-md);
|
| 715 |
+
padding: var(--space-3) var(--space-4);
|
| 716 |
+
text-align: center;
|
| 717 |
+
}
|
| 718 |
+
|
| 719 |
+
.prediction-label {
|
| 720 |
+
font-size: 0.625rem;
|
| 721 |
+
font-weight: 600;
|
| 722 |
+
text-transform: uppercase;
|
| 723 |
+
letter-spacing: 0.1em;
|
| 724 |
+
color: var(--text-muted);
|
| 725 |
+
margin-bottom: var(--space-1);
|
| 726 |
+
}
|
| 727 |
+
|
| 728 |
+
.prediction-pick {
|
| 729 |
+
font-size: 1rem;
|
| 730 |
+
font-weight: 700;
|
| 731 |
+
color: var(--accent-primary);
|
| 732 |
+
}
|
| 733 |
+
|
| 734 |
+
/* =========================================================================
|
| 735 |
+
Stats Grid
|
| 736 |
+
========================================================================= */
|
| 737 |
+
.stats-grid {
|
| 738 |
+
display: grid;
|
| 739 |
+
grid-template-columns: repeat(auto-fit, minmax(180px, 1fr));
|
| 740 |
+
gap: var(--space-4);
|
| 741 |
+
margin-bottom: var(--space-6);
|
| 742 |
+
}
|
| 743 |
+
|
| 744 |
+
.stat-card {
|
| 745 |
+
background: var(--bg-card);
|
| 746 |
+
border: var(--border-subtle);
|
| 747 |
+
border-radius: var(--radius-lg);
|
| 748 |
+
padding: var(--space-5);
|
| 749 |
+
display: flex;
|
| 750 |
+
flex-direction: column;
|
| 751 |
+
}
|
| 752 |
+
|
| 753 |
+
.stat-label {
|
| 754 |
+
font-size: 0.75rem;
|
| 755 |
+
font-weight: 500;
|
| 756 |
+
text-transform: uppercase;
|
| 757 |
+
letter-spacing: 0.05em;
|
| 758 |
+
color: var(--text-muted);
|
| 759 |
+
margin-bottom: var(--space-2);
|
| 760 |
+
}
|
| 761 |
+
|
| 762 |
+
.stat-value {
|
| 763 |
+
font-size: 2rem;
|
| 764 |
+
font-weight: 700;
|
| 765 |
+
color: var(--text-primary);
|
| 766 |
+
line-height: 1;
|
| 767 |
+
}
|
| 768 |
+
|
| 769 |
+
.stat-value.accent {
|
| 770 |
+
color: var(--accent-primary);
|
| 771 |
+
}
|
| 772 |
+
|
| 773 |
+
.stat-change {
|
| 774 |
+
font-size: 0.75rem;
|
| 775 |
+
margin-top: var(--space-2);
|
| 776 |
+
display: flex;
|
| 777 |
+
align-items: center;
|
| 778 |
+
gap: var(--space-1);
|
| 779 |
+
}
|
| 780 |
+
|
| 781 |
+
.stat-change.positive {
|
| 782 |
+
color: var(--accent-success);
|
| 783 |
+
}
|
| 784 |
+
|
| 785 |
+
.stat-change.negative {
|
| 786 |
+
color: var(--accent-danger);
|
| 787 |
+
}
|
| 788 |
+
|
| 789 |
+
/* =========================================================================
|
| 790 |
+
Form Controls
|
| 791 |
+
========================================================================= */
|
| 792 |
+
.form-group {
|
| 793 |
+
margin-bottom: var(--space-4);
|
| 794 |
+
}
|
| 795 |
+
|
| 796 |
+
.form-label {
|
| 797 |
+
display: block;
|
| 798 |
+
font-size: 0.75rem;
|
| 799 |
+
font-weight: 500;
|
| 800 |
+
text-transform: uppercase;
|
| 801 |
+
letter-spacing: 0.05em;
|
| 802 |
+
color: var(--text-muted);
|
| 803 |
+
margin-bottom: var(--space-2);
|
| 804 |
+
}
|
| 805 |
+
|
| 806 |
+
.form-select {
|
| 807 |
+
width: 100%;
|
| 808 |
+
padding: var(--space-3) var(--space-4);
|
| 809 |
+
background: var(--bg-elevated);
|
| 810 |
+
border: var(--border-subtle);
|
| 811 |
+
border-radius: var(--radius-md);
|
| 812 |
+
color: var(--text-primary);
|
| 813 |
+
font-size: 0.875rem;
|
| 814 |
+
font-weight: 500;
|
| 815 |
+
cursor: pointer;
|
| 816 |
+
appearance: none;
|
| 817 |
+
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23666' d='M6 8L1 3h10z'/%3E%3C/svg%3E");
|
| 818 |
+
background-repeat: no-repeat;
|
| 819 |
+
background-position: right var(--space-3) center;
|
| 820 |
+
transition: all var(--transition-fast);
|
| 821 |
+
}
|
| 822 |
+
|
| 823 |
+
.form-select:hover {
|
| 824 |
+
border-color: rgba(255, 255, 255, 0.15);
|
| 825 |
+
}
|
| 826 |
+
|
| 827 |
+
.form-select:focus {
|
| 828 |
+
outline: none;
|
| 829 |
+
border-color: var(--accent-primary);
|
| 830 |
+
box-shadow: 0 0 0 2px rgba(0, 200, 255, 0.1);
|
| 831 |
+
}
|
| 832 |
+
|
| 833 |
+
.form-input {
|
| 834 |
+
width: 100%;
|
| 835 |
+
padding: var(--space-3) var(--space-4);
|
| 836 |
+
background: var(--bg-elevated);
|
| 837 |
+
border: var(--border-subtle);
|
| 838 |
+
border-radius: var(--radius-md);
|
| 839 |
+
color: var(--text-primary);
|
| 840 |
+
font-size: 0.875rem;
|
| 841 |
+
transition: all var(--transition-fast);
|
| 842 |
+
}
|
| 843 |
+
|
| 844 |
+
.form-input:focus {
|
| 845 |
+
outline: none;
|
| 846 |
+
border-color: var(--accent-primary);
|
| 847 |
+
box-shadow: 0 0 0 2px rgba(0, 200, 255, 0.1);
|
| 848 |
+
}
|
| 849 |
+
|
| 850 |
+
.form-input::placeholder {
|
| 851 |
+
color: var(--text-dim);
|
| 852 |
+
}
|
| 853 |
+
|
| 854 |
+
/* =========================================================================
|
| 855 |
+
Buttons
|
| 856 |
+
========================================================================= */
|
| 857 |
+
.btn {
|
| 858 |
+
display: inline-flex;
|
| 859 |
+
align-items: center;
|
| 860 |
+
justify-content: center;
|
| 861 |
+
gap: var(--space-2);
|
| 862 |
+
padding: var(--space-3) var(--space-5);
|
| 863 |
+
border-radius: var(--radius-md);
|
| 864 |
+
font-size: 0.875rem;
|
| 865 |
+
font-weight: 600;
|
| 866 |
+
cursor: pointer;
|
| 867 |
+
border: none;
|
| 868 |
+
transition: all var(--transition-fast);
|
| 869 |
+
}
|
| 870 |
+
|
| 871 |
+
.btn-primary {
|
| 872 |
+
background: var(--gradient-primary);
|
| 873 |
+
color: #000;
|
| 874 |
+
}
|
| 875 |
+
|
| 876 |
+
.btn-primary:hover {
|
| 877 |
+
opacity: 0.9;
|
| 878 |
+
box-shadow: var(--shadow-glow);
|
| 879 |
+
}
|
| 880 |
+
|
| 881 |
+
.btn-secondary {
|
| 882 |
+
background: var(--bg-elevated);
|
| 883 |
+
border: var(--border-subtle);
|
| 884 |
+
color: var(--text-primary);
|
| 885 |
+
}
|
| 886 |
+
|
| 887 |
+
.btn-secondary:hover {
|
| 888 |
+
background: var(--bg-tertiary);
|
| 889 |
+
border-color: rgba(255, 255, 255, 0.15);
|
| 890 |
+
}
|
| 891 |
+
|
| 892 |
+
.btn-ghost {
|
| 893 |
+
background: transparent;
|
| 894 |
+
color: var(--text-secondary);
|
| 895 |
+
}
|
| 896 |
+
|
| 897 |
+
.btn-ghost:hover {
|
| 898 |
+
background: var(--bg-tertiary);
|
| 899 |
+
color: var(--text-primary);
|
| 900 |
+
}
|
| 901 |
+
|
| 902 |
+
.btn-icon {
|
| 903 |
+
width: 36px;
|
| 904 |
+
height: 36px;
|
| 905 |
+
padding: 0;
|
| 906 |
+
border-radius: var(--radius-md);
|
| 907 |
+
}
|
| 908 |
+
|
| 909 |
+
.btn-lg {
|
| 910 |
+
padding: var(--space-4) var(--space-6);
|
| 911 |
+
font-size: 1rem;
|
| 912 |
+
}
|
| 913 |
+
|
| 914 |
+
.btn-block {
|
| 915 |
+
width: 100%;
|
| 916 |
+
}
|
| 917 |
+
|
| 918 |
+
/* =========================================================================
|
| 919 |
+
Badges & Tags
|
| 920 |
+
========================================================================= */
|
| 921 |
+
.badge {
|
| 922 |
+
display: inline-flex;
|
| 923 |
+
align-items: center;
|
| 924 |
+
padding: var(--space-1) var(--space-2);
|
| 925 |
+
border-radius: var(--radius-xs);
|
| 926 |
+
font-size: 0.6875rem;
|
| 927 |
+
font-weight: 600;
|
| 928 |
+
text-transform: uppercase;
|
| 929 |
+
letter-spacing: 0.05em;
|
| 930 |
+
}
|
| 931 |
+
|
| 932 |
+
.badge-success {
|
| 933 |
+
background: rgba(0, 210, 106, 0.15);
|
| 934 |
+
color: var(--accent-success);
|
| 935 |
+
}
|
| 936 |
+
|
| 937 |
+
.badge-warning {
|
| 938 |
+
background: rgba(255, 184, 0, 0.15);
|
| 939 |
+
color: var(--accent-warning);
|
| 940 |
+
}
|
| 941 |
+
|
| 942 |
+
.badge-danger {
|
| 943 |
+
background: rgba(255, 59, 59, 0.15);
|
| 944 |
+
color: var(--accent-danger);
|
| 945 |
+
}
|
| 946 |
+
|
| 947 |
+
.badge-neutral {
|
| 948 |
+
background: rgba(255, 255, 255, 0.08);
|
| 949 |
+
color: var(--text-secondary);
|
| 950 |
+
}
|
| 951 |
+
|
| 952 |
+
.confidence-high {
|
| 953 |
+
background: rgba(0, 210, 106, 0.15);
|
| 954 |
+
color: var(--accent-success);
|
| 955 |
+
}
|
| 956 |
+
|
| 957 |
+
.confidence-medium {
|
| 958 |
+
background: rgba(255, 184, 0, 0.15);
|
| 959 |
+
color: var(--accent-warning);
|
| 960 |
+
}
|
| 961 |
+
|
| 962 |
+
.confidence-low {
|
| 963 |
+
background: rgba(255, 59, 59, 0.15);
|
| 964 |
+
color: var(--accent-danger);
|
| 965 |
+
}
|
| 966 |
+
|
| 967 |
+
/* =========================================================================
|
| 968 |
+
Tables
|
| 969 |
+
========================================================================= */
|
| 970 |
+
.table-container {
|
| 971 |
+
background: var(--bg-card);
|
| 972 |
+
border: var(--border-subtle);
|
| 973 |
+
border-radius: var(--radius-lg);
|
| 974 |
+
overflow: hidden;
|
| 975 |
+
}
|
| 976 |
+
|
| 977 |
+
.data-table {
|
| 978 |
+
width: 100%;
|
| 979 |
+
border-collapse: collapse;
|
| 980 |
+
}
|
| 981 |
+
|
| 982 |
+
.data-table th {
|
| 983 |
+
padding: var(--space-3) var(--space-4);
|
| 984 |
+
text-align: left;
|
| 985 |
+
font-size: 0.6875rem;
|
| 986 |
+
font-weight: 600;
|
| 987 |
+
text-transform: uppercase;
|
| 988 |
+
letter-spacing: 0.05em;
|
| 989 |
+
color: var(--text-muted);
|
| 990 |
+
background: var(--bg-secondary);
|
| 991 |
+
border-bottom: var(--border-subtle);
|
| 992 |
+
}
|
| 993 |
+
|
| 994 |
+
.data-table td {
|
| 995 |
+
padding: var(--space-4);
|
| 996 |
+
border-bottom: var(--border-subtle);
|
| 997 |
+
font-size: 0.875rem;
|
| 998 |
+
}
|
| 999 |
+
|
| 1000 |
+
.data-table tr:last-child td {
|
| 1001 |
+
border-bottom: none;
|
| 1002 |
+
}
|
| 1003 |
+
|
| 1004 |
+
.data-table tr:hover {
|
| 1005 |
+
background: var(--bg-card-hover);
|
| 1006 |
+
}
|
| 1007 |
+
|
| 1008 |
+
.table-team {
|
| 1009 |
+
display: flex;
|
| 1010 |
+
align-items: center;
|
| 1011 |
+
gap: var(--space-3);
|
| 1012 |
+
}
|
| 1013 |
+
|
| 1014 |
+
/* =========================================================================
|
| 1015 |
+
Loading States
|
| 1016 |
+
========================================================================= */
|
| 1017 |
+
.loading {
|
| 1018 |
+
display: flex;
|
| 1019 |
+
flex-direction: column;
|
| 1020 |
+
align-items: center;
|
| 1021 |
+
justify-content: center;
|
| 1022 |
+
padding: var(--space-12);
|
| 1023 |
+
}
|
| 1024 |
+
|
| 1025 |
+
.spinner {
|
| 1026 |
+
width: 32px;
|
| 1027 |
+
height: 32px;
|
| 1028 |
+
border: 2px solid var(--bg-tertiary);
|
| 1029 |
+
border-top-color: var(--accent-primary);
|
| 1030 |
+
border-radius: 50%;
|
| 1031 |
+
animation: spin 0.8s linear infinite;
|
| 1032 |
+
}
|
| 1033 |
+
|
| 1034 |
+
.loading-text {
|
| 1035 |
+
margin-top: var(--space-4);
|
| 1036 |
+
color: var(--text-muted);
|
| 1037 |
+
font-size: 0.875rem;
|
| 1038 |
+
}
|
| 1039 |
+
|
| 1040 |
+
@keyframes spin {
|
| 1041 |
+
to {
|
| 1042 |
+
transform: rotate(360deg);
|
| 1043 |
+
}
|
| 1044 |
+
}
|
| 1045 |
+
|
| 1046 |
+
/* Skeleton */
|
| 1047 |
+
.skeleton {
|
| 1048 |
+
background: linear-gradient(90deg,
|
| 1049 |
+
var(--bg-tertiary) 25%,
|
| 1050 |
+
var(--bg-elevated) 50%,
|
| 1051 |
+
var(--bg-tertiary) 75%);
|
| 1052 |
+
background-size: 200% 100%;
|
| 1053 |
+
animation: shimmer 1.5s infinite;
|
| 1054 |
+
border-radius: var(--radius-sm);
|
| 1055 |
+
}
|
| 1056 |
+
|
| 1057 |
+
@keyframes shimmer {
|
| 1058 |
+
0% {
|
| 1059 |
+
background-position: 200% 0;
|
| 1060 |
+
}
|
| 1061 |
+
|
| 1062 |
+
100% {
|
| 1063 |
+
background-position: -200% 0;
|
| 1064 |
+
}
|
| 1065 |
+
}
|
| 1066 |
+
|
| 1067 |
+
/* =========================================================================
|
| 1068 |
+
Empty States
|
| 1069 |
+
========================================================================= */
|
| 1070 |
+
.empty-state {
|
| 1071 |
+
text-align: center;
|
| 1072 |
+
padding: var(--space-12);
|
| 1073 |
+
color: var(--text-muted);
|
| 1074 |
+
}
|
| 1075 |
+
|
| 1076 |
+
.empty-state-icon {
|
| 1077 |
+
width: 48px;
|
| 1078 |
+
height: 48px;
|
| 1079 |
+
margin: 0 auto var(--space-4);
|
| 1080 |
+
opacity: 0.3;
|
| 1081 |
+
}
|
| 1082 |
+
|
| 1083 |
+
.empty-state-title {
|
| 1084 |
+
font-size: 1rem;
|
| 1085 |
+
font-weight: 600;
|
| 1086 |
+
color: var(--text-secondary);
|
| 1087 |
+
margin-bottom: var(--space-2);
|
| 1088 |
+
}
|
| 1089 |
+
|
| 1090 |
+
.empty-state-text {
|
| 1091 |
+
font-size: 0.875rem;
|
| 1092 |
+
color: var(--text-muted);
|
| 1093 |
+
}
|
| 1094 |
+
|
| 1095 |
+
/* =========================================================================
|
| 1096 |
+
Page Header
|
| 1097 |
+
========================================================================= */
|
| 1098 |
+
.page-header {
|
| 1099 |
+
margin-bottom: var(--space-8);
|
| 1100 |
+
}
|
| 1101 |
+
|
| 1102 |
+
.page-title {
|
| 1103 |
+
font-size: 1.75rem;
|
| 1104 |
+
font-weight: 700;
|
| 1105 |
+
margin-bottom: var(--space-2);
|
| 1106 |
+
}
|
| 1107 |
+
|
| 1108 |
+
.page-description {
|
| 1109 |
+
color: var(--text-secondary);
|
| 1110 |
+
font-size: 0.875rem;
|
| 1111 |
+
}
|
| 1112 |
+
|
| 1113 |
+
/* =========================================================================
|
| 1114 |
+
Probability Bar
|
| 1115 |
+
========================================================================= */
|
| 1116 |
+
.probability-bar-container {
  margin-top: var(--space-4);
}

/* Thin stacked probability bar; the away/home fills sit side by side in a flex row. */
.probability-bar {
  height: 4px;
  background: var(--bg-tertiary);
  border-radius: 2px;
  overflow: hidden;
  display: flex;
}

.probability-fill-away {
  height: 100%;
  background: var(--accent-secondary);
  transition: width var(--transition-slow);
}

.probability-fill-home {
  height: 100%;
  background: var(--accent-primary);
  transition: width var(--transition-slow);
}

.probability-labels {
  display: flex;
  justify-content: space-between;
  margin-top: var(--space-2);
  font-size: 0.75rem;
  color: var(--text-muted);
}

/* =========================================================================
   Section Dividers
   ========================================================================= */
.section {
  margin-bottom: var(--space-8);
}

.section-header {
  display: flex;
  align-items: center;
  justify-content: space-between;
  margin-bottom: var(--space-4);
}

.section-title {
  font-size: 0.875rem;
  font-weight: 600;
  text-transform: uppercase;
  letter-spacing: 0.05em;
  color: var(--text-secondary);
}

/* =========================================================================
   Animations
   ========================================================================= */
@keyframes fadeIn {
  from {
    opacity: 0;
  }

  to {
    opacity: 1;
  }
}

@keyframes slideUp {
  from {
    opacity: 0;
    transform: translateY(10px);
  }

  to {
    opacity: 1;
    transform: translateY(0);
  }
}

.animate-fadeIn {
  animation: fadeIn 0.3s ease;
}

.animate-slideUp {
  animation: slideUp 0.3s ease;
}

/* Staggered animations: delay each of the first five children by +50ms. */
.stagger>*:nth-child(1) {
  animation-delay: 0.05s;
}

.stagger>*:nth-child(2) {
  animation-delay: 0.1s;
}

.stagger>*:nth-child(3) {
  animation-delay: 0.15s;
}

.stagger>*:nth-child(4) {
  animation-delay: 0.2s;
}

.stagger>*:nth-child(5) {
  animation-delay: 0.25s;
}

/* =========================================================================
   Responsive
   ========================================================================= */
/* Tablet: collapse the sidebar to an icon rail (labels hidden). */
@media (max-width: 1024px) {
  .sidebar {
    width: 64px;
    padding: var(--space-3);
  }

  .sidebar-logo-text,
  .nav-section-title,
  .nav-item span:not(.nav-icon) {
    display: none;
  }

  .main-content {
    margin-left: 64px;
    padding: var(--space-5);
  }
}

/* Phone: move the sidebar to a fixed bottom bar and stack matchups vertically. */
@media (max-width: 768px) {
  .sidebar {
    position: fixed;
    bottom: 0;
    top: auto;
    left: 0;
    right: 0;
    width: 100%;
    height: 64px;
    flex-direction: row;
    padding: var(--space-2);
    border-right: none;
    border-top: var(--border-subtle);
  }

  .sidebar-header {
    display: none;
  }

  .sidebar-nav {
    flex-direction: row;
    justify-content: space-around;
    gap: 0;
    width: 100%;
  }

  .nav-section {
    display: flex;
    margin: 0;
  }

  .nav-section-title {
    display: none;
  }

  .main-content {
    margin-left: 0;
    margin-bottom: 64px;
    padding: var(--space-4);
  }

  .game-matchup {
    grid-template-columns: 1fr;
    gap: var(--space-4);
  }

  .team-block.away,
  .team-block.home {
    align-items: center;
    text-align: center;
  }

  .team-block.home .team-info {
    flex-direction: row;
  }
}

/* =========================================================================
   Scrollbar
   ========================================================================= */
::-webkit-scrollbar {
  width: 8px;
  height: 8px;
}

::-webkit-scrollbar-track {
  background: transparent;
}

::-webkit-scrollbar-thumb {
  background: var(--bg-tertiary);
  border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
  background: var(--text-dim);
}

/* =========================================================================
   Utility Classes
   ========================================================================= */
.flex {
  display: flex;
}

.flex-col {
  flex-direction: column;
}

.items-center {
  align-items: center;
}

.justify-between {
  justify-content: space-between;
}

.gap-2 {
  gap: var(--space-2);
}

.gap-3 {
  gap: var(--space-3);
}

.gap-4 {
  gap: var(--space-4);
}

.mt-4 {
  margin-top: var(--space-4);
}

.mb-4 {
  margin-bottom: var(--space-4);
}

.text-right {
  text-align: right;
}

.text-center {
  text-align: center;
}

.font-mono {
  font-family: var(--font-mono);
}

/* Single-line ellipsis truncation. */
.truncate {
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
}
web/src/main.jsx
// Application entry point: mounts the React tree under the #root element
// declared in index.html. Global styles are pulled in via index.css.
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import './index.css'
import App from './App.jsx'

// StrictMode double-invokes render/effects in development to surface
// side-effect bugs early; it has no effect in production builds.
createRoot(document.getElementById('root')).render(
  <StrictMode>
    <App />
  </StrictMode>,
)
web/src/pages/Accuracy.jsx
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useState, useEffect } from 'react'
|
| 2 |
+
import { getAccuracy } from '../api'
|
| 3 |
+
import { IconRefresh } from '../icons'
|
| 4 |
+
|
| 5 |
+
function Accuracy() {
|
| 6 |
+
const [data, setData] = useState({ stats: {}, recent_predictions: [] })
|
| 7 |
+
const [loading, setLoading] = useState(true)
|
| 8 |
+
|
| 9 |
+
const fetchData = async () => {
|
| 10 |
+
setLoading(true)
|
| 11 |
+
try {
|
| 12 |
+
const result = await getAccuracy()
|
| 13 |
+
setData(result)
|
| 14 |
+
} catch (err) {
|
| 15 |
+
console.error('Failed to load accuracy:', err)
|
| 16 |
+
} finally {
|
| 17 |
+
setLoading(false)
|
| 18 |
+
}
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
useEffect(() => {
|
| 22 |
+
fetchData()
|
| 23 |
+
// Auto-refresh every 60 seconds
|
| 24 |
+
const interval = setInterval(fetchData, 60000)
|
| 25 |
+
return () => clearInterval(interval)
|
| 26 |
+
}, [])
|
| 27 |
+
|
| 28 |
+
if (loading) {
|
| 29 |
+
return (
|
| 30 |
+
<div className="loading">
|
| 31 |
+
<div className="spinner"></div>
|
| 32 |
+
<p className="loading-text">Loading accuracy stats...</p>
|
| 33 |
+
</div>
|
| 34 |
+
)
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
const { stats, recent_predictions } = data
|
| 38 |
+
const overallAccuracy = stats.overall_accuracy || 0
|
| 39 |
+
const byConfidence = stats.by_confidence || {}
|
| 40 |
+
|
| 41 |
+
return (
|
| 42 |
+
<div className="animate-fadeIn">
|
| 43 |
+
<div className="page-header">
|
| 44 |
+
<div className="flex items-center justify-between">
|
| 45 |
+
<div>
|
| 46 |
+
<h1 className="page-title">Model Accuracy</h1>
|
| 47 |
+
<p className="page-description">Track prediction performance and model reliability</p>
|
| 48 |
+
</div>
|
| 49 |
+
<button className="btn btn-secondary" onClick={fetchData}>
|
| 50 |
+
<IconRefresh className="nav-icon" />
|
| 51 |
+
Refresh
|
| 52 |
+
</button>
|
| 53 |
+
</div>
|
| 54 |
+
</div>
|
| 55 |
+
|
| 56 |
+
{/* Primary Stats Grid */}
|
| 57 |
+
<div className="stats-grid" style={{ marginBottom: 'var(--space-6)' }}>
|
| 58 |
+
<div className="stat-card">
|
| 59 |
+
<div className="stat-label">Total Predictions</div>
|
| 60 |
+
<div className="stat-value">{stats.total_predictions || 0}</div>
|
| 61 |
+
</div>
|
| 62 |
+
<div className="stat-card">
|
| 63 |
+
<div className="stat-label">Completed Games</div>
|
| 64 |
+
<div className="stat-value">{stats.completed_games || 0}</div>
|
| 65 |
+
</div>
|
| 66 |
+
<div className="stat-card">
|
| 67 |
+
<div className="stat-label">Correct Predictions</div>
|
| 68 |
+
<div className="stat-value accent">{stats.correct_predictions || 0}</div>
|
| 69 |
+
</div>
|
| 70 |
+
<div className="stat-card">
|
| 71 |
+
<div className="stat-label">Overall Accuracy</div>
|
| 72 |
+
<div className="stat-value accent" style={{ fontSize: '2.5rem' }}>
|
| 73 |
+
{(overallAccuracy * 100).toFixed(1)}%
|
| 74 |
+
</div>
|
| 75 |
+
</div>
|
| 76 |
+
</div>
|
| 77 |
+
|
| 78 |
+
{/* Detailed Metrics */}
|
| 79 |
+
<div style={{ display: 'grid', gridTemplateColumns: '1fr 1fr', gap: 'var(--space-6)', marginBottom: 'var(--space-8)' }}>
|
| 80 |
+
{/* Performance Metrics */}
|
| 81 |
+
<div className="card">
|
| 82 |
+
<h3 style={{ marginBottom: 'var(--space-4)' }}>Performance Metrics</h3>
|
| 83 |
+
<div style={{ display: 'grid', gridTemplateColumns: '1fr 1fr', gap: 'var(--space-4)' }}>
|
| 84 |
+
<div>
|
| 85 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Current Streak</div>
|
| 86 |
+
<div style={{ fontSize: '1.5rem', fontWeight: '700', color: stats.streak_type === 'W' ? 'var(--accent-success)' : 'var(--accent-danger)' }}>
|
| 87 |
+
{stats.current_streak || 0}{stats.streak_type || ''}
|
| 88 |
+
</div>
|
| 89 |
+
</div>
|
| 90 |
+
<div>
|
| 91 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Last 10 Games</div>
|
| 92 |
+
<div style={{ fontSize: '1.5rem', fontWeight: '700' }}>
|
| 93 |
+
{stats.last_10_record || '0-0'}
|
| 94 |
+
<span style={{ fontSize: '0.875rem', color: 'var(--text-muted)', marginLeft: 'var(--space-2)' }}>
|
| 95 |
+
({((stats.last_10_accuracy || 0) * 100).toFixed(0)}%)
|
| 96 |
+
</span>
|
| 97 |
+
</div>
|
| 98 |
+
</div>
|
| 99 |
+
<div>
|
| 100 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Pending Predictions</div>
|
| 101 |
+
<div style={{ fontSize: '1.5rem', fontWeight: '700', color: 'var(--accent-warning)' }}>
|
| 102 |
+
{stats.pending_predictions || 0}
|
| 103 |
+
</div>
|
| 104 |
+
</div>
|
| 105 |
+
<div>
|
| 106 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Avg Probability</div>
|
| 107 |
+
<div style={{ fontSize: '1.5rem', fontWeight: '700' }}>
|
| 108 |
+
{((stats.avg_probability_correct || 0) * 100).toFixed(1)}%
|
| 109 |
+
</div>
|
| 110 |
+
</div>
|
| 111 |
+
</div>
|
| 112 |
+
</div>
|
| 113 |
+
|
| 114 |
+
{/* Home vs Away */}
|
| 115 |
+
<div className="card">
|
| 116 |
+
<h3 style={{ marginBottom: 'var(--space-4)' }}>Home vs Away Picks</h3>
|
| 117 |
+
<div style={{ display: 'grid', gridTemplateColumns: '1fr 1fr', gap: 'var(--space-4)' }}>
|
| 118 |
+
<div>
|
| 119 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Home Team Picks</div>
|
| 120 |
+
<div style={{ fontSize: '1.5rem', fontWeight: '700', color: 'var(--accent-primary)' }}>
|
| 121 |
+
{((stats.home_pick_accuracy || 0) * 100).toFixed(1)}%
|
| 122 |
+
</div>
|
| 123 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)' }}>
|
| 124 |
+
{stats.home_picks_total || 0} picks
|
| 125 |
+
</div>
|
| 126 |
+
</div>
|
| 127 |
+
<div>
|
| 128 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Away Team Picks</div>
|
| 129 |
+
<div style={{ fontSize: '1.5rem', fontWeight: '700', color: 'var(--accent-secondary)' }}>
|
| 130 |
+
{((stats.away_pick_accuracy || 0) * 100).toFixed(1)}%
|
| 131 |
+
</div>
|
| 132 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)' }}>
|
| 133 |
+
{stats.away_picks_total || 0} picks
|
| 134 |
+
</div>
|
| 135 |
+
</div>
|
| 136 |
+
</div>
|
| 137 |
+
|
| 138 |
+
{/* Visual bar */}
|
| 139 |
+
<div style={{ marginTop: 'var(--space-4)' }}>
|
| 140 |
+
<div style={{ display: 'flex', height: '8px', borderRadius: '4px', overflow: 'hidden' }}>
|
| 141 |
+
<div style={{
|
| 142 |
+
flex: stats.home_picks_total || 1,
|
| 143 |
+
background: 'var(--accent-primary)',
|
| 144 |
+
opacity: stats.home_pick_accuracy > stats.away_pick_accuracy ? 1 : 0.5
|
| 145 |
+
}}></div>
|
| 146 |
+
<div style={{
|
| 147 |
+
flex: stats.away_picks_total || 1,
|
| 148 |
+
background: 'var(--accent-secondary)',
|
| 149 |
+
opacity: stats.away_pick_accuracy > stats.home_pick_accuracy ? 1 : 0.5
|
| 150 |
+
}}></div>
|
| 151 |
+
</div>
|
| 152 |
+
</div>
|
| 153 |
+
</div>
|
| 154 |
+
</div>
|
| 155 |
+
|
| 156 |
+
{/* Accuracy by Confidence */}
|
| 157 |
+
{Object.keys(byConfidence).length > 0 && (
|
| 158 |
+
<div style={{ marginBottom: 'var(--space-8)' }}>
|
| 159 |
+
<h3 style={{ marginBottom: 'var(--space-4)' }}>Accuracy by Confidence Level</h3>
|
| 160 |
+
<div className="stats-grid" style={{ gridTemplateColumns: 'repeat(3, 1fr)' }}>
|
| 161 |
+
{['high', 'medium', 'low'].map((conf) => {
|
| 162 |
+
const confData = byConfidence[conf] || { accuracy: 0, correct: 0, total: 0 }
|
| 163 |
+
const accuracyPercent = (confData.accuracy * 100).toFixed(1)
|
| 164 |
+
return (
|
| 165 |
+
<div key={conf} className="stat-card" style={{ textAlign: 'center' }}>
|
| 166 |
+
<span className={`badge confidence-${conf}`} style={{ marginBottom: 'var(--space-3)' }}>
|
| 167 |
+
{conf.toUpperCase()}
|
| 168 |
+
</span>
|
| 169 |
+
<div className="stat-value" style={{ fontSize: '2rem' }}>{accuracyPercent}%</div>
|
| 170 |
+
<div className="stat-label" style={{ marginTop: 'var(--space-2)' }}>
|
| 171 |
+
{confData.correct}/{confData.total} correct
|
| 172 |
+
</div>
|
| 173 |
+
{/* Progress ring visual */}
|
| 174 |
+
<div style={{
|
| 175 |
+
marginTop: 'var(--space-3)',
|
| 176 |
+
height: '4px',
|
| 177 |
+
background: 'var(--bg-tertiary)',
|
| 178 |
+
borderRadius: '2px',
|
| 179 |
+
overflow: 'hidden'
|
| 180 |
+
}}>
|
| 181 |
+
<div style={{
|
| 182 |
+
height: '100%',
|
| 183 |
+
width: `${accuracyPercent}%`,
|
| 184 |
+
background: conf === 'high' ? 'var(--accent-success)' : conf === 'medium' ? 'var(--accent-warning)' : 'var(--accent-danger)',
|
| 185 |
+
transition: 'width 0.3s ease'
|
| 186 |
+
}}></div>
|
| 187 |
+
</div>
|
| 188 |
+
</div>
|
| 189 |
+
)
|
| 190 |
+
})}
|
| 191 |
+
</div>
|
| 192 |
+
</div>
|
| 193 |
+
)}
|
| 194 |
+
|
| 195 |
+
{/* Accuracy by Team */}
|
| 196 |
+
{stats.by_team && Object.keys(stats.by_team).length > 0 && (
|
| 197 |
+
<div style={{ marginBottom: 'var(--space-8)' }}>
|
| 198 |
+
<h3 style={{ marginBottom: 'var(--space-4)' }}>Accuracy by Team Predicted</h3>
|
| 199 |
+
<div className="table-container">
|
| 200 |
+
<table className="data-table">
|
| 201 |
+
<thead>
|
| 202 |
+
<tr>
|
| 203 |
+
<th>Team</th>
|
| 204 |
+
<th style={{ textAlign: 'center' }}>Correct</th>
|
| 205 |
+
<th style={{ textAlign: 'center' }}>Total</th>
|
| 206 |
+
<th style={{ textAlign: 'right' }}>Accuracy</th>
|
| 207 |
+
</tr>
|
| 208 |
+
</thead>
|
| 209 |
+
<tbody>
|
| 210 |
+
{Object.entries(stats.by_team)
|
| 211 |
+
.sort((a, b) => b[1].accuracy - a[1].accuracy)
|
| 212 |
+
.map(([team, data]) => (
|
| 213 |
+
<tr key={team}>
|
| 214 |
+
<td style={{ fontWeight: '500' }}>{team}</td>
|
| 215 |
+
<td style={{ textAlign: 'center' }}>{data.correct}</td>
|
| 216 |
+
<td style={{ textAlign: 'center', color: 'var(--text-muted)' }}>{data.total}</td>
|
| 217 |
+
<td style={{ textAlign: 'right', color: 'var(--accent-primary)', fontWeight: '600', fontFamily: 'var(--font-mono)' }}>
|
| 218 |
+
{(data.accuracy * 100).toFixed(1)}%
|
| 219 |
+
</td>
|
| 220 |
+
</tr>
|
| 221 |
+
))
|
| 222 |
+
}
|
| 223 |
+
</tbody>
|
| 224 |
+
</table>
|
| 225 |
+
</div>
|
| 226 |
+
</div>
|
| 227 |
+
)}
|
| 228 |
+
|
| 229 |
+
{/* Recent Predictions */}
|
| 230 |
+
<div className="table-container">
|
| 231 |
+
<div style={{ padding: 'var(--space-4) var(--space-4) 0', borderBottom: 'var(--border-subtle)' }}>
|
| 232 |
+
<h3 style={{ marginBottom: 'var(--space-4)' }}>Recent Predictions</h3>
|
| 233 |
+
</div>
|
| 234 |
+
|
| 235 |
+
{recent_predictions.length === 0 ? (
|
| 236 |
+
<div className="empty-state">
|
| 237 |
+
<p className="empty-state-title">No Predictions Yet</p>
|
| 238 |
+
<p className="empty-state-text">Visit the Live Games page to start tracking predictions.</p>
|
| 239 |
+
</div>
|
| 240 |
+
) : (
|
| 241 |
+
<table className="data-table">
|
| 242 |
+
<thead>
|
| 243 |
+
<tr>
|
| 244 |
+
<th>Date</th>
|
| 245 |
+
<th>Matchup</th>
|
| 246 |
+
<th>Prediction</th>
|
| 247 |
+
<th style={{ textAlign: 'center' }}>Confidence</th>
|
| 248 |
+
<th style={{ textAlign: 'center' }}>Result</th>
|
| 249 |
+
</tr>
|
| 250 |
+
</thead>
|
| 251 |
+
<tbody>
|
| 252 |
+
{recent_predictions.map((pred, idx) => {
|
| 253 |
+
const isPending = pred.is_correct === -1
|
| 254 |
+
const isCorrect = pred.is_correct === 1
|
| 255 |
+
|
| 256 |
+
return (
|
| 257 |
+
<tr key={idx}>
|
| 258 |
+
<td style={{ color: 'var(--text-muted)', fontSize: '0.8125rem' }}>
|
| 259 |
+
{pred.game_date || 'N/A'}
|
| 260 |
+
</td>
|
| 261 |
+
<td>
|
| 262 |
+
<span style={{ fontWeight: '500' }}>{pred.away_team || 'N/A'}</span>
|
| 263 |
+
<span style={{ color: 'var(--text-muted)' }}> @ </span>
|
| 264 |
+
<span style={{ fontWeight: '500' }}>{pred.home_team || 'N/A'}</span>
|
| 265 |
+
</td>
|
| 266 |
+
<td style={{ color: 'var(--accent-primary)', fontWeight: '500' }}>
|
| 267 |
+
{pred.predicted_winner || 'N/A'}
|
| 268 |
+
<span style={{ color: 'var(--text-muted)', fontSize: '0.75rem', marginLeft: 'var(--space-2)' }}>
|
| 269 |
+
({((pred.home_win_prob > 0.5 ? pred.home_win_prob : pred.away_win_prob) * 100 || 50).toFixed(0)}%)
|
| 270 |
+
</span>
|
| 271 |
+
</td>
|
| 272 |
+
<td style={{ textAlign: 'center' }}>
|
| 273 |
+
<span className={`badge confidence-${pred.confidence || 'medium'}`}>
|
| 274 |
+
{(pred.confidence || 'medium').toUpperCase()}
|
| 275 |
+
</span>
|
| 276 |
+
</td>
|
| 277 |
+
<td style={{ textAlign: 'center' }}>
|
| 278 |
+
{isPending ? (
|
| 279 |
+
<span className="badge badge-neutral">PENDING</span>
|
| 280 |
+
) : isCorrect ? (
|
| 281 |
+
<span className="badge badge-success">CORRECT</span>
|
| 282 |
+
) : (
|
| 283 |
+
<span className="badge badge-danger">WRONG</span>
|
| 284 |
+
)}
|
| 285 |
+
</td>
|
| 286 |
+
</tr>
|
| 287 |
+
)
|
| 288 |
+
})}
|
| 289 |
+
</tbody>
|
| 290 |
+
</table>
|
| 291 |
+
)}
|
| 292 |
+
</div>
|
| 293 |
+
</div>
|
| 294 |
+
)
|
| 295 |
+
}
|
| 296 |
+
|
| 297 |
+
export default Accuracy
|
web/src/pages/Championship.jsx
import { useState, useEffect } from 'react'
import { getChampionshipOdds } from '../api'
import { TeamLogo } from '../teamLogos'

/**
 * Championship Odds page.
 *
 * Fetches the championship probability ranking once on mount via
 * getChampionshipOdds(), highlights the top four contenders as cards,
 * and lists the remaining teams in a table.
 * NOTE(review): `team.odds` and `team.win_pct` appear to already be
 * percentages (rendered with a bare `%` suffix) — confirm against the API.
 */
function Championship() {
  const [teams, setTeams] = useState([])
  const [loading, setLoading] = useState(true)

  useEffect(() => {
    getChampionshipOdds()
      .then(data => {
        setTeams(data.teams || [])
        setLoading(false)
      })
      .catch(err => {
        // On failure the page renders with an empty ranking.
        console.error('Failed to load championship odds:', err)
        setLoading(false)
      })
  }, [])

  if (loading) {
    return (
      <div className="loading">
        <div className="spinner"></div>
        <p className="loading-text">Loading championship odds...</p>
      </div>
    )
  }

  // Top four get featured cards; the rest go into the table below.
  const top4 = teams.slice(0, 4)
  const rest = teams.slice(4)

  return (
    <div className="animate-fadeIn">
      <div className="page-header">
        <h1 className="page-title">Championship Odds</h1>
        <p className="page-description">2025-26 NBA Championship probability rankings</p>
      </div>

      {/* Top 4 Contenders */}
      {top4.length > 0 && (
        <div className="stats-grid" style={{ gridTemplateColumns: 'repeat(4, 1fr)', marginBottom: 'var(--space-8)' }}>
          {top4.map((team, idx) => (
            <div
              key={team.team}
              className="stat-card animate-slideUp"
              style={{
                textAlign: 'center',
                animationDelay: `${idx * 0.1}s`,
                border: idx === 0 ? '1px solid var(--accent-primary)' : 'var(--border-subtle)'
              }}
            >
              <div style={{
                fontSize: '0.6875rem',
                fontWeight: '600',
                color: 'var(--text-muted)',
                marginBottom: 'var(--space-3)'
              }}>
                #{idx + 1} CONTENDER
              </div>
              <TeamLogo abbrev={team.team} size="lg" style={{ margin: '0 auto var(--space-3)' }} />
              <div style={{ fontSize: '1.125rem', fontWeight: '600', marginBottom: 'var(--space-2)' }}>
                {team.team}
              </div>
              <div className="stat-value accent" style={{ fontSize: '2.5rem' }}>
                {team.odds}%
              </div>
              <div className="stat-label">Championship Odds</div>
              <div style={{
                marginTop: 'var(--space-3)',
                fontSize: '0.75rem',
                color: 'var(--text-muted)',
                fontFamily: 'var(--font-mono)'
              }}>
                Win Rate: {team.win_pct}%
              </div>
            </div>
          ))}
        </div>
      )}

      {/* Full Rankings (teams ranked 5 and below) */}
      {rest.length > 0 && (
        <div className="table-container">
          <table className="data-table">
            <thead>
              <tr>
                <th style={{ width: '60px' }}>Rank</th>
                <th>Team</th>
                <th style={{ textAlign: 'right' }}>Championship Odds</th>
                <th style={{ textAlign: 'right' }}>Win Rate</th>
              </tr>
            </thead>
            <tbody>
              {rest.map((team) => (
                <tr key={team.team}>
                  <td style={{ fontWeight: '600', color: 'var(--text-muted)' }}>#{team.rank}</td>
                  <td>
                    <div className="table-team">
                      <TeamLogo abbrev={team.team} size="sm" />
                      <span style={{ fontWeight: '500' }}>{team.team}</span>
                    </div>
                  </td>
                  <td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)', color: 'var(--accent-primary)', fontWeight: '600' }}>
                    {team.odds}%
                  </td>
                  <td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)', color: 'var(--text-muted)' }}>
                    {team.win_pct}%
                  </td>
                </tr>
              ))}
            </tbody>
          </table>
        </div>
      )}
    </div>
  )
}

export default Championship
web/src/pages/HeadToHead.jsx
import { useState, useEffect } from 'react'
import { getTeams, predictGame } from '../api'
import { TeamLogo, getTeamName } from '../teamLogos'

/**
 * Head to Head comparison page.
 *
 * Lets the user pick two teams and runs the game predictor for both venue
 * scenarios (each team hosting), then shows ELO ratings and win
 * probabilities side by side.
 * NOTE(review): predictGame(a, b) is assumed to take (home, away) — the
 * call sites below are labeled that way; verify against api.js.
 */
function HeadToHead() {
  const [teams, setTeams] = useState([])
  const [team1, setTeam1] = useState('LAL')
  const [team2, setTeam2] = useState('BOS')
  const [comparison, setComparison] = useState(null)
  const [loading, setLoading] = useState(false)

  // Load the team list once for the two <select> dropdowns.
  useEffect(() => {
    getTeams().then(data => {
      setTeams(data.teams || [])
    }).catch(console.error)
  }, [])

  const handleCompare = async () => {
    // Comparing a team against itself is meaningless; the button is also
    // disabled in that case, this is a second guard.
    if (!team1 || !team2 || team1 === team2) return

    setLoading(true)
    try {
      // Get predictions for both scenarios in parallel
      const [homeGame, awayGame] = await Promise.all([
        predictGame(team1, team2), // Team1 at home
        predictGame(team2, team1), // Team2 at home
      ])

      setComparison({
        team1,
        team2,
        homeGame, // Team1 hosting Team2
        awayGame, // Team2 hosting Team1
      })
    } catch (err) {
      console.error('Comparison failed:', err)
    } finally {
      setLoading(false)
    }
  }

  return (
    <div className="animate-fadeIn">
      <div className="page-header">
        <h1 className="page-title">Head to Head</h1>
        <p className="page-description">Compare two teams across different scenarios</p>
      </div>

      {/* Team Selector */}
      <div className="card" style={{ marginBottom: 'var(--space-6)' }}>
        <div style={{ display: 'grid', gridTemplateColumns: '1fr auto 1fr', gap: 'var(--space-6)', alignItems: 'end' }}>
          {/* Team 1 */}
          <div className="form-group" style={{ marginBottom: 0 }}>
            <label className="form-label">Team 1</label>
            <div style={{ display: 'flex', alignItems: 'center', gap: 'var(--space-3)' }}>
              <TeamLogo abbrev={team1} size="md" />
              <select
                className="form-select"
                value={team1}
                onChange={(e) => setTeam1(e.target.value)}
              >
                {teams.map(team => (
                  <option key={team.id} value={team.abbrev}>{team.abbrev} - {getTeamName(team.abbrev)}</option>
                ))}
              </select>
            </div>
          </div>

          <div style={{ color: 'var(--text-dim)', fontWeight: '600', paddingBottom: 'var(--space-3)' }}>VS</div>

          {/* Team 2 */}
          <div className="form-group" style={{ marginBottom: 0 }}>
            <label className="form-label">Team 2</label>
            <div style={{ display: 'flex', alignItems: 'center', gap: 'var(--space-3)' }}>
              <TeamLogo abbrev={team2} size="md" />
              <select
                className="form-select"
                value={team2}
                onChange={(e) => setTeam2(e.target.value)}
              >
                {teams.map(team => (
                  <option key={team.id} value={team.abbrev}>{team.abbrev} - {getTeamName(team.abbrev)}</option>
                ))}
              </select>
            </div>
          </div>
        </div>

        <button
          className="btn btn-primary btn-lg btn-block"
          onClick={handleCompare}
          disabled={loading || team1 === team2}
          style={{ marginTop: 'var(--space-6)' }}
        >
          {loading ? 'Analyzing...' : 'Compare Teams'}
        </button>
      </div>

      {/* Comparison Results — rendered only after a successful compare */}
      {comparison && (
        <div className="animate-slideUp">
          {/* ELO Comparison (ratings taken from the team1-home prediction) */}
          <div className="stats-grid" style={{ marginBottom: 'var(--space-6)' }}>
            <div className="stat-card" style={{ textAlign: 'center' }}>
              <TeamLogo abbrev={comparison.team1} size="lg" style={{ margin: '0 auto var(--space-3)' }} />
              <div style={{ fontSize: '1.125rem', fontWeight: '600', marginBottom: 'var(--space-2)' }}>
                {comparison.team1}
              </div>
              <div className="stat-value accent">
                {comparison.homeGame?.home_elo?.toFixed(0) || 'N/A'}
              </div>
              <div className="stat-label">ELO Rating</div>
            </div>

            <div className="stat-card" style={{ textAlign: 'center', display: 'flex', flexDirection: 'column', justifyContent: 'center' }}>
              <div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', marginBottom: 'var(--space-2)' }}>ELO DIFFERENCE</div>
              {/* Positive elo_diff means team1 (the home side in homeGame) is favored. */}
              <div style={{ fontSize: '2rem', fontWeight: '700', color: comparison.homeGame?.elo_diff > 0 ? 'var(--accent-success)' : 'var(--accent-danger)' }}>
                {comparison.homeGame?.elo_diff > 0 ? '+' : ''}{comparison.homeGame?.elo_diff?.toFixed(0) || 0}
              </div>
              <div style={{ fontSize: '0.75rem', color: 'var(--text-muted)', marginTop: 'var(--space-2)' }}>
                {comparison.homeGame?.elo_diff > 0 ? `${comparison.team1} favored` : `${comparison.team2} favored`}
              </div>
            </div>

            <div className="stat-card" style={{ textAlign: 'center' }}>
              <TeamLogo abbrev={comparison.team2} size="lg" style={{ margin: '0 auto var(--space-3)' }} />
              <div style={{ fontSize: '1.125rem', fontWeight: '600', marginBottom: 'var(--space-2)' }}>
                {comparison.team2}
              </div>
              <div className="stat-value accent">
                {comparison.homeGame?.away_elo?.toFixed(0) || 'N/A'}
              </div>
              <div className="stat-label">ELO Rating</div>
            </div>
          </div>

          {/* Scenario Cards */}
          <h3 style={{ marginBottom: 'var(--space-4)' }}>Win Probabilities by Venue</h3>
          <div style={{ display: 'grid', gridTemplateColumns: '1fr 1fr', gap: 'var(--space-4)' }}>
            {/* Scenario 1: Team1 at Home */}
            <div className="card">
              <div className="card-header">
                <span className="card-title">{comparison.team1} Home Game</span>
              </div>
              <div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: 'var(--space-4)' }}>
                <div style={{ textAlign: 'center' }}>
                  <TeamLogo abbrev={comparison.team1} size="md" />
                  <div style={{ fontSize: '1.5rem', fontWeight: '700', color: 'var(--accent-primary)', marginTop: 'var(--space-2)' }}>
                    {(comparison.homeGame?.home_win_probability * 100).toFixed(1)}%
                  </div>
                </div>
                <div style={{ color: 'var(--text-dim)' }}>vs</div>
                <div style={{ textAlign: 'center' }}>
                  <TeamLogo abbrev={comparison.team2} size="md" />
                  <div style={{ fontSize: '1.5rem', fontWeight: '700', color: 'var(--accent-secondary)', marginTop: 'var(--space-2)' }}>
                    {(comparison.homeGame?.away_win_probability * 100).toFixed(1)}%
                  </div>
                </div>
              </div>
              <div style={{ fontSize: '0.875rem', color: 'var(--text-muted)', textAlign: 'center' }}>
                Prediction: <span style={{ color: 'var(--accent-primary)', fontWeight: '600' }}>{comparison.homeGame?.predicted_winner}</span>
              </div>
            </div>

            {/* Scenario 2: Team2 at Home */}
            <div className="card">
              <div className="card-header">
                <span className="card-title">{comparison.team2} Home Game</span>
              </div>
              <div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: 'var(--space-4)' }}>
                <div style={{ textAlign: 'center' }}>
                  <TeamLogo abbrev={comparison.team2} size="md" />
                  <div style={{ fontSize: '1.5rem', fontWeight: '700', color: 'var(--accent-primary)', marginTop: 'var(--space-2)' }}>
                    {(comparison.awayGame?.home_win_probability * 100).toFixed(1)}%
                  </div>
                </div>
                <div style={{ color: 'var(--text-dim)' }}>vs</div>
                <div style={{ textAlign: 'center' }}>
                  <TeamLogo abbrev={comparison.team1} size="md" />
                  <div style={{ fontSize: '1.5rem', fontWeight: '700', color: 'var(--accent-secondary)', marginTop: 'var(--space-2)' }}>
                    {(comparison.awayGame?.away_win_probability * 100).toFixed(1)}%
                  </div>
                </div>
              </div>
              <div style={{ fontSize: '0.875rem', color: 'var(--text-muted)', textAlign: 'center' }}>
                Prediction: <span style={{ color: 'var(--accent-primary)', fontWeight: '600' }}>{comparison.awayGame?.predicted_winner}</span>
              </div>
            </div>
          </div>
        </div>
      )}
    </div>
  )
}

export default HeadToHead
web/src/pages/LiveGames.jsx
ADDED
|
@@ -0,0 +1,320 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useState, useEffect } from 'react'
|
| 2 |
+
import { getLiveGames } from '../api'
|
| 3 |
+
import { TeamLogo } from '../teamLogos'
|
| 4 |
+
import { IconRefresh } from '../icons'
|
| 5 |
+
|
| 6 |
+
// Fetch roster for a team - use relative URL in production
|
| 7 |
+
const API_BASE = import.meta.env.DEV ? 'http://localhost:8000' : '';
|
| 8 |
+
async function getTeamRoster(teamAbbrev) {
|
| 9 |
+
const response = await fetch(`${API_BASE}/api/roster/${teamAbbrev}`)
|
| 10 |
+
return response.json()
|
| 11 |
+
}
|
| 12 |
+
|
| 13 |
+
function LiveGames() {
|
| 14 |
+
const [data, setData] = useState({ live: [], final: [], upcoming: [] })
|
| 15 |
+
const [loading, setLoading] = useState(true)
|
| 16 |
+
const [error, setError] = useState(null)
|
| 17 |
+
const [lastRefresh, setLastRefresh] = useState(new Date())
|
| 18 |
+
|
| 19 |
+
const fetchGames = async () => {
|
| 20 |
+
try {
|
| 21 |
+
const result = await getLiveGames()
|
| 22 |
+
setData(result)
|
| 23 |
+
setLastRefresh(new Date())
|
| 24 |
+
setError(null)
|
| 25 |
+
} catch (err) {
|
| 26 |
+
setError('Failed to connect to API. Make sure the server is running.')
|
| 27 |
+
} finally {
|
| 28 |
+
setLoading(false)
|
| 29 |
+
}
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
useEffect(() => {
|
| 33 |
+
fetchGames()
|
| 34 |
+
const interval = setInterval(fetchGames, 15000)
|
| 35 |
+
return () => clearInterval(interval)
|
| 36 |
+
}, [])
|
| 37 |
+
|
| 38 |
+
if (loading) {
|
| 39 |
+
return (
|
| 40 |
+
<div className="loading">
|
| 41 |
+
<div className="spinner"></div>
|
| 42 |
+
<p className="loading-text">Loading games...</p>
|
| 43 |
+
</div>
|
| 44 |
+
)
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
if (error) {
|
| 48 |
+
return (
|
| 49 |
+
<div className="empty-state">
|
| 50 |
+
<p className="empty-state-title">Connection Error</p>
|
| 51 |
+
<p className="empty-state-text">{error}</p>
|
| 52 |
+
<button className="btn btn-primary" onClick={fetchGames} style={{ marginTop: 'var(--space-4)' }}>
|
| 53 |
+
Retry
|
| 54 |
+
</button>
|
| 55 |
+
</div>
|
| 56 |
+
)
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
const hasGames = data.live?.length > 0 || data.final?.length > 0 || data.upcoming?.length > 0
|
| 60 |
+
|
| 61 |
+
return (
|
| 62 |
+
<div className="animate-fadeIn">
|
| 63 |
+
<div className="page-header">
|
| 64 |
+
<div className="flex items-center justify-between">
|
| 65 |
+
<div>
|
| 66 |
+
<h1 className="page-title">Live Games</h1>
|
| 67 |
+
<p className="page-description">
|
| 68 |
+
Last updated: {lastRefresh.toLocaleTimeString()} • Auto-refreshes every 15s
|
| 69 |
+
</p>
|
| 70 |
+
</div>
|
| 71 |
+
<button className="btn btn-secondary" onClick={fetchGames}>
|
| 72 |
+
<IconRefresh className="nav-icon" />
|
| 73 |
+
Refresh
|
| 74 |
+
</button>
|
| 75 |
+
</div>
|
| 76 |
+
</div>
|
| 77 |
+
|
| 78 |
+
{!hasGames ? (
|
| 79 |
+
<div className="empty-state">
|
| 80 |
+
<p className="empty-state-title">No Games Today</p>
|
| 81 |
+
<p className="empty-state-text">Check back later for scheduled NBA games.</p>
|
| 82 |
+
</div>
|
| 83 |
+
) : (
|
| 84 |
+
<>
|
| 85 |
+
{/* Live Games */}
|
| 86 |
+
{data.live?.length > 0 && (
|
| 87 |
+
<section className="section">
|
| 88 |
+
<div className="section-header">
|
| 89 |
+
<span className="section-title flex items-center gap-2">
|
| 90 |
+
<span className="live-dot"></span>
|
| 91 |
+
In Progress
|
| 92 |
+
</span>
|
| 93 |
+
</div>
|
| 94 |
+
<div className="stagger">
|
| 95 |
+
{data.live.map((game) => (
|
| 96 |
+
<GameCard key={game.game_id} game={game} isLive={true} />
|
| 97 |
+
))}
|
| 98 |
+
</div>
|
| 99 |
+
</section>
|
| 100 |
+
)}
|
| 101 |
+
|
| 102 |
+
{/* Final Games */}
|
| 103 |
+
{data.final?.length > 0 && (
|
| 104 |
+
<section className="section">
|
| 105 |
+
<div className="section-header">
|
| 106 |
+
<span className="section-title">Completed</span>
|
| 107 |
+
</div>
|
| 108 |
+
<div className="stagger">
|
| 109 |
+
{data.final.map((game) => (
|
| 110 |
+
<GameCard key={game.game_id} game={game} isFinal={true} />
|
| 111 |
+
))}
|
| 112 |
+
</div>
|
| 113 |
+
</section>
|
| 114 |
+
)}
|
| 115 |
+
|
| 116 |
+
{/* Upcoming Games */}
|
| 117 |
+
{data.upcoming?.length > 0 && (
|
| 118 |
+
<section className="section">
|
| 119 |
+
<div className="section-header">
|
| 120 |
+
<span className="section-title">Upcoming Today</span>
|
| 121 |
+
</div>
|
| 122 |
+
<div className="stagger">
|
| 123 |
+
{data.upcoming.map((game) => (
|
| 124 |
+
<GameCard key={game.game_id} game={game} showLineups={true} />
|
| 125 |
+
))}
|
| 126 |
+
</div>
|
| 127 |
+
</section>
|
| 128 |
+
)}
|
| 129 |
+
</>
|
| 130 |
+
)}
|
| 131 |
+
</div>
|
| 132 |
+
)
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
function GameCard({ game, isLive, isFinal, showLineups }) {
|
| 136 |
+
const prediction = game.prediction || {}
|
| 137 |
+
const homeProb = (prediction.home_win_probability || 0.5) * 100
|
| 138 |
+
const awayProb = (prediction.away_win_probability || 0.5) * 100
|
| 139 |
+
|
| 140 |
+
const [showRosters, setShowRosters] = useState(false)
|
| 141 |
+
const [homeRoster, setHomeRoster] = useState([])
|
| 142 |
+
const [awayRoster, setAwayRoster] = useState([])
|
| 143 |
+
const [loadingRosters, setLoadingRosters] = useState(false)
|
| 144 |
+
|
| 145 |
+
const fetchRosters = async () => {
|
| 146 |
+
if (homeRoster.length > 0 && awayRoster.length > 0) {
|
| 147 |
+
setShowRosters(!showRosters)
|
| 148 |
+
return
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
setLoadingRosters(true)
|
| 152 |
+
try {
|
| 153 |
+
const [homeData, awayData] = await Promise.all([
|
| 154 |
+
getTeamRoster(game.home_team),
|
| 155 |
+
getTeamRoster(game.away_team)
|
| 156 |
+
])
|
| 157 |
+
setHomeRoster(homeData.starters || [])
|
| 158 |
+
setAwayRoster(awayData.starters || [])
|
| 159 |
+
setShowRosters(true)
|
| 160 |
+
} catch (err) {
|
| 161 |
+
console.error('Failed to fetch rosters:', err)
|
| 162 |
+
} finally {
|
| 163 |
+
setLoadingRosters(false)
|
| 164 |
+
}
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
return (
|
| 168 |
+
<div className={`game-card animate-slideUp ${isLive ? 'live' : ''}`}>
|
| 169 |
+
{/* Header */}
|
| 170 |
+
<div className="game-header">
|
| 171 |
+
<div className="game-status" style={{ color: isLive ? 'var(--accent-danger)' : isFinal ? 'var(--text-muted)' : 'var(--text-secondary)' }}>
|
| 172 |
+
{isLive && (
|
| 173 |
+
<span className="flex items-center gap-2">
|
| 174 |
+
<span className="live-dot"></span>
|
| 175 |
+
{game.period && `Q${game.period}`} {game.clock || 'LIVE'}
|
| 176 |
+
</span>
|
| 177 |
+
)}
|
| 178 |
+
{isFinal && 'FINAL'}
|
| 179 |
+
{!isLive && !isFinal && (game.status_text || 'Scheduled')}
|
| 180 |
+
</div>
|
| 181 |
+
<span className="game-time">{game.game_date}</span>
|
| 182 |
+
</div>
|
| 183 |
+
|
| 184 |
+
{/* Matchup */}
|
| 185 |
+
<div className="game-matchup">
|
| 186 |
+
{/* Away Team */}
|
| 187 |
+
<div className="team-block away">
|
| 188 |
+
<div className="team-info">
|
| 189 |
+
<TeamLogo abbrev={game.away_team} size="lg" />
|
| 190 |
+
<div className="team-details">
|
| 191 |
+
<span className="team-name">{game.away_team}</span>
|
| 192 |
+
<span className="team-record">{game.away_record || '0-0'}</span>
|
| 193 |
+
</div>
|
| 194 |
+
</div>
|
| 195 |
+
{(isLive || isFinal) ? (
|
| 196 |
+
<div className="team-score">{game.away_score}</div>
|
| 197 |
+
) : (
|
| 198 |
+
<div className="team-probability">{awayProb.toFixed(0)}%</div>
|
| 199 |
+
)}
|
| 200 |
+
</div>
|
| 201 |
+
|
| 202 |
+
{/* Center */}
|
| 203 |
+
<div className="game-center">
|
| 204 |
+
{!isFinal && prediction.predicted_winner && (
|
| 205 |
+
<div className="prediction-indicator">
|
| 206 |
+
<div className="prediction-label">Prediction</div>
|
| 207 |
+
<div className="prediction-pick">{prediction.predicted_winner}</div>
|
| 208 |
+
<span className={`badge confidence-${prediction.confidence || 'medium'}`} style={{ marginTop: 'var(--space-2)' }}>
|
| 209 |
+
{(prediction.confidence || 'medium').toUpperCase()}
|
| 210 |
+
</span>
|
| 211 |
+
</div>
|
| 212 |
+
)}
|
| 213 |
+
{isFinal && (
|
| 214 |
+
<div className="prediction-indicator">
|
| 215 |
+
<div className="prediction-label">Predicted</div>
|
| 216 |
+
<div className="prediction-pick">{prediction.predicted_winner || 'N/A'}</div>
|
| 217 |
+
<span className={`badge ${game.prediction_correct ? 'badge-success' : 'badge-danger'}`} style={{ marginTop: 'var(--space-2)' }}>
|
| 218 |
+
{game.prediction_correct ? 'CORRECT' : 'WRONG'}
|
| 219 |
+
</span>
|
| 220 |
+
</div>
|
| 221 |
+
)}
|
| 222 |
+
</div>
|
| 223 |
+
|
| 224 |
+
{/* Home Team */}
|
| 225 |
+
<div className="team-block home">
|
| 226 |
+
<div className="team-info">
|
| 227 |
+
<TeamLogo abbrev={game.home_team} size="lg" />
|
| 228 |
+
<div className="team-details">
|
| 229 |
+
<span className="team-name">{game.home_team}</span>
|
| 230 |
+
<span className="team-record">{game.home_record || '0-0'}</span>
|
| 231 |
+
</div>
|
| 232 |
+
</div>
|
| 233 |
+
{(isLive || isFinal) ? (
|
| 234 |
+
<div className="team-score">{game.home_score}</div>
|
| 235 |
+
) : (
|
| 236 |
+
<div className="team-probability">{homeProb.toFixed(0)}%</div>
|
| 237 |
+
)}
|
| 238 |
+
</div>
|
| 239 |
+
</div>
|
| 240 |
+
|
| 241 |
+
{/* Probability Bar */}
|
| 242 |
+
{!isFinal && (
|
| 243 |
+
<div className="probability-bar-container">
|
| 244 |
+
<div className="probability-bar">
|
| 245 |
+
<div className="probability-fill-away" style={{ width: `${awayProb}%` }}></div>
|
| 246 |
+
<div className="probability-fill-home" style={{ width: `${homeProb}%` }}></div>
|
| 247 |
+
</div>
|
| 248 |
+
<div className="probability-labels">
|
| 249 |
+
<span>{game.away_team}: {awayProb.toFixed(1)}%</span>
|
| 250 |
+
<span>{game.home_team}: {homeProb.toFixed(1)}%</span>
|
| 251 |
+
</div>
|
| 252 |
+
</div>
|
| 253 |
+
)}
|
| 254 |
+
|
| 255 |
+
{/* Starting Lineups Toggle */}
|
| 256 |
+
{!isFinal && (
|
| 257 |
+
<div style={{ marginTop: 'var(--space-4)', borderTop: 'var(--border-subtle)', paddingTop: 'var(--space-4)' }}>
|
| 258 |
+
<button
|
| 259 |
+
className="btn btn-ghost btn-block"
|
| 260 |
+
onClick={fetchRosters}
|
| 261 |
+
disabled={loadingRosters}
|
| 262 |
+
>
|
| 263 |
+
{loadingRosters ? 'Loading...' : showRosters ? 'Hide Starting Lineups' : 'Show Projected Starting 5'}
|
| 264 |
+
</button>
|
| 265 |
+
|
| 266 |
+
{/* Rosters Display */}
|
| 267 |
+
{showRosters && (
|
| 268 |
+
<div style={{ marginTop: 'var(--space-4)', display: 'grid', gridTemplateColumns: '1fr 1fr', gap: 'var(--space-6)' }}>
|
| 269 |
+
{/* Away Team Roster */}
|
| 270 |
+
<div>
|
| 271 |
+
<div style={{ fontSize: '0.75rem', fontWeight: '600', textTransform: 'uppercase', letterSpacing: '0.05em', color: 'var(--text-muted)', marginBottom: 'var(--space-3)' }}>
|
| 272 |
+
{game.away_team} Starters
|
| 273 |
+
</div>
|
| 274 |
+
<div style={{ display: 'flex', flexDirection: 'column', gap: 'var(--space-2)' }}>
|
| 275 |
+
{awayRoster.length > 0 ? awayRoster.map((player, idx) => (
|
| 276 |
+
<div key={idx} style={{
|
| 277 |
+
display: 'flex',
|
| 278 |
+
justifyContent: 'space-between',
|
| 279 |
+
padding: 'var(--space-2) var(--space-3)',
|
| 280 |
+
background: 'var(--bg-elevated)',
|
| 281 |
+
borderRadius: 'var(--radius-sm)',
|
| 282 |
+
fontSize: '0.8125rem'
|
| 283 |
+
}}>
|
| 284 |
+
<span style={{ fontWeight: '500' }}>{idx + 1}. {player.name}</span>
|
| 285 |
+
<span style={{ color: 'var(--accent-primary)', fontFamily: 'var(--font-mono)' }}>{player.pts || 0} PPG</span>
|
| 286 |
+
</div>
|
| 287 |
+
)) : <span style={{ color: 'var(--text-muted)', fontSize: '0.8125rem' }}>Lineup unavailable</span>}
|
| 288 |
+
</div>
|
| 289 |
+
</div>
|
| 290 |
+
|
| 291 |
+
{/* Home Team Roster */}
|
| 292 |
+
<div>
|
| 293 |
+
<div style={{ fontSize: '0.75rem', fontWeight: '600', textTransform: 'uppercase', letterSpacing: '0.05em', color: 'var(--text-muted)', marginBottom: 'var(--space-3)' }}>
|
| 294 |
+
{game.home_team} Starters
|
| 295 |
+
</div>
|
| 296 |
+
<div style={{ display: 'flex', flexDirection: 'column', gap: 'var(--space-2)' }}>
|
| 297 |
+
{homeRoster.length > 0 ? homeRoster.map((player, idx) => (
|
| 298 |
+
<div key={idx} style={{
|
| 299 |
+
display: 'flex',
|
| 300 |
+
justifyContent: 'space-between',
|
| 301 |
+
padding: 'var(--space-2) var(--space-3)',
|
| 302 |
+
background: 'var(--bg-elevated)',
|
| 303 |
+
borderRadius: 'var(--radius-sm)',
|
| 304 |
+
fontSize: '0.8125rem'
|
| 305 |
+
}}>
|
| 306 |
+
<span style={{ fontWeight: '500' }}>{idx + 1}. {player.name}</span>
|
| 307 |
+
<span style={{ color: 'var(--accent-primary)', fontFamily: 'var(--font-mono)' }}>{player.pts || 0} PPG</span>
|
| 308 |
+
</div>
|
| 309 |
+
)) : <span style={{ color: 'var(--text-muted)', fontSize: '0.8125rem' }}>Lineup unavailable</span>}
|
| 310 |
+
</div>
|
| 311 |
+
</div>
|
| 312 |
+
</div>
|
| 313 |
+
)}
|
| 314 |
+
</div>
|
| 315 |
+
)}
|
| 316 |
+
</div>
|
| 317 |
+
)
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
export default LiveGames
|
web/src/pages/MvpRace.jsx
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useState, useEffect } from 'react'
|
| 2 |
+
import { getMvpRace } from '../api'
|
| 3 |
+
import { TeamLogo } from '../teamLogos'
|
| 4 |
+
|
| 5 |
+
function MvpRace() {
|
| 6 |
+
const [candidates, setCandidates] = useState([])
|
| 7 |
+
const [loading, setLoading] = useState(true)
|
| 8 |
+
|
| 9 |
+
useEffect(() => {
|
| 10 |
+
getMvpRace()
|
| 11 |
+
.then(data => {
|
| 12 |
+
setCandidates(data.candidates || [])
|
| 13 |
+
setLoading(false)
|
| 14 |
+
})
|
| 15 |
+
.catch(err => {
|
| 16 |
+
console.error('Failed to load MVP race:', err)
|
| 17 |
+
setLoading(false)
|
| 18 |
+
})
|
| 19 |
+
}, [])
|
| 20 |
+
|
| 21 |
+
if (loading) {
|
| 22 |
+
return (
|
| 23 |
+
<div className="loading">
|
| 24 |
+
<div className="spinner"></div>
|
| 25 |
+
<p className="loading-text">Loading MVP race...</p>
|
| 26 |
+
</div>
|
| 27 |
+
)
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
const top3 = candidates.slice(0, 3)
|
| 31 |
+
const rest = candidates.slice(3)
|
| 32 |
+
|
| 33 |
+
return (
|
| 34 |
+
<div className="animate-fadeIn">
|
| 35 |
+
<div className="page-header">
|
| 36 |
+
<h1 className="page-title">MVP Race</h1>
|
| 37 |
+
<p className="page-description">2025-26 MVP candidates ranked by performance metrics</p>
|
| 38 |
+
</div>
|
| 39 |
+
|
| 40 |
+
{/* Top 3 Podium */}
|
| 41 |
+
{top3.length > 0 && (
|
| 42 |
+
<div style={{ display: 'grid', gridTemplateColumns: 'repeat(3, 1fr)', gap: 'var(--space-4)', marginBottom: 'var(--space-8)' }}>
|
| 43 |
+
{top3.map((player, idx) => {
|
| 44 |
+
const rankStyles = [
|
| 45 |
+
{ border: '1px solid #FFD700', boxShadow: '0 0 20px rgba(255, 215, 0, 0.1)' },
|
| 46 |
+
{ border: '1px solid #C0C0C0' },
|
| 47 |
+
{ border: '1px solid #CD7F32' },
|
| 48 |
+
]
|
| 49 |
+
const rankLabels = ['1ST', '2ND', '3RD']
|
| 50 |
+
|
| 51 |
+
return (
|
| 52 |
+
<div
|
| 53 |
+
key={player.name}
|
| 54 |
+
className="card animate-slideUp"
|
| 55 |
+
style={{
|
| 56 |
+
...rankStyles[idx],
|
| 57 |
+
textAlign: 'center',
|
| 58 |
+
animationDelay: `${idx * 0.1}s`
|
| 59 |
+
}}
|
| 60 |
+
>
|
| 61 |
+
<div style={{
|
| 62 |
+
fontSize: '0.6875rem',
|
| 63 |
+
fontWeight: '700',
|
| 64 |
+
letterSpacing: '0.1em',
|
| 65 |
+
color: idx === 0 ? '#FFD700' : idx === 1 ? '#C0C0C0' : '#CD7F32',
|
| 66 |
+
marginBottom: 'var(--space-4)'
|
| 67 |
+
}}>
|
| 68 |
+
{rankLabels[idx]}
|
| 69 |
+
</div>
|
| 70 |
+
|
| 71 |
+
<div style={{
|
| 72 |
+
width: '64px',
|
| 73 |
+
height: '64px',
|
| 74 |
+
borderRadius: '50%',
|
| 75 |
+
background: 'var(--bg-elevated)',
|
| 76 |
+
margin: '0 auto var(--space-3)',
|
| 77 |
+
display: 'flex',
|
| 78 |
+
alignItems: 'center',
|
| 79 |
+
justifyContent: 'center',
|
| 80 |
+
fontSize: '1.5rem',
|
| 81 |
+
fontWeight: '700',
|
| 82 |
+
color: 'var(--accent-primary)'
|
| 83 |
+
}}>
|
| 84 |
+
{player.name.split(' ').map(n => n[0]).join('')}
|
| 85 |
+
</div>
|
| 86 |
+
|
| 87 |
+
<h3 style={{ marginBottom: 'var(--space-2)' }}>{player.name}</h3>
|
| 88 |
+
<p style={{ color: 'var(--text-muted)', fontSize: '0.875rem', marginBottom: 'var(--space-4)' }}>
|
| 89 |
+
{player.ppg} PPG / {player.rpg} RPG / {player.apg} APG
|
| 90 |
+
</p>
|
| 91 |
+
|
| 92 |
+
<div style={{
|
| 93 |
+
fontSize: '2rem',
|
| 94 |
+
fontWeight: '700',
|
| 95 |
+
color: 'var(--accent-primary)',
|
| 96 |
+
marginBottom: 'var(--space-1)'
|
| 97 |
+
}}>
|
| 98 |
+
{player.mvp_score}
|
| 99 |
+
</div>
|
| 100 |
+
<div style={{ fontSize: '0.6875rem', color: 'var(--text-muted)', textTransform: 'uppercase', letterSpacing: '0.05em' }}>
|
| 101 |
+
MVP Score
|
| 102 |
+
</div>
|
| 103 |
+
</div>
|
| 104 |
+
)
|
| 105 |
+
})}
|
| 106 |
+
</div>
|
| 107 |
+
)}
|
| 108 |
+
|
| 109 |
+
{/* Full Rankings */}
|
| 110 |
+
{rest.length > 0 && (
|
| 111 |
+
<div className="table-container">
|
| 112 |
+
<table className="data-table">
|
| 113 |
+
<thead>
|
| 114 |
+
<tr>
|
| 115 |
+
<th style={{ width: '60px' }}>Rank</th>
|
| 116 |
+
<th>Player</th>
|
| 117 |
+
<th style={{ textAlign: 'right' }}>PPG</th>
|
| 118 |
+
<th style={{ textAlign: 'right' }}>RPG</th>
|
| 119 |
+
<th style={{ textAlign: 'right' }}>APG</th>
|
| 120 |
+
<th style={{ textAlign: 'right' }}>MVP Score</th>
|
| 121 |
+
</tr>
|
| 122 |
+
</thead>
|
| 123 |
+
<tbody>
|
| 124 |
+
{rest.map((player) => (
|
| 125 |
+
<tr key={player.name}>
|
| 126 |
+
<td style={{ fontWeight: '600', color: 'var(--text-muted)' }}>#{player.rank}</td>
|
| 127 |
+
<td style={{ fontWeight: '500' }}>{player.name}</td>
|
| 128 |
+
<td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)' }}>{player.ppg}</td>
|
| 129 |
+
<td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)' }}>{player.rpg}</td>
|
| 130 |
+
<td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)' }}>{player.apg}</td>
|
| 131 |
+
<td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)', color: 'var(--accent-primary)', fontWeight: '600' }}>
|
| 132 |
+
{player.mvp_score}
|
| 133 |
+
</td>
|
| 134 |
+
</tr>
|
| 135 |
+
))}
|
| 136 |
+
</tbody>
|
| 137 |
+
</table>
|
| 138 |
+
</div>
|
| 139 |
+
)}
|
| 140 |
+
</div>
|
| 141 |
+
)
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
export default MvpRace
|
web/src/pages/PlayerStats.jsx
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useState } from 'react'
|
| 2 |
+
import { IconSearch } from '../icons'
|
| 3 |
+
|
| 4 |
+
// Simulated player data - in production this would come from API
|
| 5 |
+
const MOCK_PLAYERS = [
|
| 6 |
+
{ id: 1, name: 'LeBron James', team: 'LAL', ppg: 25.4, rpg: 7.2, apg: 8.1, fg_pct: 54.2, position: 'SF' },
|
| 7 |
+
{ id: 2, name: 'Stephen Curry', team: 'GSW', ppg: 26.8, rpg: 4.5, apg: 5.2, fg_pct: 45.1, position: 'PG' },
|
| 8 |
+
{ id: 3, name: 'Giannis Antetokounmpo', team: 'MIL', ppg: 31.2, rpg: 11.5, apg: 5.8, fg_pct: 61.2, position: 'PF' },
|
| 9 |
+
{ id: 4, name: 'Nikola Jokic', team: 'DEN', ppg: 26.5, rpg: 12.2, apg: 9.1, fg_pct: 58.3, position: 'C' },
|
| 10 |
+
{ id: 5, name: 'Jayson Tatum', team: 'BOS', ppg: 27.0, rpg: 8.1, apg: 4.6, fg_pct: 47.1, position: 'SF' },
|
| 11 |
+
{ id: 6, name: 'Luka Doncic', team: 'DAL', ppg: 33.2, rpg: 9.1, apg: 9.5, fg_pct: 48.7, position: 'PG' },
|
| 12 |
+
{ id: 7, name: 'Kevin Durant', team: 'PHX', ppg: 27.5, rpg: 6.5, apg: 5.2, fg_pct: 52.4, position: 'SF' },
|
| 13 |
+
{ id: 8, name: 'Joel Embiid', team: 'PHI', ppg: 34.1, rpg: 11.0, apg: 5.7, fg_pct: 54.8, position: 'C' },
|
| 14 |
+
{ id: 9, name: 'Shai Gilgeous-Alexander', team: 'OKC', ppg: 31.5, rpg: 5.5, apg: 6.2, fg_pct: 53.5, position: 'SG' },
|
| 15 |
+
{ id: 10, name: 'Anthony Edwards', team: 'MIN', ppg: 26.0, rpg: 5.8, apg: 5.0, fg_pct: 46.2, position: 'SG' },
|
| 16 |
+
];
|
| 17 |
+
|
| 18 |
+
function PlayerStats() {
|
| 19 |
+
const [searchQuery, setSearchQuery] = useState('')
|
| 20 |
+
const [selectedPlayer, setSelectedPlayer] = useState(null)
|
| 21 |
+
|
| 22 |
+
const filteredPlayers = MOCK_PLAYERS.filter(player =>
|
| 23 |
+
player.name.toLowerCase().includes(searchQuery.toLowerCase()) ||
|
| 24 |
+
player.team.toLowerCase().includes(searchQuery.toLowerCase())
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
return (
|
| 28 |
+
<div className="animate-fadeIn">
|
| 29 |
+
<div className="page-header">
|
| 30 |
+
<h1 className="page-title">Player Stats</h1>
|
| 31 |
+
<p className="page-description">Search and compare NBA player statistics</p>
|
| 32 |
+
</div>
|
| 33 |
+
|
| 34 |
+
{/* Search */}
|
| 35 |
+
<div className="card" style={{ marginBottom: 'var(--space-6)' }}>
|
| 36 |
+
<div style={{ position: 'relative' }}>
|
| 37 |
+
<IconSearch className="nav-icon" style={{
|
| 38 |
+
position: 'absolute',
|
| 39 |
+
left: 'var(--space-4)',
|
| 40 |
+
top: '50%',
|
| 41 |
+
transform: 'translateY(-50%)',
|
| 42 |
+
color: 'var(--text-muted)'
|
| 43 |
+
}} />
|
| 44 |
+
<input
|
| 45 |
+
type="text"
|
| 46 |
+
className="form-input"
|
| 47 |
+
placeholder="Search players or teams..."
|
| 48 |
+
value={searchQuery}
|
| 49 |
+
onChange={(e) => setSearchQuery(e.target.value)}
|
| 50 |
+
style={{ paddingLeft: 'var(--space-10)' }}
|
| 51 |
+
/>
|
| 52 |
+
</div>
|
| 53 |
+
</div>
|
| 54 |
+
|
| 55 |
+
<div style={{ display: 'grid', gridTemplateColumns: selectedPlayer ? '1fr 1fr' : '1fr', gap: 'var(--space-6)' }}>
|
| 56 |
+
{/* Player List */}
|
| 57 |
+
<div className="table-container">
|
| 58 |
+
<table className="data-table">
|
| 59 |
+
<thead>
|
| 60 |
+
<tr>
|
| 61 |
+
<th>Player</th>
|
| 62 |
+
<th>Team</th>
|
| 63 |
+
<th>Pos</th>
|
| 64 |
+
<th style={{ textAlign: 'right' }}>PPG</th>
|
| 65 |
+
<th style={{ textAlign: 'right' }}>RPG</th>
|
| 66 |
+
<th style={{ textAlign: 'right' }}>APG</th>
|
| 67 |
+
</tr>
|
| 68 |
+
</thead>
|
| 69 |
+
<tbody>
|
| 70 |
+
{filteredPlayers.map((player) => (
|
| 71 |
+
<tr
|
| 72 |
+
key={player.id}
|
| 73 |
+
onClick={() => setSelectedPlayer(player)}
|
| 74 |
+
style={{ cursor: 'pointer', background: selectedPlayer?.id === player.id ? 'var(--bg-elevated)' : undefined }}
|
| 75 |
+
>
|
| 76 |
+
<td style={{ fontWeight: '500' }}>{player.name}</td>
|
| 77 |
+
<td style={{ color: 'var(--text-muted)' }}>{player.team}</td>
|
| 78 |
+
<td style={{ color: 'var(--text-muted)' }}>{player.position}</td>
|
| 79 |
+
<td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)', color: 'var(--accent-primary)' }}>{player.ppg}</td>
|
| 80 |
+
<td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)' }}>{player.rpg}</td>
|
| 81 |
+
<td style={{ textAlign: 'right', fontFamily: 'var(--font-mono)' }}>{player.apg}</td>
|
| 82 |
+
</tr>
|
| 83 |
+
))}
|
| 84 |
+
</tbody>
|
| 85 |
+
</table>
|
| 86 |
+
</div>
|
| 87 |
+
|
| 88 |
+
{/* Player Detail Card */}
|
| 89 |
+
{selectedPlayer && (
|
| 90 |
+
<div className="card animate-slideUp">
|
| 91 |
+
<div style={{ textAlign: 'center', marginBottom: 'var(--space-6)' }}>
|
| 92 |
+
<div style={{
|
| 93 |
+
width: '80px',
|
| 94 |
+
height: '80px',
|
| 95 |
+
borderRadius: '50%',
|
| 96 |
+
background: 'var(--bg-elevated)',
|
| 97 |
+
margin: '0 auto var(--space-4)',
|
| 98 |
+
display: 'flex',
|
| 99 |
+
alignItems: 'center',
|
| 100 |
+
justifyContent: 'center',
|
| 101 |
+
fontSize: '2rem',
|
| 102 |
+
fontWeight: '700',
|
| 103 |
+
color: 'var(--accent-primary)'
|
| 104 |
+
}}>
|
| 105 |
+
{selectedPlayer.name.split(' ').map(n => n[0]).join('')}
|
| 106 |
+
</div>
|
| 107 |
+
<h2 style={{ marginBottom: 'var(--space-2)' }}>{selectedPlayer.name}</h2>
|
| 108 |
+
<p style={{ color: 'var(--text-muted)' }}>
|
| 109 |
+
{selectedPlayer.team} • {selectedPlayer.position}
|
| 110 |
+
</p>
|
| 111 |
+
</div>
|
| 112 |
+
|
| 113 |
+
<div className="stats-grid" style={{ gridTemplateColumns: '1fr 1fr' }}>
|
| 114 |
+
<div className="stat-card">
|
| 115 |
+
<div className="stat-value accent">{selectedPlayer.ppg}</div>
|
| 116 |
+
<div className="stat-label">Points Per Game</div>
|
| 117 |
+
</div>
|
| 118 |
+
<div className="stat-card">
|
| 119 |
+
<div className="stat-value">{selectedPlayer.rpg}</div>
|
| 120 |
+
<div className="stat-label">Rebounds Per Game</div>
|
| 121 |
+
</div>
|
| 122 |
+
<div className="stat-card">
|
| 123 |
+
<div className="stat-value">{selectedPlayer.apg}</div>
|
| 124 |
+
<div className="stat-label">Assists Per Game</div>
|
| 125 |
+
</div>
|
| 126 |
+
<div className="stat-card">
|
| 127 |
+
<div className="stat-value">{selectedPlayer.fg_pct}%</div>
|
| 128 |
+
<div className="stat-label">Field Goal %</div>
|
| 129 |
+
</div>
|
| 130 |
+
</div>
|
| 131 |
+
|
| 132 |
+
<div style={{ marginTop: 'var(--space-4)', fontSize: '0.75rem', color: 'var(--text-muted)', textAlign: 'center' }}>
|
| 133 |
+
Note: Player stats are currently mock data. Connect to NBA API for live stats.
|
| 134 |
+
</div>
|
| 135 |
+
</div>
|
| 136 |
+
)}
|
| 137 |
+
</div>
|
| 138 |
+
</div>
|
| 139 |
+
)
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
export default PlayerStats
|
web/src/pages/Predictions.jsx
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { useState, useEffect } from 'react'
|
| 2 |
+
import { predictGame, getTeams } from '../api'
|
| 3 |
+
import { TeamLogo, getTeamName } from '../teamLogos'
|
| 4 |
+
|
| 5 |
+
function Predictions() {
|
| 6 |
+
const [teams, setTeams] = useState([])
|
| 7 |
+
const [homeTeam, setHomeTeam] = useState('LAL')
|
| 8 |
+
const [awayTeam, setAwayTeam] = useState('BOS')
|
| 9 |
+
const [prediction, setPrediction] = useState(null)
|
| 10 |
+
const [loading, setLoading] = useState(false)
|
| 11 |
+
|
| 12 |
+
useEffect(() => {
|
| 13 |
+
getTeams().then(data => {
|
| 14 |
+
setTeams(data.teams || [])
|
| 15 |
+
}).catch(console.error)
|
| 16 |
+
}, [])
|
| 17 |
+
|
| 18 |
+
const handlePredict = async () => {
|
| 19 |
+
if (!homeTeam || !awayTeam || homeTeam === awayTeam) return
|
| 20 |
+
|
| 21 |
+
setLoading(true)
|
| 22 |
+
try {
|
| 23 |
+
const result = await predictGame(homeTeam, awayTeam)
|
| 24 |
+
setPrediction(result)
|
| 25 |
+
} catch (err) {
|
| 26 |
+
console.error('Prediction failed:', err)
|
| 27 |
+
} finally {
|
| 28 |
+
setLoading(false)
|
| 29 |
+
}
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
const homeProb = prediction ? (prediction.home_win_probability * 100) : 50
|
| 33 |
+
const awayProb = prediction ? (prediction.away_win_probability * 100) : 50
|
| 34 |
+
|
| 35 |
+
return (
|
| 36 |
+
<div className="animate-fadeIn">
|
| 37 |
+
<div className="page-header">
|
| 38 |
+
<h1 className="page-title">Game Predictions</h1>
|
| 39 |
+
<p className="page-description">Select teams to get AI-powered win probabilities</p>
|
| 40 |
+
</div>
|
| 41 |
+
|
| 42 |
+
{/* Team Selector */}
|
| 43 |
+
<div className="card" style={{ marginBottom: 'var(--space-6)' }}>
|
| 44 |
+
<div style={{ display: 'grid', gridTemplateColumns: '1fr auto 1fr', gap: 'var(--space-6)', alignItems: 'end' }}>
|
| 45 |
+
{/* Away Team */}
|
| 46 |
+
<div className="form-group" style={{ marginBottom: 0 }}>
|
| 47 |
+
<label className="form-label">Away Team</label>
|
| 48 |
+
<div style={{ display: 'flex', alignItems: 'center', gap: 'var(--space-3)' }}>
|
| 49 |
+
<TeamLogo abbrev={awayTeam} size="md" />
|
| 50 |
+
<select
|
| 51 |
+
className="form-select"
|
| 52 |
+
value={awayTeam}
|
| 53 |
+
onChange={(e) => setAwayTeam(e.target.value)}
|
| 54 |
+
>
|
| 55 |
+
{teams.map(team => (
|
| 56 |
+
<option key={team.id} value={team.abbrev}>{team.abbrev} - {getTeamName(team.abbrev)}</option>
|
| 57 |
+
))}
|
| 58 |
+
</select>
|
| 59 |
+
</div>
|
| 60 |
+
</div>
|
| 61 |
+
|
| 62 |
+
<div style={{ color: 'var(--text-dim)', fontWeight: '600', paddingBottom: 'var(--space-3)' }}>@</div>
|
| 63 |
+
|
| 64 |
+
{/* Home Team */}
|
| 65 |
+
<div className="form-group" style={{ marginBottom: 0 }}>
|
| 66 |
+
<label className="form-label">Home Team</label>
|
| 67 |
+
<div style={{ display: 'flex', alignItems: 'center', gap: 'var(--space-3)' }}>
|
| 68 |
+
<TeamLogo abbrev={homeTeam} size="md" />
|
| 69 |
+
<select
|
| 70 |
+
className="form-select"
|
| 71 |
+
value={homeTeam}
|
| 72 |
+
onChange={(e) => setHomeTeam(e.target.value)}
|
| 73 |
+
>
|
| 74 |
+
{teams.map(team => (
|
| 75 |
+
<option key={team.id} value={team.abbrev}>{team.abbrev} - {getTeamName(team.abbrev)}</option>
|
| 76 |
+
))}
|
| 77 |
+
</select>
|
| 78 |
+
</div>
|
| 79 |
+
</div>
|
| 80 |
+
</div>
|
| 81 |
+
|
| 82 |
+
<button
|
| 83 |
+
className="btn btn-primary btn-lg btn-block"
|
| 84 |
+
onClick={handlePredict}
|
| 85 |
+
disabled={loading || homeTeam === awayTeam}
|
| 86 |
+
style={{ marginTop: 'var(--space-6)' }}
|
| 87 |
+
>
|
| 88 |
+
{loading ? 'Analyzing...' : 'Generate Prediction'}
|
| 89 |
+
</button>
|
| 90 |
+
</div>
|
| 91 |
+
|
| 92 |
+
{/* Prediction Result */}
|
| 93 |
+
{prediction && (
|
| 94 |
+
<div className="card animate-slideUp">
|
| 95 |
+
<div style={{ display: 'grid', gridTemplateColumns: '1fr auto 1fr', gap: 'var(--space-8)', alignItems: 'center' }}>
|
| 96 |
+
{/* Away Team */}
|
| 97 |
+
<div style={{ textAlign: 'center' }}>
|
| 98 |
+
<TeamLogo abbrev={prediction.away_team} size="xl" />
|
| 99 |
+
<div style={{ marginTop: 'var(--space-3)' }}>
|
| 100 |
+
<div style={{ fontSize: '1.25rem', fontWeight: '600' }}>{prediction.away_team}</div>
|
| 101 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)' }}>
|
| 102 |
+
ELO: {prediction.away_elo?.toFixed(0) || 'N/A'}
|
| 103 |
+
</div>
|
| 104 |
+
</div>
|
| 105 |
+
<div style={{ fontSize: '2.5rem', fontWeight: '700', color: 'var(--accent-secondary)', marginTop: 'var(--space-4)' }}>
|
| 106 |
+
{awayProb.toFixed(1)}%
|
| 107 |
+
</div>
|
| 108 |
+
</div>
|
| 109 |
+
|
| 110 |
+
{/* Prediction Center */}
|
| 111 |
+
<div style={{ textAlign: 'center' }}>
|
| 112 |
+
<div style={{ fontSize: '0.6875rem', fontWeight: '600', textTransform: 'uppercase', letterSpacing: '0.1em', color: 'var(--text-muted)', marginBottom: 'var(--space-2)' }}>
|
| 113 |
+
Predicted Winner
|
| 114 |
+
</div>
|
| 115 |
+
<div style={{ fontSize: '1.75rem', fontWeight: '700', color: 'var(--accent-primary)' }}>
|
| 116 |
+
{prediction.predicted_winner}
|
| 117 |
+
</div>
|
| 118 |
+
<span className={`badge confidence-${prediction.confidence}`} style={{ marginTop: 'var(--space-3)' }}>
|
| 119 |
+
{prediction.confidence?.toUpperCase()} CONFIDENCE
|
| 120 |
+
</span>
|
| 121 |
+
<div style={{ marginTop: 'var(--space-4)', fontSize: '0.75rem', color: 'var(--text-muted)' }}>
|
| 122 |
+
ELO Difference: {prediction.elo_diff > 0 ? '+' : ''}{prediction.elo_diff?.toFixed(0)}
|
| 123 |
+
</div>
|
| 124 |
+
</div>
|
| 125 |
+
|
| 126 |
+
{/* Home Team */}
|
| 127 |
+
<div style={{ textAlign: 'center' }}>
|
| 128 |
+
<TeamLogo abbrev={prediction.home_team} size="xl" />
|
| 129 |
+
<div style={{ marginTop: 'var(--space-3)' }}>
|
| 130 |
+
<div style={{ fontSize: '1.25rem', fontWeight: '600' }}>{prediction.home_team}</div>
|
| 131 |
+
<div style={{ fontSize: '0.75rem', color: 'var(--text-muted)' }}>
|
| 132 |
+
ELO: {prediction.home_elo?.toFixed(0) || 'N/A'}
|
| 133 |
+
</div>
|
| 134 |
+
</div>
|
| 135 |
+
<div style={{ fontSize: '2.5rem', fontWeight: '700', color: 'var(--accent-primary)', marginTop: 'var(--space-4)' }}>
|
| 136 |
+
{homeProb.toFixed(1)}%
|
| 137 |
+
</div>
|
| 138 |
+
</div>
|
| 139 |
+
</div>
|
| 140 |
+
|
| 141 |
+
{/* Probability Bar */}
|
| 142 |
+
<div className="probability-bar-container" style={{ marginTop: 'var(--space-8)' }}>
|
| 143 |
+
<div className="probability-bar" style={{ height: '8px' }}>
|
| 144 |
+
<div className="probability-fill-away" style={{ width: `${awayProb}%` }}></div>
|
| 145 |
+
<div className="probability-fill-home" style={{ width: `${homeProb}%` }}></div>
|
| 146 |
+
</div>
|
| 147 |
+
<div className="probability-labels" style={{ marginTop: 'var(--space-3)' }}>
|
| 148 |
+
<span>{prediction.away_team}: {awayProb.toFixed(1)}%</span>
|
| 149 |
+
<span>{prediction.home_team}: {homeProb.toFixed(1)}%</span>
|
| 150 |
+
</div>
|
| 151 |
+
</div>
|
| 152 |
+
|
| 153 |
+
{/* Factors */}
|
| 154 |
+
{prediction.factors && prediction.factors.length > 0 && (
|
| 155 |
+
<div style={{ marginTop: 'var(--space-6)', paddingTop: 'var(--space-6)', borderTop: 'var(--border-subtle)' }}>
|
| 156 |
+
<h4 style={{ marginBottom: 'var(--space-4)', color: 'var(--text-secondary)' }}>Key Factors</h4>
|
| 157 |
+
<ul style={{ listStyle: 'none' }}>
|
| 158 |
+
{prediction.factors.map((factor, idx) => (
|
| 159 |
+
<li key={idx} style={{
|
| 160 |
+
padding: 'var(--space-2) 0',
|
| 161 |
+
color: 'var(--text-muted)',
|
| 162 |
+
fontSize: '0.875rem'
|
| 163 |
+
}}>
|
| 164 |
+
• {factor}
|
| 165 |
+
</li>
|
| 166 |
+
))}
|
| 167 |
+
</ul>
|
| 168 |
+
</div>
|
| 169 |
+
)}
|
| 170 |
+
</div>
|
| 171 |
+
)}
|
| 172 |
+
</div>
|
| 173 |
+
)
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
export default Predictions
|
web/src/pages/Standings.jsx
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { useState, useEffect } from 'react'
import { TeamLogo, getTeamName } from '../teamLogos'

// API call for standings.
// NOTE(review): other pages route requests through '../api'; consider moving
// this fetch there so the base URL is configured in one place — confirm.
async function getStandings() {
  const response = await fetch('http://localhost:8000/api/standings')
  if (!response.ok) {
    // Fail loudly on HTTP errors (404/500): calling .json() on an error page
    // would otherwise surface as a cryptic JSON parse error in the caller.
    throw new Error(`Standings request failed with status ${response.status}`)
  }
  return response.json()
}
| 10 |
+
function Standings() {
|
| 11 |
+
const [standings, setStandings] = useState({ east: [], west: [] })
|
| 12 |
+
const [loading, setLoading] = useState(true)
|
| 13 |
+
const [activeConference, setActiveConference] = useState('east')
|
| 14 |
+
|
| 15 |
+
useEffect(() => {
|
| 16 |
+
getStandings()
|
| 17 |
+
.then(data => {
|
| 18 |
+
setStandings(data)
|
| 19 |
+
setLoading(false)
|
| 20 |
+
})
|
| 21 |
+
.catch(err => {
|
| 22 |
+
console.error('Failed to load standings:', err)
|
| 23 |
+
setLoading(false)
|
| 24 |
+
})
|
| 25 |
+
}, [])
|
| 26 |
+
|
| 27 |
+
if (loading) {
|
| 28 |
+
return (
|
| 29 |
+
<div className="loading">
|
| 30 |
+
<div className="spinner"></div>
|
| 31 |
+
<p className="loading-text">Loading standings...</p>
|
| 32 |
+
</div>
|
| 33 |
+
)
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
const currentStandings = activeConference === 'east' ? standings.east : standings.west
|
| 37 |
+
|
| 38 |
+
return (
|
| 39 |
+
<div className="animate-fadeIn">
|
| 40 |
+
<div className="page-header">
|
| 41 |
+
<h1 className="page-title">Season Standings</h1>
|
| 42 |
+
<p className="page-description">2025-26 NBA Conference standings</p>
|
| 43 |
+
</div>
|
| 44 |
+
|
| 45 |
+
{/* Conference Tabs */}
|
| 46 |
+
<div style={{ display: 'flex', gap: 'var(--space-2)', marginBottom: 'var(--space-6)' }}>
|
| 47 |
+
<button
|
| 48 |
+
className={`btn ${activeConference === 'east' ? 'btn-primary' : 'btn-secondary'}`}
|
| 49 |
+
onClick={() => setActiveConference('east')}
|
| 50 |
+
>
|
| 51 |
+
Eastern Conference
|
| 52 |
+
</button>
|
| 53 |
+
<button
|
| 54 |
+
className={`btn ${activeConference === 'west' ? 'btn-primary' : 'btn-secondary'}`}
|
| 55 |
+
onClick={() => setActiveConference('west')}
|
| 56 |
+
>
|
| 57 |
+
Western Conference
|
| 58 |
+
</button>
|
| 59 |
+
</div>
|
| 60 |
+
|
| 61 |
+
{/* Standings Table */}
|
| 62 |
+
<div className="table-container">
|
| 63 |
+
<table className="data-table">
|
| 64 |
+
<thead>
|
| 65 |
+
<tr>
|
| 66 |
+
<th style={{ width: '50px' }}>Rank</th>
|
| 67 |
+
<th>Team</th>
|
| 68 |
+
<th style={{ textAlign: 'center' }}>W</th>
|
| 69 |
+
<th style={{ textAlign: 'center' }}>L</th>
|
| 70 |
+
<th style={{ textAlign: 'center' }}>PCT</th>
|
| 71 |
+
<th style={{ textAlign: 'center' }}>GB</th>
|
| 72 |
+
<th style={{ textAlign: 'center' }}>Streak</th>
|
| 73 |
+
</tr>
|
| 74 |
+
</thead>
|
| 75 |
+
<tbody>
|
| 76 |
+
{currentStandings?.length > 0 ? currentStandings.map((team, idx) => (
|
| 77 |
+
<tr key={team.team || idx}>
|
| 78 |
+
<td style={{ fontWeight: '600', color: idx < 6 ? 'var(--accent-success)' : idx < 10 ? 'var(--text-secondary)' : 'var(--text-muted)' }}>
|
| 79 |
+
{idx + 1}
|
| 80 |
+
</td>
|
| 81 |
+
<td>
|
| 82 |
+
<div className="table-team">
|
| 83 |
+
<TeamLogo abbrev={team.team_abbrev || team.team} size="sm" />
|
| 84 |
+
<span style={{ fontWeight: '500' }}>{team.team_name || getTeamName(team.team_abbrev || team.team)}</span>
|
| 85 |
+
</div>
|
| 86 |
+
</td>
|
| 87 |
+
<td style={{ textAlign: 'center', fontFamily: 'var(--font-mono)' }}>{team.wins || 0}</td>
|
| 88 |
+
<td style={{ textAlign: 'center', fontFamily: 'var(--font-mono)' }}>{team.losses || 0}</td>
|
| 89 |
+
<td style={{ textAlign: 'center', fontFamily: 'var(--font-mono)', color: 'var(--accent-primary)' }}>
|
| 90 |
+
{((team.win_pct || 0) * 100).toFixed(1)}%
|
| 91 |
+
</td>
|
| 92 |
+
<td style={{ textAlign: 'center', fontFamily: 'var(--font-mono)', color: 'var(--text-muted)' }}>
|
| 93 |
+
{team.gb || '-'}
|
| 94 |
+
</td>
|
| 95 |
+
<td style={{ textAlign: 'center' }}>
|
| 96 |
+
<span className={`badge ${team.streak?.includes('W') ? 'badge-success' : 'badge-danger'}`}>
|
| 97 |
+
{team.streak || '-'}
|
| 98 |
+
</span>
|
| 99 |
+
</td>
|
| 100 |
+
</tr>
|
| 101 |
+
)) : (
|
| 102 |
+
<tr>
|
| 103 |
+
<td colSpan="7" style={{ textAlign: 'center', padding: 'var(--space-8)', color: 'var(--text-muted)' }}>
|
| 104 |
+
No standings data available. API endpoint may need to be added.
|
| 105 |
+
</td>
|
| 106 |
+
</tr>
|
| 107 |
+
)}
|
| 108 |
+
</tbody>
|
| 109 |
+
</table>
|
| 110 |
+
</div>
|
| 111 |
+
</div>
|
| 112 |
+
)
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
export default Standings
|