Spaces:
Running
Running
James McCool
committed on
Commit
·
e4858f7
1
Parent(s):
832f614
Initial commit and update
Browse files- .streamlit/secrets.toml +1 -0
- Dockerfile +14 -0
- requirements.txt +8 -3
- src/streamlit_app.py +140 -36
.streamlit/secrets.toml
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
NBA_DATA="https://sheetdb.io/api/v1/mh9ep4ovqs45h"
|
Dockerfile
CHANGED
|
@@ -5,11 +5,25 @@ WORKDIR /app
|
|
| 5 |
RUN apt-get update && apt-get install -y \
|
| 6 |
build-essential \
|
| 7 |
curl \
|
|
|
|
| 8 |
git \
|
| 9 |
&& rm -rf /var/lib/apt/lists/*
|
| 10 |
|
| 11 |
COPY requirements.txt ./
|
| 12 |
COPY src/ ./src/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
RUN pip3 install -r requirements.txt
|
| 15 |
|
|
|
|
| 5 |
RUN apt-get update && apt-get install -y \
|
| 6 |
build-essential \
|
| 7 |
curl \
|
| 8 |
+
software-properties-common \
|
| 9 |
git \
|
| 10 |
&& rm -rf /var/lib/apt/lists/*
|
| 11 |
|
| 12 |
COPY requirements.txt ./
|
| 13 |
COPY src/ ./src/
|
| 14 |
+
COPY .streamlit/ ./.streamlit/
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
ENV MONGO_URI="mongodb+srv://multichem:Xr1q5wZdXPbxdUmJ@testcluster.lgwtp5i.mongodb.net/?retryWrites=true&w=majority&appName=TestCluster"
|
| 19 |
+
ENV NBA_DATA="https://sheetdb.io/api/v1/mh9ep4ovqs45h"
|
| 20 |
+
RUN useradd -m -u 1000 user
|
| 21 |
+
USER user
|
| 22 |
+
ENV HOME=/home/user\
|
| 23 |
+
PATH=/home/user/.local/bin:$PATH
|
| 24 |
+
WORKDIR $HOME/app
|
| 25 |
+
RUN pip install --no-cache-dir --upgrade pip
|
| 26 |
+
COPY --chown=user . $HOME/app
|
| 27 |
|
| 28 |
RUN pip3 install -r requirements.txt
|
| 29 |
|
requirements.txt
CHANGED
|
@@ -1,3 +1,8 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit
|
| 2 |
+
openpyxl
|
| 3 |
+
matplotlib
|
| 4 |
+
pulp
|
| 5 |
+
docker
|
| 6 |
+
plotly
|
| 7 |
+
scipy
|
| 8 |
+
pymongo
|
src/streamlit_app.py
CHANGED
|
@@ -1,40 +1,144 @@
|
|
| 1 |
-
import
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
import numpy as np
|
| 3 |
import pandas as pd
|
| 4 |
import streamlit as st
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
""
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st

# Must be the first Streamlit command executed in the script.
st.set_page_config(layout="wide")

# HACK: wipe every non-underscore name from the module namespace so a rerun
# starts from a clean slate. This also deletes `st` itself, which is why
# streamlit is imported again below. Fragile — st.session_state is the
# supported way to reset app state; kept for behavioral parity.
for name in dir():
    if not name.startswith('_'):
        del globals()[name]

import numpy as np
import pandas as pd
import streamlit as st
import os
import requests

# Base URL of the SheetDB API endpoint; injected via the environment
# (see the NBA_DATA ENV in the Dockerfile / .streamlit secrets).
# NOTE(review): will be None if the env var is unset — requests below
# would then fail; confirm deployment always provides it.
NBA_DATA = os.getenv('NBA_DATA')

# Column -> format-string map applied by the styled st.dataframe below.
percentages_format = {'Pts% Boost': '{:.2%}', 'Reb% Boost': '{:.2%}', 'Ast% Boost': '{:.2%}', '3p% Boost': '{:.2%}',
                      'Stl Boost%': '{:.2%}', 'Blk Boost%': '{:.2%}', 'TOV Boost%': '{:.2%}', 'FPPM Boost': '{:.2%}',
                      'Team FPPM Boost': '{:.2%}'}
@st.cache_resource(ttl = 600)
def init_baselines():
    """Download and assemble the DEM (defense-vs-position) baseline tables.

    Fetches the slate matchup sheet plus one DEM sheet per position from the
    SheetDB API at ``NBA_DATA``, normalizes the percentage columns to floats,
    tags each frame with its position, and concatenates them into one table.

    Returns:
        tuple:
            export_dem (pd.DataFrame): combined per-team/per-position boost
                numbers, including the team-average 'Team FPPM Boost'.
            matchups (pd.DataFrame): rows of the slate's matchups
                (rows where 'Var' is non-empty).
            matchups_dict (dict): 'Team' -> 'Opp' mapping for the slate.
    """
    # Columns that arrive as percentage strings (e.g. "12.3%") and must be
    # converted to float fractions. Shared by every position sheet.
    boost_cols = ['Pts% Boost', 'Reb% Boost', 'Ast% Boost', '3p% Boost',
                  'Stl Boost%', 'Blk Boost%', 'TOV Boost%', 'FPPM Boost']

    def _load_position(sheet, position):
        # One sheet per position; all five share identical cleanup steps
        # (previously five copy-pasted stanzas — consolidated here).
        data = requests.get(NBA_DATA + '?sheet=' + sheet).json()
        frame = pd.DataFrame(data).reset_index(drop=True)
        # Strip '%' and scale to a fraction.
        frame.loc[:, boost_cols] = frame.loc[:, boost_cols].replace({'%': ''}, regex=True).astype(float) / 100
        # Coerce everything numeric where possible; keep original values elsewhere.
        frame = frame.apply(pd.to_numeric, errors='coerce').fillna(frame)
        frame['position'] = position
        # Drop padding rows that have no team acronym.
        return frame[frame['Acro'] != ""]

    # Slate matchups (fetched first, matching the original request order).
    json_matchups = requests.get(NBA_DATA + '?sheet=DEM%20Matchups').json()
    matchups = pd.DataFrame(json_matchups).reset_index(drop=True)
    matchups = matchups[matchups['Var'] != ""]
    matchups_dict = dict(zip(matchups['Team'], matchups['Opp']))

    overall_dem = pd.concat([
        _load_position('PG_DEM_Calc', 'Point Guard'),
        _load_position('SG_DEM_Calc', 'Shooting Guard'),
        _load_position('SF_DEM_Calc', 'Small Forward'),
        _load_position('PF_DEM_Calc', 'Power Forward'),
        _load_position('C_DEM_Calc', 'Center'),
    ])
    overall_dem = overall_dem[['Acro', 'G', 'Pts% Boost', 'Reb% Boost', 'Ast% Boost', '3p% Boost',
                               'Stl Boost%', 'Blk Boost%', 'TOV Boost%', 'FPPM', 'FPPM Boost', 'position']]
    overall_dem['Team'] = overall_dem['Acro'] + '-' + overall_dem['position']
    # Team-level average FPPM boost across the five positions.
    overall_dem['Team FPPM Boost'] = overall_dem.groupby('Acro', sort=False)['FPPM Boost'].transform('mean')
    overall_dem = overall_dem.reset_index()

    export_dem = overall_dem[['Team', 'Acro', 'G', 'Pts% Boost', 'Reb% Boost', 'Ast% Boost', '3p% Boost',
                              'Stl Boost%', 'Blk Boost%', 'TOV Boost%', 'FPPM', 'FPPM Boost', 'Team FPPM Boost', 'position']]

    return export_dem, matchups, matchups_dict
def convert_df_to_csv(df):
    """Serialize *df* to CSV text (index included) and return it as UTF-8 bytes.

    Used as the payload builder for st.download_button.
    """
    csv_text = df.to_csv()
    return csv_text.encode('utf-8')
| 91 |
+
overall_dem, matchups, matchups_dict = init_baselines()

col1, col2 = st.columns([1, 9])
with col1:
    if st.button("Reset Data", key='reset1'):
        # BUG FIX: init_baselines is cached with st.cache_resource, so the
        # resource cache must be cleared; the old st.cache_data.clear() was a
        # no-op for it and the button never refetched anything.
        st.cache_resource.clear()
        overall_dem, matchups, matchups_dict = init_baselines()
    split_var1 = st.radio("View all teams or just this main slate's matchups?", ('Slate Matchups', 'All'), key='split_var1')
    if split_var1 == 'Slate Matchups':
        # Teams appearing on the slate (as opponents giving the boost).
        view_var1 = matchups.Opp.values.tolist()
        split_var2 = st.radio("Would you like to view all teams or specific ones?", ('All', 'Specific Teams'), key='split_var2')
        if split_var2 == 'Specific Teams':
            team_var1 = st.multiselect('Which teams would you like to include in the tables?', options = view_var1, key='team_var1')
        elif split_var2 == 'All':
            team_var1 = view_var1
        split_var3 = st.radio("Would you like to view all positions or specific ones?", ('All', 'Specific Positions'), key='split_var3')
        if split_var3 == 'Specific Positions':
            # BUG FIX: label previously said "teams" (copy-paste) for the position picker.
            pos_var1 = st.multiselect('Which positions would you like to include in the tables?', options = overall_dem['position'].unique(), key='pos_var1')
        elif split_var3 == 'All':
            pos_var1 = overall_dem.position.values.tolist()
    elif split_var1 == 'All':
        split_var2 = st.radio("Would you like to view all teams or specific ones?", ('All', 'Specific Teams'), key='split_var2')
        if split_var2 == 'Specific Teams':
            team_var1 = st.multiselect('Which teams would you like to include in the tables?', options = overall_dem['Acro'].unique(), key='team_var1')
        elif split_var2 == 'All':
            team_var1 = overall_dem.Acro.values.tolist()
        split_var3 = st.radio("Would you like to view all positions or specific ones?", ('All', 'Specific Positions'), key='split_var3')
        if split_var3 == 'Specific Positions':
            # BUG FIX: label previously said "teams" (copy-paste) for the position picker.
            pos_var1 = st.multiselect('Which positions would you like to include in the tables?', options = overall_dem['position'].unique(), key='pos_var1')
        elif split_var3 == 'All':
            pos_var1 = overall_dem.position.values.tolist()
with col2:
    if split_var1 == 'Slate Matchups':
        # .copy() so the renames/assignments below never mutate the cached frame.
        dem_display = overall_dem[overall_dem['Acro'].isin(view_var1)].copy()
        dem_display['Team (Getting Boost)'] = dem_display['Acro'].map(matchups_dict)
        dem_display.rename(columns={"Acro": "Opp (Giving Boost)"}, inplace = True)
        dem_display = dem_display[['Team (Getting Boost)', 'Opp (Giving Boost)', 'G', 'Pts% Boost', 'Reb% Boost', 'Ast% Boost', '3p% Boost',
                                   'Stl Boost%', 'Blk Boost%', 'TOV Boost%', 'FPPM', 'FPPM Boost', 'Team FPPM Boost', 'position']]
        dem_display = dem_display[dem_display['Team (Getting Boost)'].isin(team_var1)]
        dem_display = dem_display[dem_display['position'].isin(pos_var1)]
        dem_display = dem_display.sort_values(by='FPPM Boost', ascending=False)
    elif split_var1 == 'All':
        dem_display = overall_dem[overall_dem['Acro'].isin(team_var1)].copy()
        dem_display = dem_display[dem_display['position'].isin(pos_var1)]
        dem_display = dem_display.sort_values(by='FPPM Boost', ascending=False)
        dem_display.rename(columns={"Team": "Team (Giving Boost)"}, inplace = True)
        dem_display = dem_display.set_index('Team (Giving Boost)')
    st.dataframe(dem_display.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(percentages_format, precision=2), use_container_width = True)
    st.download_button(
        label="Export DEM Numbers",
        data=convert_df_to_csv(overall_dem),
        file_name='DEM_export.csv',
        mime='text/csv',
    )