Commit ·
fc4143b
0
Parent(s):
Duplicate from MichaelBurry/Error_
Browse files
Co-authored-by: Burry <MichaelBurry@users.noreply.huggingface.co>
- .gitattributes +3 -0
- 1vs2.png +0 -0
- EFvsMinvar.png +0 -0
- README.md +13 -0
- app.py +283 -0
- arima.py +64 -0
- betas.csv +0 -0
- betas.py +74 -0
- correlation.py +31 -0
- correlation_matrix.csv +3 -0
- data_and_sp500.csv +3 -0
- ef.py +193 -0
- plots.py +220 -0
- requirements.txt +11 -0
- sharp_ratio.py +146 -0
- us-shareprices-daily.csv +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
data_and_sp500.csv filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
correlation_matrix.csv filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
us-shareprices-daily.csv filter=lfs diff=lfs merge=lfs -text
|
1vs2.png
ADDED
|
EFvsMinvar.png
ADDED
|
README.md
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Cse6242 Dataminers
|
| 3 |
+
emoji: 📚
|
| 4 |
+
colorFrom: pink
|
| 5 |
+
colorTo: pink
|
| 6 |
+
sdk: streamlit
|
| 7 |
+
sdk_version: 1.10.0
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
duplicated_from: MichaelBurry/Error_
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from datetime import date, timedelta
|
| 3 |
+
#from rest_api.fetch_data import (get_symbol_data)
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from PIL import Image
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
from plots import (
|
| 9 |
+
beta,
|
| 10 |
+
basic_portfolio,
|
| 11 |
+
# display_portfolio_return,
|
| 12 |
+
display_heat_map,
|
| 13 |
+
#circle_packing,
|
| 14 |
+
ER,
|
| 15 |
+
buble_interactive
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
### Koi
|
| 19 |
+
from ef import(
|
| 20 |
+
ef_viz
|
| 21 |
+
)
|
| 22 |
+
def risk_str(num):
    """Map a risk-aversion coefficient to a descriptive label.

    Args:
        num: Coefficient of risk aversion. The sidebar slider in
            ``get_choices`` supplies values in [5, 35] with step 5.

    Returns:
        One of ``'Low Risk Aversion'`` (num < 15),
        ``'Medium Risk Aversion'`` (15 <= num < 25), or
        ``'High Risk Aversion'`` (num >= 25).

    FIX: the original returned ``None`` for values outside [5, 35]
    (e.g. ``num < 5`` or ``num > 35``); callers concatenate the result
    into sidebar messages ("You have selected a " + risk_str(A_coef) + ...),
    so a ``None`` would raise ``TypeError``. Out-of-range values are now
    clamped into the nearest category, which is backward-compatible for
    every value the slider can actually produce.
    """
    if num < 15:
        return 'Low Risk Aversion'
    elif num < 25:
        return 'Medium Risk Aversion'
    return 'High Risk Aversion'
|
| 29 |
+
#### Koi
|
| 30 |
+
|
| 31 |
+
from sharp_ratio import(
|
| 32 |
+
cumulative_return,
|
| 33 |
+
|
| 34 |
+
sharp_ratio_func
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
from arima import (
|
| 38 |
+
# get_model_accuracy,
|
| 39 |
+
arima_chart
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def load_heading():
    """Render the page heading and brief usage instructions.

    Pure UI side effects via Streamlit; returns nothing.
    """
    page_top = st.container()
    with page_top:
        st.title('Dataminers')
        st.subheader('This App performs historical portfolio analysis and future analysis ')
        st.subheader('Please read the instructions carefully and enjoy!')
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def get_choices():
    """Prompt the sidebar dialog for all portfolio choices.

    Builds the sidebar widgets (tickers, quantities, benchmark, risk-free
    rate, risk-aversion slider), and on "Calculate" loads the price data
    and returns it together with the selections.

    Returns:
        On submit: a dict with keys 'choices' (the user selections),
        'combined_df' (price columns for the chosen tickers),
        'data' (full data_and_sp500.csv frame) and 'raw_data'
        (us-shareprices-daily.csv). Before the button is pressed the
        function falls through and implicitly returns None, which callers
        treat as falsy (`if choices:` in run()).
    """
    choices = {}
    #tab1, tab2, tab3, tab4, tab5 = st.tabs(["Tickers", "Quantity", "Benchmark","Risk Free Return","Risk Aversion"])

    # Comma-separated ticker symbols; not validated against the data columns,
    # so an unknown ticker raises a KeyError below — TODO confirm intended.
    tickers = st.sidebar.text_input('Enter stock tickers.', 'GOOG,AA,AVGO,AMD')

    # Set the weights
    # NOTE: these are investment quantities (dollar amounts), not normalized
    # portfolio weights — they are not required to sum to 1.
    weights_str = st.sidebar.text_input('Enter the investment quantities', '50,30,25,25')

    benchmark = st.sidebar.selectbox(
        'Select your ideal benchmark of return',
        ('SP500', 'AOK', 'IXIC'))
    if benchmark == 'IXIC':
        st.sidebar.warning("You have selected a volatile benchmark.")
    elif benchmark == 'SP500':
        st.sidebar.success('You have selected a balanced benchmark')
    elif benchmark == 'AOK':
        st.sidebar.success('You have selected a conservative benchmark')

    ### koi
    rf = st.sidebar.number_input('Enter current rate of risk free return', min_value=0.001, max_value=1.00, value=0.041)

    #A_coef_map =
    # Slider range [5, 35] step 5 matches the ranges handled by risk_str().
    A_coef = st.sidebar.slider('Enter The Coefficient of Risk Aversion', min_value=5, max_value=35, value=30, step=5)

    if A_coef > 20:
        st.sidebar.success("You have selected a "+ risk_str(A_coef) +" investing style")
        investing_style = 'Conservative'
    elif A_coef >10 and A_coef <= 20:
        st.sidebar.success("You have selected a "+risk_str(A_coef) +" investing style")
        investing_style = 'Balanced'
    elif A_coef <= 10:
        st.sidebar.warning("You have selected a "+ risk_str(A_coef) +" investing style")
        investing_style = 'Risky'

    # Every form must have a submit button.
    submitted = st.sidebar.button("Calculate")

    symbols = []
    # NOTE(review): `reset` is never set to True anywhere (the RESET button
    # code is commented out below), so the `if reset:` branch is dead code.
    reset = False

    # Reusable Error Button DRY!
    #def reset_app(error):
    #    st.sidebar.write(f"{error}!")
    #    st.sidebar.write(f"Check The Syntax")
    #    reset = st.sidebar.button("RESET APP")

    if submitted:
        #with st.spinner('Running the calculations...'):
        #    time.sleep(8)
        #    st.success('Done!')
        # convert strings to lists
        tickers_list = tickers.split(",")
        weights_list = weights_str.split(",")
        #crypto_symbols_list = crypto_symbols.split(",")
        # Create the Symbols List
        symbols.extend(tickers_list)
        #symbols.extend(crypto_symbols_list)
        # Convert Weights To Decimals
        # float() raises ValueError on non-numeric input — TODO confirm the
        # app relies on Streamlit surfacing that as a red traceback.
        weights = []
        for item in weights_list:
            weights.append(float(item))

        if reset:
            # # Clears all singleton caches:
            #tickers = st.sidebar.selectbox('Enter 11 stock symbols.', ('GOOG','D','AAP','BLK'))
            # crypto_symbols = st.sidebar.text_input('Enter 2 crypto symbols only as below', 'BTC-USD,ETH-USD')
            #weights_str = st.sidebar.text_input('Enter The Investment Weights', '0.3,0.3 ,0.3')

            # NOTE(review): st.experimental_singleton is deprecated in newer
            # Streamlit releases — verify against the pinned sdk_version.
            st.experimental_singleton.clear()

        else:
            # Submit an object with choices
            choices = {
                'symbols': symbols,
                'weights': weights,
                'benchmark': benchmark,
                'investing_style': investing_style,
                'risk-free-rate': rf,
                'A-coef': A_coef
            }
            # Load combined_df
            data = pd.read_csv('data_and_sp500.csv')
            combined_df = data[tickers_list]
            raw_data=pd.read_csv('us-shareprices-daily.csv', sep=';')
            # return object of objects
            return {
                'choices': choices,
                'combined_df': combined_df,
                'data': data,
                'raw_data':raw_data
            }
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def run():
    """The main function for running the script.

    Renders the heading, collects the user's choices from the sidebar, and —
    once "Calculate" is pressed — walks through every analysis section
    (beta/CAPM, efficient frontier, cumulative returns, correlation heat map,
    Sharpe ratio, ARIMA forecast).

    NOTE: the bare triple-quoted strings below are NOT docstrings or
    comments — Streamlit "magic" renders any bare expression, so each one
    is displayed to the user as Markdown. They must not be removed or
    reworded without changing the page content.
    """

    load_heading()
    # get_choices() returns None until the Calculate button is pressed,
    # so the whole page body is gated on a truthy result.
    choices = get_choices()
    if choices:
        st.success('''** Selected Tickers **''')
        buble_interactive(choices['data'],choices['choices'])
        st.header('Tickers Beta')
        """
        The Capital Asset Pricing Model (CAPM) utilizes a formula to enable the application to calculate
        risk, return, and variability of return with respect to a benchmark. The application uses this
        benchmark, currently S&P 500 annual rate of return, to calculate the return of a stock using
        Figure 2 in Appendix A. Elements such as beta can be calculated using the formula in Appendix
        A Figure 1. The beta variable will serve as a variable to be used for calculating the variability of
        the stock with respect to the benchmark. This variability factor will prove useful for a variety of
        calculations such as understanding market risk and return. If the beta is equal to 1.0, the stock
        price is correlated with the market. When beta is smaller than 1.0, the stock is less volatile than
        the market. If beta is greater than 1.0, the stock is more volatile than the market.
        The CAPM model was run for 9 stocks, using 10-year daily historical data for initial test analysis.
        With this initial analysis, beta was calculated to determine the stock’s risk by measuring the
        price changes to the benchmark. By using CAPM model, annual expected return and portfolio
        return is calculated. The model results can be found in Appendix A.
        """

        # CAPM beta table and expected-return section.
        beta(choices['data'], choices['choices'])
        ER(choices['data'], choices['choices'])
        ##### EDIT HERE ##### koi
        st.header('CAPM Model and the Efficient Frontier')
        """
        CAPM model measures systematic risks, however many of it's functions have unrealistic assumptions and rely heavily on a linear interpretation
        of the risks vs. returns relationship. It is better to use CAPM model in conjunction with the Efficient Frontier to better
        graphically depict volatility (a measure of investment risk) for the defined rate of return. \n
        Below we map the linear Utility function from the CAPM economic model along with the Efficient Frontier
        Each circle depicted above is a variation of the portfolio with the same input asset, only different weights.
        Portfolios with higher volatilities have a yellower shade of hue, while portfolios with a higher return have a larger radius. \n
        As you input different porfolio assets, take note of how diversification can improve a portfolio's risk versus reward profile.
        """
        # Efficient-frontier scatter plus CAPM utility line (ef.py).
        ef_viz(choices['data'],choices['choices'])
        """
        There are in fact two components of the Efficient Frontier: the Efficient Frontier curve itself and the Minimum Variance Frontier.
        The lower curve, which is also the Minimum Variance Frontier will contain assets in the portfolio
        that has the lowest volatility. If our portfolio contains "safer" assets such as Governmental Bonds, the further to the right
        of the lower curve we will see a portfolio that contains only these "safe" assets, the portfolios on
        this curve, in theory, will have diminishing returns.\n
        The upper curve, which is also the Efficient Frontier, contains portfolios that have marginally increasing returns as the risks
        increases. In theory, we want to pick a portfolio on this curve, as these portfolios contain more balanced weights of assets
        with acceptable trade-offs between risks and returns. \n
        If an investor is more comfortable with investment risks, they can pick a portfolio on the right side of the Efficient Frontier.
        Whereas, a conservative investor might want to pick a portfolio from the left side of the Efficient Frontier. \n
        Take notes of the assets' Betas and how that changes the shape of the curve as well. \n
        How does the shape of the curve change when
        the assets are of similar Beta vs when they are all different?\n
        Note the behavior of the curve when the portfolio contains only assets with Betas higher than 1 vs. when Betas are lower than 1.\n

        """
        ##### ##### Koi
        # Creates the title for streamlit
        st.subheader('Portfolio Historical Normalized Cumulative Returns')
        """
        Cumulative Returns:\n
        The cumulative return of an asset is calculated by subtracting the original price paid from the current profit or loss. This answers the question,
        what is the return on my initial investment?\n
        The graph below shows the historical normalized cumulative returns for each of the chosen assets for the entire time period of the available data.
        The default line chart shows tickers AA, AMD, AVGO, and GOOG and we can see that all have a positive cumulative return over the period of the available data.
        Any of these assets purchased on the starting day and sold on the ending day for the period would have earned a return on their investment.\n
        This chart can also be used to analyze the correlation of the returns of the chosen assets over the displayed period.
        Any segments of the line charts that show cumulative returns with similarly or oppositely angled segments can be considered to have some level of
        correlation during those periods.
        """
        basic_portfolio(choices['combined_df'])
        """
        Negative Correlations (1): \n
        Occur for assets whose cumulative returns move in opposite directions. When one goes up the other goes down and vice versa.
        These negatively correlated assets would offer some level of diversification protection to each other.
        Perfectly negatively correlated stocks are sort of the goal, but unlikely to be common.
        In most cases finding some level of negatively correlated stocks, should offer some level of diversification protection to your portfolio.
        The amount of protection depends upon the calculated metric. Our tool includes some CAPM analysis, which attempts to relate the risk and return
        and the correlation of assets to determine the expected portfolio returns versus the combined, hopefully reduced, risk.\n

        Positive Correlations (2):\n
        Occur for assets whose cumulative returns move in concert. When one goes up the other also goes up and vice versa.
        These positively correlated assets would not offer much or any diversification protection to each other.\n
        """
        # Static illustration of correlation trends, centered via a 1:6:1
        # three-column layout (the outer columns are empty spacers).
        im = Image.open('1vs2.png')
        col1, col2, col3 = st.columns([1,6,1])

        with col1:
            st.write("")

        with col2:
            st.image(im, caption='Trends of Assets Correlations',use_column_width='auto')

        with col3:
            st.write("")

        # Creates the title for streamlit
        st.subheader('Heatmap Showing Correlation Of Assets')
        """
        Heatmap: \n
        The Heat map shows the overall correlation of each asset to the other assets. Notice that the middle diagonal row is filled in with all 1’s.
        That is because they are all perfectly correlated with themselves. A value of 1 equates to perfect correlation, -1 equates to perfect negative correlation,
        and 0 equates to no correlation with values in between being relative to their distance from the extremes. A correlation value of .5 would mean
        the asset moves half as much in the same direction as the correlated asset. A values of -0.5 would mean it moves half as much in the opposite direction
        as the correlated asset. \n
        The Heat map shows the correlation coefficient or value for each asset over the entire period to each other asset.
        It also depicts the color of the intersection as darker for less correlation and lighter for more correlation, which could be either positive or negative.
        The legend on the right indicates the absolute level of correlation for each color, again positive or negative associated to each color.\n
        """

        display_heat_map(choices['data'],choices['choices'])
        #display_portfolio_return(choices['combined_df'], choices['choices'])

        # Cumulative-return and Sharpe-ratio sections (sharp_ratio.py).
        cumulative_return(choices['combined_df'], choices['choices'])
        sharp_ratio_func(choices['raw_data'], choices['choices'])

        '''
        ARIMA:\n
        '''

        # 7-day SARIMAX forecast chart for the selected tickers (arima.py).
        arima_chart(choices['choices']['symbols'])
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
# Script entry point: Streamlit executes this module top-to-bottom,
# and run() drives the whole page.
if __name__ == "__main__":
    run()
|
| 283 |
+
|
arima.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from datetime import timedelta
|
| 4 |
+
import numpy as np
|
| 5 |
+
import statsmodels.api as sm
|
| 6 |
+
|
| 7 |
+
import plotly.express as px
|
| 8 |
+
import plotly.graph_objects as go
|
| 9 |
+
|
| 10 |
+
import warnings
# Silence the (very noisy) convergence warnings emitted by repeated SARIMAX
# fits below. NOTE(review): this suppresses ALL warnings process-wide for any
# importer of this module — consider scoping with warnings.catch_warnings().
warnings.filterwarnings("ignore")

# from sklearn.metrics import mean_squared_error

# NOTE(review): this large CSV is loaded as a side effect of importing the
# module, and the resulting module-level `df` is not used by the functions
# below (get_model_accuracy takes `data` as a parameter and arima_chart reads
# its own file) — looks removable; confirm no external caller relies on it.
df = pd.read_csv('us-shareprices-daily.csv', sep=';')
|
| 16 |
+
|
| 17 |
+
def get_model_accuracy(data, ticker_symbol):
    """Walk-forward test of an ARIMA(1,1,1) model on one ticker's closes.

    Splits the ticker's 'Close' series 85/15 chronologically, then performs
    one-step-ahead forecasting over the test window, refitting SARIMAX on the
    growing history at every step.

    Args:
        data: DataFrame with at least 'Ticker' and 'Close' columns
            (SimFin daily share-price format).
        ticker_symbol: Ticker to evaluate, matched against data['Ticker'].

    Returns:
        A human-readable string reporting the test-set mean squared error.
    """
    stock_data = data[data['Ticker'] == ticker_symbol]

    # 85/15 chronological train/test split for the chosen stock symbol.
    split = int(len(stock_data) * 0.85)
    train_data, test_data = stock_data[0:split], stock_data[split:]
    training_data = train_data['Close'].values
    test_data = test_data['Close'].values

    history = [x for x in training_data]
    model_predictions = []
    N_test_observations = len(test_data)
    for time_point in range(N_test_observations):
        # Refit on all data seen so far and forecast one step ahead.
        model = sm.tsa.statespace.SARIMAX(history, order=(1, 1, 1))
        model_fit = model.fit(disp=0)
        output = model_fit.forecast()
        yhat = output[0]
        model_predictions.append(yhat)
        # Feed the true value back in (walk-forward validation).
        true_test_value = test_data[time_point]
        history.append(true_test_value)

    # FIX: the original called sklearn's mean_squared_error, but its import
    # is commented out above, so this function raised NameError when run.
    # Compute the MSE with numpy instead — no new dependency needed.
    MSE_error = float(np.mean((np.asarray(test_data) - np.asarray(model_predictions)) ** 2))
    return 'Testing Mean Squared Error is {}'.format(MSE_error)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def arima_chart(tickers):
    """Render a price chart with a 7-day SARIMAX forecast per ticker.

    Reads 'data_and_sp500.csv', plots the historical 'Date' vs. price lines
    for the given tickers, fits a SARIMAX(21,1,7) model to each ticker's
    series, appends its 7-step forecast as an extra trace, and displays the
    figure in the running Streamlit app.

    Args:
        tickers: List of ticker column names present in data_and_sp500.csv.
    """
    # FIX: this module used `st.plotly_chart` without ever importing
    # streamlit, so calling arima_chart raised NameError. Import locally so
    # the module stays importable outside a Streamlit context.
    import streamlit as st

    df = pd.read_csv('data_and_sp500.csv')
    df = df[['Date'] + tickers]
    fig = px.line(df, x='Date', y=df.columns)

    # NOTE(review): the 2021-10-22 anchor and the 1258 index offset are
    # hard-coded to this dataset's length and end date — TODO confirm they
    # match the shipped data_and_sp500.csv. Hoisted out of the loop since
    # they do not depend on the ticker.
    begin_date = datetime.strptime('2021-10-22', '%Y-%m-%d')

    for ticker in tickers:
        ticker_df = pd.concat([df['Date'], df[ticker]], axis=1)

        model = sm.tsa.statespace.SARIMAX(ticker_df[ticker], order=(21, 1, 7))
        model_fit = model.fit(disp=-1)
        # print(model_fit.summary())
        forecast = model_fit.forecast(7, alpha=0.05)  # .predict(start=1259, end=1289)
        # Map the integer forecast index onto calendar dates past the anchor.
        forecast_dates = [begin_date + timedelta(days=i - 1258) for i in forecast.index]
        fig.add_trace(go.Scatter(x=forecast_dates, y=forecast.to_list(),
                                 mode='lines',
                                 name='{} forecast'.format(ticker)))

    # Zoom to the last ~4 months of history plus the forecast horizon.
    fig.update_xaxes(range=[begin_date - timedelta(days=120), begin_date + timedelta(days=10)])
    st.plotly_chart(fig, use_container_width=True)
|
betas.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
betas.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
import datetime as dt
|
| 4 |
+
import pandas_datareader as pdr
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def convert_simFin2(path):
    """Read a SimFin daily share-price CSV (semicolon-separated) and reshape
    it into a Date-indexed frame with one 'Adj. Close' column per ticker.

    Same logic as correlation.convert_simFin; this copy serves betas.py.
    """
    frame = pd.read_csv(path, sep=';')
    pivoted = frame.pivot(index="Date", columns="Ticker", values="Adj. Close")
    return pivoted
|
| 13 |
+
|
| 14 |
+
def log_of_returns2(stocks):
    """Daily log returns ln(P_t / P_{t-1}) for every price column.

    Same logic as correlation.log_of_returns; this copy serves betas.py.
    The first row is NaN because there is no prior price to compare against.
    """
    ratios = stocks / stocks.shift()
    return np.log(ratios)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Code to Calculate and output Betas
|
| 23 |
+
# Read in Stock csv data and convert to have each Ticker as a column.
|
| 24 |
+
#df = pd.read_csv('D:/SimFinData/us-shareprices-daily.csv', sep=';')
|
| 25 |
+
#stocks = df.pivot(index="Date", columns="Ticker", values="Adj. Close")
|
| 26 |
+
#stocks
|
| 27 |
+
#start = min(df['Date'])
|
| 28 |
+
#end = max(df['Date'])
|
| 29 |
+
#logRet = np.log(stocks/stocks.shift())
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
#SP500 = pdr.get_data_yahoo("^GSPC", start)
|
| 33 |
+
#IXIC = pdr.get_data_yahoo("^IXIC", start)
|
| 34 |
+
#AOK = pdr.get_data_yahoo("AOK", start)
|
| 35 |
+
|
| 36 |
+
#SP500['SP500'] = SP500['Adj Close']
|
| 37 |
+
#IXIC['IXIC'] = IXIC['Adj Close']
|
| 38 |
+
#AOK['AOK'] = AOK['Adj Close']
|
| 39 |
+
|
| 40 |
+
#spAC = np.log(SP500['SP500']/SP500['SP500'].shift())
|
| 41 |
+
#spAC = spAC.loc[spAC.index <= end]
|
| 42 |
+
|
| 43 |
+
#ixicAC = np.log(IXIC['IXIC']/IXIC['IXIC'].shift())
|
| 44 |
+
#ixicAC = ixicAC.loc[ixicAC.index <= end]
|
| 45 |
+
|
| 46 |
+
#aokAC = np.log(AOK['AOK']/AOK['AOK'].shift())
|
| 47 |
+
#aokAC = aokAC.loc[aokAC.index <= end]
|
| 48 |
+
|
| 49 |
+
#sp500B = logRet.join(spAC)
|
| 50 |
+
#ixicB = logRet.join(ixicAC)
|
| 51 |
+
#aokB = logRet.join(aokAC)
|
| 52 |
+
|
| 53 |
+
#sp5Cov = sp500B.cov()
|
| 54 |
+
#ixicCov = ixicB.cov()
|
| 55 |
+
#aokCov = aokB.cov()
|
| 56 |
+
|
| 57 |
+
#sp500Var = sp500B['SP500'].var()
|
| 58 |
+
#ixicVar = ixicB['IXIC'].var()
|
| 59 |
+
#aokVar = aokB['AOK'].var()
|
| 60 |
+
|
| 61 |
+
#sp500Beta = sp5Cov.loc['SP500']/sp500Var
|
| 62 |
+
#ixicBeta = ixicCov.loc['IXIC']/ixicVar
|
| 63 |
+
#aokBeta = aokCov.loc['AOK']/aokVar
|
| 64 |
+
|
| 65 |
+
#betas = pd.concat([sp500Beta,ixicBeta,aokBeta], axis=1)
|
| 66 |
+
|
| 67 |
+
#betas['Ticker'] = betas.index
|
| 68 |
+
|
| 69 |
+
#betas = betas[['Ticker','SP500','IXIC','AOK']]
|
| 70 |
+
|
| 71 |
+
#betas.to_csv (r'betas.csv', index = None, header=True)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
|
correlation.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
import datetime as dt
|
| 4 |
+
import pandas_datareader as pdr
|
| 5 |
+
|
| 6 |
+
# Read in Stock csv data and convert to have each Ticker as a column.
|
| 7 |
+
#df = pd.read_csv('us-shareprices-daily.csv', sep=';')
|
| 8 |
+
#stocks = df.pivot(index="Date", columns="Ticker", values="Adj. Close")
|
| 9 |
+
#logRet = np.log(stocks/stocks.shift())
|
| 10 |
+
|
| 11 |
+
# Calculate the Correlation Coefficient for all Stocks
|
| 12 |
+
#stocksCorr = logRet.corr()
|
| 13 |
+
|
| 14 |
+
# Output to csv
|
| 15 |
+
#stocksCorr.to_csv (r'correlation_matrix.csv', index = None, header=True)
|
| 16 |
+
|
| 17 |
+
# Enter path of SimFin Data to convert to format for Calculations
|
| 18 |
+
def convert_simFin(path):
    """Load a SimFin daily share-price CSV and pivot it so that each ticker
    becomes a column of adjusted closing prices, indexed by Date.

    Args:
        path: Filesystem path to a semicolon-separated SimFin export with
            'Date', 'Ticker' and 'Adj. Close' columns.

    Returns:
        A wide DataFrame: Date index, one 'Adj. Close' column per ticker.
    """
    raw = pd.read_csv(path, sep=';')
    return raw.pivot(index="Date", columns="Ticker", values="Adj. Close")
|
| 22 |
+
|
| 23 |
+
# Calculate Log returns of the Formatted Stocks
|
| 24 |
+
def log_of_returns(stocks):
    """Compute the daily logarithmic returns of each price column.

    The first row comes out as NaN since the first observation has no
    predecessor to compare against.
    """
    previous = stocks.shift()
    return np.log(stocks / previous)
|
| 27 |
+
|
| 28 |
+
# Enter Log returns of Stocks to Calculate the Correlation Matrix.
|
| 29 |
+
def correlation_matrix(lr):
    """Return the pairwise correlation matrix of the supplied log returns."""
    corr = lr.corr()
    return corr
|
| 31 |
+
|
correlation_matrix.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a2eb8bf375aa94290f54caa2d0dd2e73e5b3139607599e979cc580885a84bd0b
|
| 3 |
+
size 169323527
|
data_and_sp500.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6e14ce41ef63008499438d16624d25f42b1f2defb2a86521d06ddf4d0dbc4960
|
| 3 |
+
size 18749516
|
ef.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from datetime import datetime as dt
|
| 4 |
+
from pypfopt.efficient_frontier import EfficientFrontier
|
| 5 |
+
import streamlit as st
|
| 6 |
+
import plotly.graph_objects as go
|
| 7 |
+
import plotly.express as px
|
| 8 |
+
from PIL import Image
|
| 9 |
+
|
| 10 |
+
### START AND RUN STREAMLIT
|
| 11 |
+
#https://docs.streamlit.io/library/get-started/installation
|
| 12 |
+
|
| 13 |
+
def ef_viz(stock_df,choices):
|
| 14 |
+
#st.write("EF Visualization KOI EDITS")
|
| 15 |
+
# st.header('CAPM Model and the Efficient Frontier')
|
| 16 |
+
|
| 17 |
+
symbols, weights, benchmark, investing_style, rf, A_coef = choices.values()
|
| 18 |
+
tickers = symbols
|
| 19 |
+
|
| 20 |
+
#tickers.append('sp500')
|
| 21 |
+
#st.write(tickers)
|
| 22 |
+
#st.write(stock_df)
|
| 23 |
+
|
| 24 |
+
# Yearly returns for individual companies
|
| 25 |
+
#https://stackoverflow.com/questions/69284773/unable-to-resample-the-pandas-with-date-column-typeerror-only-valid-with-dateti
|
| 26 |
+
stock_dff = stock_df.copy()
|
| 27 |
+
stock_dff['Date'] = pd.to_datetime(stock_dff['Date'])
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# ind_er_df = stock_dff.set_index('Date')
|
| 31 |
+
#st.write(stock_dff.columns)
|
| 32 |
+
ind_er_df = stock_dff.resample('Y', on = 'Date').last().pct_change().mean()
|
| 33 |
+
ind_er = ind_er_df[tickers]
|
| 34 |
+
#st.write(ind_er)
|
| 35 |
+
ann_sd = stock_df[tickers].pct_change().apply(lambda x: np.log(1+x)).std().apply(lambda x: x*np.sqrt(250))
|
| 36 |
+
assets = pd.concat([ind_er, ann_sd], axis=1) # Creating a table for visualising returns and volatility of assets
|
| 37 |
+
assets.columns = ['Returns', 'Volatility']
|
| 38 |
+
assets
|
| 39 |
+
#st.write(assets)
|
| 40 |
+
ln_pct_change = stock_df[tickers].pct_change().apply(lambda x: np.log(1+x))[1:]
|
| 41 |
+
#Cov Matrix
|
| 42 |
+
cov_matrix =ln_pct_change.cov()
|
| 43 |
+
|
| 44 |
+
## CREATE PORFOLIOS WEIGHTS
|
| 45 |
+
p_ret = [] # Define an empty array for portfolio returns
|
| 46 |
+
p_vol = [] # Define an empty array for portfolio volatility
|
| 47 |
+
p_weights = [] # Define an empty array for asset weights
|
| 48 |
+
|
| 49 |
+
num_assets = len(tickers)
|
| 50 |
+
num_portfolios = 1000
|
| 51 |
+
|
| 52 |
+
for portfolio in range(num_portfolios):
|
| 53 |
+
weights = np.random.random(num_assets)
|
| 54 |
+
weights = weights/np.sum(weights)
|
| 55 |
+
p_weights.append(weights)
|
| 56 |
+
returns = np.dot(weights, ind_er) # Returns are the product of individual expected returns of asset and its
|
| 57 |
+
# weights
|
| 58 |
+
p_ret.append(returns)
|
| 59 |
+
var = cov_matrix.mul(weights, axis=0).mul(weights, axis=1).sum().sum()# Portfolio Variance
|
| 60 |
+
sd = np.sqrt(var) # Daily standard deviation
|
| 61 |
+
ann_sd = sd*np.sqrt(250) # Annual standard deviation = volatility
|
| 62 |
+
p_vol.append(ann_sd)
|
| 63 |
+
|
| 64 |
+
data = {'Returns':p_ret, 'Volatility':p_vol}
|
| 65 |
+
|
| 66 |
+
for counter, symbol in enumerate(stock_df[tickers].columns.tolist()):
|
| 67 |
+
#print(counter, symbol)
|
| 68 |
+
data[symbol] = [w[counter] for w in p_weights]
|
| 69 |
+
|
| 70 |
+
port_ef_df = pd.DataFrame(data)
|
| 71 |
+
port_ef_df['Vol'] = port_ef_df['Volatility']
|
| 72 |
+
|
| 73 |
+
## NEEDS INPUT INSTEAD OF HARD CODE
|
| 74 |
+
#a = 5 #the coefficient of risk aversion is A. If an invest is less risk averse A is small. We assume 25 < A < 35.
|
| 75 |
+
#rf = 0.041
|
| 76 |
+
|
| 77 |
+
min_vol_port = port_ef_df.iloc[port_ef_df['Volatility'].idxmin()]
|
| 78 |
+
optimal_risky_port = port_ef_df.iloc[((port_ef_df['Returns']-rf)/port_ef_df['Volatility']).idxmax()]
|
| 79 |
+
|
| 80 |
+
### Make DF and data string for when hover over data points
|
| 81 |
+
def make_op_df(df, tickers):
|
| 82 |
+
new = {}
|
| 83 |
+
op_str = str()
|
| 84 |
+
new['Returns'] = df[0]
|
| 85 |
+
new['Volatility'] = df[1]
|
| 86 |
+
|
| 87 |
+
for i in range(0,len(tickers)):
|
| 88 |
+
new[tickers[i]]= df[i+2]
|
| 89 |
+
op_str += str(tickers[i]) + ': ' + str(round(df[i+2],4)) + '<br>'
|
| 90 |
+
|
| 91 |
+
return pd.DataFrame(new, index=[0]), op_str
|
| 92 |
+
|
| 93 |
+
op_df, op_str = make_op_df(optimal_risky_port, tickers)
|
| 94 |
+
|
| 95 |
+
def make_port_str(df, tickers):
    """Return one hover string per portfolio row, listing each ticker's weight.

    `df` holds one column per ticker; each output entry is a
    '<br>'-separated "TICKER: weight" string for the matching row.
    """
    hover_strings = []
    for row in range(len(df)):
        fragments = [str(t) + ': ' + str(round(df[t][row], 4)) + '<br>'
                     for t in tickers]
        hover_strings.append(''.join(fragments))
    return hover_strings
|
| 104 |
+
|
| 105 |
+
port_str_lst = make_port_str(port_ef_df, tickers)
|
| 106 |
+
|
| 107 |
+
## CREATE CAPM LINE #https://www.youtube.com/watch?v=JWx2wcrSGkk
|
| 108 |
+
cal_x = []
|
| 109 |
+
cal_y = []
|
| 110 |
+
utl = []
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
for er in np.linspace(rf, max(data['Returns'])+rf,20):
|
| 115 |
+
sd = (er - rf)/ ((optimal_risky_port[0] - rf)/ optimal_risky_port[1])
|
| 116 |
+
u = er - 0.5*A_coef*(sd**2)
|
| 117 |
+
cal_x.append(sd)
|
| 118 |
+
cal_y.append(er)
|
| 119 |
+
utl.append(u)
|
| 120 |
+
|
| 121 |
+
data2 = {'Utility':utl, 'cal_x':cal_x, 'cal_y':cal_y}
|
| 122 |
+
|
| 123 |
+
utl_df = pd.DataFrame(data2)
|
| 124 |
+
|
| 125 |
+
## Create Figure
|
| 126 |
+
fig3 = go.Figure()
|
| 127 |
+
|
| 128 |
+
#https://plotly.com/python/colorscales/
|
| 129 |
+
fig3.add_trace(go.Scatter(x=port_ef_df['Volatility'], y=port_ef_df['Returns'], hovertemplate='Volatility: %{x} <br>Returns: %{y} <br>%{text}',\
|
| 130 |
+
text= port_str_lst, mode='markers', \
|
| 131 |
+
marker=dict(color=port_ef_df['Volatility'], colorbar=dict(title="Volatility"), \
|
| 132 |
+
size=port_ef_df['Returns']*50, cmax=max(port_ef_df['Volatility']),\
|
| 133 |
+
cmin=min(port_ef_df['Volatility'])),name='Portfolio'))
|
| 134 |
+
#, mode='markers', size=port_ef_df['Returns'], \
|
| 135 |
+
#size_max=30, color=port_ef_df['Vol']))
|
| 136 |
+
fig3.add_trace(go.Scatter(x=utl_df['cal_x'], y=utl_df['cal_y'], mode='lines', line = dict(color='rgba(11,156,49,1)'),name='Ultility Function',\
|
| 137 |
+
hovertemplate='Volatility: %{x} <br>Returns: %{y}')) #))
|
| 138 |
+
|
| 139 |
+
fig3.add_trace(go.Scatter(x=op_df['Volatility'], y=op_df['Returns'], mode='markers', \
|
| 140 |
+
marker=dict(color= 'rgba(11,156,49,1)', size=30),\
|
| 141 |
+
hovertemplate='Volatility: %{x} <br>Returns: %{y} <br>%{text}',\
|
| 142 |
+
text=[op_str]))
|
| 143 |
+
### HOVER TEMPLATE # https://plotly.com/python/hover-text-and-formatting/
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
# ### SAVE IN CASE CANNOT FIGURE OUT THE HOVER TEMPLATE
|
| 147 |
+
# fig2 = px.scatter(op_df, 'Volatility', 'Returns')
|
| 148 |
+
# fig2.update_traces(marker=dict(color= 'rgba(11,156,49,1)', size=35))
|
| 149 |
+
|
| 150 |
+
# fig1 = px.line(utl_df, x="cal_x", y="cal_y")
|
| 151 |
+
# #fig1.update_traces(line=dict(color = 'rgba(11,156,49,1)'))
|
| 152 |
+
|
| 153 |
+
# fig = px.scatter(port_ef_df, 'Volatility', 'Returns', size='Returns', size_max=30, color='Vol')
|
| 154 |
+
# #https://stackoverflow.com/questions/59057881/python-plotly-how-to-customize-hover-template-on-with-what-information-to-show
|
| 155 |
+
# #https://stackoverflow.com/questions/65124833/plotly-how-to-combine-scatter-and-line-plots-using-plotly-express
|
| 156 |
+
|
| 157 |
+
# #data3 =
|
| 158 |
+
# fig3.data = [fig2.data,fig1.data,fig.data]
|
| 159 |
+
# #fig3.update_traces(line=dict(color = 'rgba(11,156,49,1)'))
|
| 160 |
+
# ####
|
| 161 |
+
|
| 162 |
+
fig3.update_layout(showlegend=False)#, legend_title_text = "Contestant")
|
| 163 |
+
fig3.update_xaxes(title_text="Volatility")
|
| 164 |
+
fig3.update_yaxes(title_text="Portfolio Return Rates")
|
| 165 |
+
|
| 166 |
+
st.plotly_chart(fig3, use_container_width=True)
|
| 167 |
+
|
| 168 |
+
#st.write(op_str)
|
| 169 |
+
op_df = op_df.style.set_properties(**{'color':'green'})
|
| 170 |
+
st.subheader('Optimal Returns vs Volatility and Portfolio weights')
|
| 171 |
+
col1, col2, col3 = st.columns([1,6,1])
|
| 172 |
+
with col1:
|
| 173 |
+
st.write("")
|
| 174 |
+
|
| 175 |
+
with col2:
|
| 176 |
+
st.write(op_df)
|
| 177 |
+
|
| 178 |
+
with col3:
|
| 179 |
+
st.write("")
|
| 180 |
+
|
| 181 |
+
im = Image.open('EFvsMinvar.png')
|
| 182 |
+
st.subheader('Understand the Efficient Frontier')
|
| 183 |
+
col1, col2, col3 = st.columns([1,6,1])
|
| 184 |
+
|
| 185 |
+
with col1:
|
| 186 |
+
st.write("")
|
| 187 |
+
|
| 188 |
+
with col2:
|
| 189 |
+
st.image(im, caption='Elements of the Efficient Frontier',use_column_width='auto')
|
| 190 |
+
|
| 191 |
+
with col3:
|
| 192 |
+
st.write("")
|
| 193 |
+
|
plots.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import seaborn as sns
|
| 3 |
+
import streamlit as st
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
import numpy as np
|
| 6 |
+
import altair as alt
|
| 7 |
+
import plotly.express as px
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def beta(stock_df, choices):
    """Compute and display each asset's beta and alpha versus the chosen benchmark.

    Parameters
    ----------
    stock_df : DataFrame of daily prices, one column per ticker (including the
        benchmark column).
    choices : dict whose values are, in order:
        symbols, weights, benchmark, investing_style, rf, A_coef.

    Beta is the slope (and alpha the intercept) of a degree-1 polyfit of each
    asset's daily percentage return against the benchmark's daily percentage
    return. Results are shown as two Streamlit dataframes side by side.
    """
    symbols, weights, benchmark, investing_style, rf, A_coef = choices.values()
    # Build the column list without mutating the caller's `symbols` list.
    # (The original aliased `symbols`, appended the benchmark, then removed
    # it again at the end -- same net effect, but fragile for callers.)
    tickers = list(symbols) + [benchmark]
    selected_stocks = stock_df[tickers]

    # Vectorized daily percentage change; the first row has no predecessor and
    # is set to 0, matching the original element-wise loop. This also fixes a
    # bug where the original only converted columns[1:], leaving the first
    # selected ticker as raw prices when fed to polyfit below.
    df_stocks = selected_stocks.pct_change().fillna(0) * 100

    beta_list = []
    alpha_list = []
    stocks_daily_return = df_stocks
    for col in stocks_daily_return.columns:
        if col != 'Date' and col != benchmark:
            # slope = beta, intercept = alpha of asset vs. benchmark returns
            b, a = np.polyfit(stocks_daily_return[benchmark],
                              stocks_daily_return[col], 1)
            beta_list.append(round(b, 2))
            alpha_list.append(round(a, 2))

    beta = {'Assets': list(symbols), 'Beta': beta_list}
    alpha = {'Assets': list(symbols), 'Alpha': alpha_list}

    # Display the two tables side by side in Streamlit.
    st.subheader('Beta and Alpha of Assets Compared to S&P500 index')
    col1, col2 = st.columns(2)

    with col1:
        st.dataframe(beta)
    with col2:
        st.dataframe(alpha)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def ER(stock_df, choices):
    """Compute CAPM expected annual returns per asset and for the portfolio,
    plot them as a Plotly bar chart in Streamlit, and return the per-asset
    betas plus each position's cash value.

    Parameters (via choices.values(), in order):
        symbols, weights, benchmark, investing_style, rf, A_coef.
    Returns:
        (beta, cash_value_stocks) where beta maps ticker -> rounded beta and
        cash_value_stocks lists each position's latest-price * quantity.
    NOTE(review): `rf` from choices is shadowed by rf = 0 below -- the
    user-supplied risk-free rate is never used here; confirm intended.
    """
    symbols, weights, benchmark, investing_style, rf, A_coef = choices.values()
    # Keep an untouched copy of the asset list for labeling the output.
    symbols_ =symbols.copy()
    # NOTE(review): `tickers` aliases `symbols`, so the append below mutates
    # the caller's list; symbols.remove(benchmark) further down restores it.
    tickers = symbols
    tickers.append(benchmark)
    #print(tickers)
    quantity = weights
    selected_stocks = stock_df[tickers]
    # calculating daily return
    # loops through each stocks
    # loops through each row belonging to the stock
    # calculates the percentage change from previous day
    # sets the value of first row to zero since there is no previous value
    df_stocks = selected_stocks.copy()

    # NOTE(review): columns[1:] skips the first selected column, so the first
    # ticker's prices are never converted to returns -- confirm whether a
    # 'Date' column was expected in position 0.
    for i in selected_stocks.columns[1:]:
        for j in range(1, len(selected_stocks)):
            df_stocks[i][j] = ((selected_stocks[i][j] - selected_stocks[i][j - 1]) / selected_stocks[i][j - 1]) * 100
        df_stocks[i][0] = 0
    beta = {}
    alpha = {}
    stocks_daily_return = df_stocks
    # print(df_stocks)

    # Fit each asset's daily return against the benchmark's:
    # slope b = beta, intercept a = alpha.
    for i in stocks_daily_return.columns:
        if i != 'Date' and i != benchmark:
            # stocks_daily_return.plot(kind = 'scatter', x = 'A', y = i)
            b, a = np.polyfit(stocks_daily_return[benchmark], stocks_daily_return[i], 1)
            # plt.plot(stocks_daily_return['sp500'], b * stocks_daily_return['sp500'] + a, '-', color = 'r')
            beta[i] = round(b, 2)
            alpha[i] = round(a, 2)
    # plt.show()

    # CAPM expected return per asset: ER = rf + beta * (rm - rf).
    keys = list(beta.keys())
    ER_ = []
    # rf = 0 assuming risk-free rate of 0 (overrides the rf from choices).
    rf = 0
    # rm: annualized benchmark return (mean daily % return * 252 trading days).
    rm = stocks_daily_return[benchmark].mean() * 252
    for i in keys:
        ER_.append( round(rf + (beta[i] * (rm - rf)), 2))

    #for i in keys:
    # print('Expected Return based on CAPM for {} is {}%'.format(i, ER_[i]))
    #print(ER)
    # Undo the earlier in-place append so the caller's symbols list is restored.
    symbols.remove(benchmark)
    #st.subheader('Expected Annual Return Based on CAPM Model')

    Expected_return = {'Assets': symbols_, 'Expected Annual Return': ER_}
    # Creates a header for streamlit
    #st.dataframe(Expected_return)


    # Portfolio weights derived from current cash value of each position.
    # NOTE(review): after the remove() above, tickers excludes the benchmark;
    # assumes len(quantity) >= len(tickers) -- confirm against caller.
    portfolio_weights = []
    current_cash_value = 0
    total_portfolio_value = 0
    cash_value_stocks =[]
    for i in range(len(tickers) ):
        stocks_name = tickers[i]
        # Latest available price for this ticker.
        current_cash_value = selected_stocks[stocks_name].iloc[-1]
        stocks_quantity = quantity[i]
        cash_value = stocks_quantity * current_cash_value
        cash_value_stocks.append(cash_value)
        total_portfolio_value += cash_value
        portfolio_weights.append(cash_value)
    #print(portfolio_weights)
    # NOTE(review): list / scalar only works because the cash values are numpy
    # scalars, so numpy broadcasting converts the list to an ndarray.
    portfolio_weights = (portfolio_weights / total_portfolio_value)*100
    ER_portfolio= []
    # Weighted average of the per-asset expected returns (weights are in %).
    ER_portfolio = sum(list(ER_) * portfolio_weights)/100
    #print(ER_portfolio)

    #st.subheader('Expected Portfolio Return Based on CAPM Model')
    # Creates a header for streamlit
    #st.write('Expected Portfolio Return is:', ER_portfolio)
    # Append the aggregate portfolio bar alongside the individual assets.
    Bar_output = Expected_return.copy()
    Bar_output['Assets'].append('Portfolio')
    Bar_output['Expected Annual Return'].append(ER_portfolio)
    fig = px.bar(Bar_output, x='Assets', y="Expected Annual Return",color='Assets')
    #fig.update_layout(title_text = 'Annual Expected Return of the Assets and Portfolio',title_x=0.458)
    st.subheader('Annual Expected Return of the Assets and Portfolio')
    st.plotly_chart(fig, use_container_width=True)

    return beta, cash_value_stocks
|
| 142 |
+
|
| 143 |
+
def basic_portfolio(stock_df):
    """Plot the normalized cumulative growth of each asset in *stock_df*.

    Daily percentage changes are compounded into a cumulative-return series
    (starting near 1.0) and rendered as a Streamlit line chart.
    """
    # Daily percentage change per asset, after dropping missing rows.
    pct_changes = stock_df.dropna().pct_change()
    # Compound the daily changes into a cumulative growth factor.
    growth = (pct_changes + 1).cumprod()
    st.line_chart(growth)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def display_heat_map(stock_df, choices):
    """Render a correlation heatmap for the selected portfolio assets.

    Parameters
    ----------
    stock_df : DataFrame of prices, one column per ticker.
    choices : dict whose values are, in order:
        symbols, weights, benchmark, investing_style, rf, A_coef.
        Only `symbols` is used here.
    """
    symbols, weights, benchmark, investing_style, rf, A_coef = choices.values()
    selected_stocks = stock_df[symbols]
    # Pairwise correlation of the selected assets' price series.
    price_correlation = selected_stocks.corr()

    # Build the heatmap directly with Plotly Express. (The original also
    # created an unused matplotlib figure via plt.subplots(), which leaked a
    # figure handle per call -- removed.)
    fig = px.imshow(price_correlation, text_auto=True, aspect="auto")
    # Displays the heatmap on streamlit
    st.write(fig)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
#def display_portfolio_return(stock_df, choices):
|
| 171 |
+
"""Uses the stock dataframe and the chosen weights from choices to calculate and graph the historical cumulative portfolio return.
|
| 172 |
+
"""
|
| 173 |
+
# symbols, weights, investment = choices.values()
|
| 174 |
+
|
| 175 |
+
# Calculates the daily percentage returns of the
|
| 176 |
+
# daily_returns = stock_df.pct_change().dropna()
|
| 177 |
+
# Applies the weights of each asset to the portfolio
|
| 178 |
+
# portfolio_returns = daily_returns.dot(weights)
|
| 179 |
+
# Calculates the cumulative weighted portfolio return
|
| 180 |
+
# cumulative_returns = (1 + portfolio_returns).cumprod()
|
| 181 |
+
# Calculates the cumulative profit using the cumulative portfolio return
|
| 182 |
+
# cumulative_profit = investment * cumulative_returns
|
| 183 |
+
|
| 184 |
+
# Graphs the result, and displays it with a header on streamlit
|
| 185 |
+
# st.subheader('Portfolio Historical Cumulative Returns Based On Inputs!')
|
| 186 |
+
# st.line_chart(cumulative_profit)
|
| 187 |
+
def buble_interactive(stock_df, choices):
    """Show an interactive bubble chart of quantity vs. beta per asset.

    Bubble size is the position's cash value; colour is a coarse risk bucket
    derived from beta (< 0.3 conservative, 0.3-1.1 moderate, > 1.1 risky).
    Betas and cash values come from ER().
    """
    symbols, weights, benchmark, investing_style, rf, A_coef = choices.values()
    beta, cash_value_weights = ER(stock_df, choices)

    beta_values = []
    risk_labels = []
    for b in beta.values():
        beta_values.append(b)
        # Disjoint buckets, so if/elif is equivalent to the original chain.
        if b < 0.3:
            risk_labels.append("Conservative")
        elif b <= 1.1:
            risk_labels.append("Moderate Risk")
        else:
            risk_labels.append("Risky")

    df_final = pd.DataFrame()
    df_final['ticker'] = symbols
    df_final['quantities'] = weights
    df_final['cash_value'] = cash_value_weights
    df_final['Beta'] = beta_values
    df_final['Risk'] = risk_labels

    fig = px.scatter(
        df_final,
        x="quantities",
        y="Beta",
        size="cash_value",
        color="Risk",
        hover_name="ticker",
        log_x=True,
        size_max=60,
    )
    fig.update_layout(title= benchmark +"Benchmark - Beta of Stock Ticker to Quantity")
    # -- Input the Plotly chart to the Streamlit interface
    st.plotly_chart(fig, use_container_width=True)
|
requirements.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit
|
| 2 |
+
matplotlib
|
| 3 |
+
folium
|
| 4 |
+
pandas
|
| 5 |
+
seaborn
|
| 6 |
+
numpy
|
| 7 |
+
plotly
|
| 8 |
+
pyportfolioopt
|
| 9 |
+
Pillow
|
| 10 |
+
statsmodels
|
| 11 |
+
scikit-learn
|
sharp_ratio.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
import streamlit as st
|
| 5 |
+
import matplotlib.pyplot as plt
|
| 6 |
+
import plotly.express as px
|
| 7 |
+
#import plotly.graph_objects as go
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def cumulative_return(stocks, choices):
    """Plot each selected asset's price series normalized to its first value.

    Parameters
    ----------
    stocks : DataFrame of daily prices indexed by Date, one column per ticker.
    choices : dict whose values are, in order:
        symbols, weights, investing_style, benchmark, rf, A_coef.
        NOTE(review): this unpack order (investing_style before benchmark)
        differs from plots.py -- confirm the dict insertion order matches.

    Only the unique tickers in `symbols` are plotted; the weights are not used
    for this chart. A Plotly range selector and slider are attached to the
    x axis. (The original also normalized the weights and computed log returns
    into locals that nothing consumed, and left a debug print(df) -- all
    removed as dead code.)
    """
    symbols, weights, investing_style, benchmark, rf, A_coef = choices.values()

    # De-duplicate the tickers while preserving their order.
    unique_tickers = []
    for ticker in symbols:
        if ticker not in unique_tickers:
            unique_tickers.append(ticker)

    # Normalize every series to start at 1.0 so growth is comparable.
    df = stocks[unique_tickers]
    df = df / df.iloc[0]
    df.reset_index(inplace=True)
    df = pd.DataFrame(df)
    fig = px.line(df, x='Date', y=df.columns[1:, ])

    # layout reference = https://linuxtut.com/en/b13e3e721519c2842cc9/
    fig.update_layout(
        xaxis=dict(
            rangeselector=dict(
                buttons=list([
                    dict(count=1,
                         label="1m",
                         step="month",
                         stepmode="backward"),
                    dict(count=6,
                         label="6m",
                         step="month",
                         stepmode="backward"),
                    dict(count=1,
                         label="YTD",
                         step="year",
                         stepmode="todate"),
                    dict(count=1,
                         label="1y",
                         step="year",
                         stepmode="backward"),
                    dict(step="all")
                ])
            ),
            rangeslider=dict(
                visible=True
            ),
            type="date"
        )
    )
    fig.update_layout(xaxis=dict(rangeselector = dict(font = dict( color = "black"))))
    st.subheader('Portfolio Historical Normalized Cumulative Returns')

    st.plotly_chart(fig, use_container_width=True)
|
| 93 |
+
|
| 94 |
+
def sharp_ratio_func(stocks, choices):
    """Compute and chart the Sharpe ratio of each asset and of the portfolio.

    Parameters
    ----------
    stocks : long-format DataFrame with 'Date', 'Ticker' and 'Adj. Close'
        columns (pivoted to wide format internally).
    choices : dict whose values are, in order:
        symbols, weights, investing_style, benchmark, rf, A_coef.

    Sharpe ratios are computed on daily log returns as mean/std (no risk-free
    adjustment, matching the original implementation) and rendered as a
    Plotly bar chart in Streamlit.
    """
    symbols, weights, investing_style, benchmark, rf, A_coef = choices.values()
    # The original called an undefined `preprocess` helper here (NameError at
    # runtime). From its usage it supplied copies of the ticker/weight lists;
    # its log-return output was recomputed below anyway, so inline the copies.
    tickers_list = symbols.copy()
    weights_list = weights.copy()
    tkers = sorted(set(stocks['Ticker'].unique()))

    # Wide format: one column of adjusted closes per ticker.
    stocks = stocks.pivot(index="Date", columns="Ticker", values="Adj. Close")

    # Normalize the user weights so they sum to 1, mapped to the chosen tickers.
    stock_port = {ticker: 0 for ticker in tickers_list}
    weights = [float(x) for x in weights_list]
    total_weight = sum(weights)
    normalized = [w / total_weight for w in weights]
    for ticker, w in zip(stock_port, normalized):
        stock_port[ticker] = w

    # Full weight vector over every ticker in the data (0 for unselected).
    portfolio_dict = dict.fromkeys(tkers, 0)
    portfolio_dict.update(stock_port)

    # Daily log returns; hoisted out of the per-ticker loop below, where the
    # original recomputed the identical frame on every iteration.
    logRet = np.log(stocks / stocks.shift())

    sharp_ratio_list = []
    for ticker in symbols:
        # Weight vector selecting just this one ticker.
        stk = dict.fromkeys(tkers, 0)
        stk.update({ticker: 1})
        ttlStk = np.sum(logRet * stk, axis=1)
        stock_sharpe_ratio = ttlStk.mean() / ttlStk.std()
        sharp_ratio_list.append(stock_sharpe_ratio)

    # Use a copy of symbols: the original stored the list itself, so appending
    # 'Portfolio' below mutated the caller's symbols list.
    sharp_ratio = {'Assets': list(symbols), 'Sharpe Ratio': sharp_ratio_list}

    # Portfolio Sharpe ratio: weighted sum of log returns across all tickers.
    totalPortfolio = np.sum(logRet * portfolio_dict, axis=1)
    portfolio_sharpe_ratio = totalPortfolio.mean() / totalPortfolio.std()

    sharp_ratio['Assets'].append('Portfolio')
    sharp_ratio['Sharpe Ratio'].append(portfolio_sharpe_ratio)

    fig = px.bar(sharp_ratio, x='Assets', y="Sharpe Ratio", color='Assets')
    fig.update_layout(title_text = 'Sharpe Ratio of the Assets and Portfolio',
                      title_x=0.458)
    st.plotly_chart(fig, use_container_width=True)
|
| 146 |
+
|
us-shareprices-daily.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:104d6137243993225d2098604004dc3bbbfe5e9411afd8f8d8da5447ff6e7c04
|
| 3 |
+
size 220037366
|