code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
__docformat__ = "numpy"
import logging
from typing import Tuple
import numpy as np
import pandas as pd
import pandas_ta as ta
from sklearn.linear_model import LinearRegression
from openbb_terminal.decorators import log_start_end
from openbb_terminal.common.technical_analysis import ta_helpers
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def cci(
    data: pd.DataFrame,
    window: int = 14,
    scalar: float = 0.0015,
) -> pd.DataFrame:
    """Commodity channel index

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of OHLC prices
    window: int
        Length of window
    scalar: float
        Scalar variable

    Returns
    -------
    pd.DataFrame
        Dataframe of technical indicator
    """
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return pd.DataFrame()
    df_ta = ta.cci(
        high=data["High"],
        low=data["Low"],
        close=data[close_col],
        length=window,
        scalar=scalar,
    )
    # pandas_ta can return None when there is not enough data for the window
    if df_ta is None:
        return pd.DataFrame()
    return pd.DataFrame(df_ta.dropna())
@log_start_end(log=logger)
def macd(
    data: pd.Series,
    n_fast: int = 12,
    n_slow: int = 26,
    n_signal: int = 9,
) -> pd.DataFrame:
    """Moving average convergence divergence

    Parameters
    ----------
    data: pd.Series
        Values for calculation
    n_fast : int
        Fast period
    n_slow : int
        Slow period
    n_signal : int
        Signal period

    Returns
    -------
    pd.DataFrame
        Dataframe of technical indicator
    """
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a DataFrame.[/red]\n")
        return pd.DataFrame()
    df_ta = ta.macd(data, fast=n_fast, slow=n_slow, signal=n_signal)
    # pandas_ta can return None when the series is shorter than the slow period
    if df_ta is None:
        return pd.DataFrame()
    return pd.DataFrame(df_ta.dropna())
@log_start_end(log=logger)
def rsi(
    data: pd.Series, window: int = 14, scalar: float = 100, drift: int = 1
) -> pd.DataFrame:
    """Relative strength index

    Parameters
    ----------
    data: pd.Series
        Series of prices
    window: int
        Length of window
    scalar: float
        Scalar variable
    drift: int
        Drift variable

    Returns
    -------
    pd.DataFrame
        Dataframe of technical indicator
    """
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a DataFrame.[/red]\n")
        return pd.DataFrame()
    result = ta.rsi(data, length=window, scalar=scalar, drift=drift)
    # pandas_ta may yield None or an empty frame when the input is too short
    if result is None or result.empty:
        return pd.DataFrame()
    return pd.DataFrame(result.dropna())
@log_start_end(log=logger)
def stoch(
    data: pd.DataFrame,
    fastkperiod: int = 14,
    slowdperiod: int = 3,
    slowkperiod: int = 3,
):
    """Stochastic oscillator

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of OHLC prices
    fastkperiod : int
        Fast k period
    slowdperiod : int
        Slow d period
    slowkperiod : int
        Slow k period

    Returns
    -------
    pd.DataFrame
        Dataframe of technical indicator
    """
    # Bail out early when the expected OHLC columns are missing
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return pd.DataFrame()
    result = ta.stoch(
        high=data["High"],
        low=data["Low"],
        close=data[close_col],
        k=fastkperiod,
        d=slowdperiod,
        smooth_k=slowkperiod,
    )
    return pd.DataFrame(result.dropna())
@log_start_end(log=logger)
def fisher(data: pd.DataFrame, window: int = 14) -> pd.DataFrame:
    """Fisher Transform

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of OHLC prices
    window: int
        Length for indicator window

    Returns
    -------
    pd.DataFrame
        Dataframe of technical indicator
    """
    # Fisher only needs High/Low, so the close-column requirement is relaxed
    if ta_helpers.check_columns(data, close=False) is None:
        return pd.DataFrame()
    result = ta.fisher(high=data["High"], low=data["Low"], length=window)
    return pd.DataFrame(result.dropna())
@log_start_end(log=logger)
def cg(values: pd.Series, window: int) -> pd.DataFrame:
    """Center of gravity

    Parameters
    ----------
    values: pd.Series
        Series of close values
    window: int
        Length for indicator window

    Returns
    -------
    pd.DataFrame
        Dataframe of technical indicator
    """
    df_ta = ta.cg(close=values, length=window)
    # pandas_ta can return None when there is not enough data for the window
    if df_ta is None:
        return pd.DataFrame()
    return pd.DataFrame(df_ta.dropna())
@log_start_end(log=logger)
def clenow_momentum(
    values: pd.Series, window: int = 90
) -> Tuple[float, float, pd.Series]:
    """Gets the Clenow Volatility Adjusted Momentum.  This is defined as the
    regression coefficient on log prices multiplied by the R^2 value of the
    regression.

    Parameters
    ----------
    values: pd.Series
        Values to perform regression for
    window: int
        Length of lookback period

    Returns
    -------
    float:
        R2 of fit to log data
    float:
        Coefficient of linear regression
    pd.Series:
        Values for best fit line

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.ta.clenow(df["Close"])
    """
    if len(values) < window:
        console.print(
            f"[red]Calculation asks for at least last {window} days of data[/red]"
        )
        return np.nan, np.nan, pd.Series()

    # Regress log prices of the lookback window against time steps
    recent = values[-window:]
    log_prices = np.log(recent)
    x_vals = np.arange(len(log_prices)).reshape(-1, 1)

    model = LinearRegression()
    model.fit(x_vals, log_prices)
    r2 = model.score(x_vals, log_prices)
    slope = model.coef_[0]

    # Annualize the daily log-price slope over 252 trading days
    annualized_coef = (np.exp(slope) ** 252) - 1

    return r2, annualized_coef, pd.Series(model.predict(x_vals))
@log_start_end(log=logger)
def demark_seq(values: pd.Series) -> pd.DataFrame:
    """Get the integer value for demark sequential indicator

    Parameters
    ----------
    values: pd.Series
        Series of close values

    Returns
    -------
    pd.DataFrame
        Dataframe of UP and DOWN sequential indicators

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.ta.demark(df["Close"])
    """
    # asint=True returns the raw integer TD-sequential counts
    return ta.td_seq(values, asint=True)
__docformat__ = "numpy"
import logging
import pandas as pd
import pandas_ta as ta
from openbb_terminal.decorators import log_start_end
from openbb_terminal.common.technical_analysis import ta_helpers
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def ad(data: pd.DataFrame, use_open: bool = False) -> pd.DataFrame:
    """Calculate AD technical indicator

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of prices with OHLC and Volume
    use_open : bool
        Whether to use open prices

    Returns
    -------
    pd.DataFrame
        Dataframe with technical indicator
    """
    kwargs = {}
    if use_open:
        # pandas_ta expects the open series under the "open_" keyword (see the
        # sibling adosc below); passing it as "Open" was silently ignored, so
        # use_open previously had no effect on the result
        kwargs["open_"] = data["Open"]
    df_ta = ta.ad(
        high=data["High"],
        low=data["Low"],
        close=data["Close"],
        volume=data["Volume"],
        **kwargs
    ).dropna()
    return pd.DataFrame(df_ta)
@log_start_end(log=logger)
def adosc(
    data: pd.DataFrame, use_open: bool = False, fast: int = 3, slow: int = 10
) -> pd.DataFrame:
    """Calculate AD oscillator technical indicator

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of OHLC prices
    use_open : bool
        Whether to use open prices
    fast: int
        Fast value
    slow: int
        Slow value

    Returns
    -------
    pd.DataFrame
        Dataframe with technical indicator
    """
    # Only forward the open series when requested; avoids duplicating the call
    extra_kwargs = {"open_": data["Open"]} if use_open else {}
    df_ta = ta.adosc(
        high=data["High"],
        low=data["Low"],
        close=data["Close"],
        volume=data["Volume"],
        fast=fast,
        slow=slow,
        **extra_kwargs,
    ).dropna()
    return pd.DataFrame(df_ta)
@log_start_end(log=logger)
def obv(data: pd.DataFrame) -> pd.DataFrame:
    """On Balance Volume

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of OHLC prices

    Returns
    -------
    pd.DataFrame
        Dataframe with technical indicator
    """
    # OBV only needs close and volume, so High/Low are not required
    close_col = ta_helpers.check_columns(data, high=False, low=False)
    if close_col is None:
        return pd.DataFrame()
    if "Volume" not in data.columns:
        console.print("[red]Volume column not found[/red]\n")
        return pd.DataFrame()
    return pd.DataFrame(ta.obv(close=data[close_col], volume=data["Volume"]).dropna())
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.technical_analysis import volatility_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
reindex_dates,
is_valid_axes_count,
)
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_bbands(
    data: pd.DataFrame,
    symbol: str = "",
    window: int = 15,
    n_std: float = 2,
    mamode: str = "sma",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots bollinger bands

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    symbol : str
        Ticker symbol
    window : int
        Length of window to calculate BB
    n_std : float
        Number of standard deviations to show
    mamode : str
        Method of calculating average
    export : str
        Format of export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_ta = volatility_model.bbands(data, window, n_std, mamode)
    # Merge indicator onto prices so both share one reindexed integer axis
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Only a close column is required for the price line (High/Low not needed)
    close_col = ta_helpers.check_columns(data, high=False, low=False)
    if close_col is None:
        return
    ax.plot(
        plot_data.index,
        plot_data[close_col].values,
    )
    # NOTE(review): assumes bbands columns are ordered lower, middle, upper --
    # TODO confirm against volatility_model.bbands output
    ax.plot(
        plot_data.index,
        plot_data[df_ta.columns[0]].values,
        theme.down_color,
        linewidth=0.7,
    )
    ax.plot(plot_data.index, plot_data[df_ta.columns[1]].values, ls="--", linewidth=0.7)
    ax.plot(
        plot_data.index,
        plot_data[df_ta.columns[2]].values,
        theme.up_color,
        linewidth=0.7,
    )
    ax.set_title(f"{symbol} Bollinger Bands")
    ax.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax.set_ylabel("Share Price ($)")
    ax.legend([symbol, df_ta.columns[0], df_ta.columns[1], df_ta.columns[2]])
    # Shade the region between the lower and upper bands
    ax.fill_between(
        df_ta.index, df_ta.iloc[:, 0].values, df_ta.iloc[:, 2].values, alpha=0.1
    )
    theme.style_primary_axis(
        ax,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render the figure ourselves when we created the axes
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "bbands",
        df_ta,
    )
@log_start_end(log=logger)
def display_donchian(
    data: pd.DataFrame,
    symbol: str = "",
    upper_length: int = 20,
    lower_length: int = 20,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots donchian channels

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    symbol : str
        Ticker symbol
    upper_length : int
        Length of window to calculate upper channel
    lower_length : int
        Length of window to calculate lower channel
    export : str
        Format of export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_ta = volatility_model.donchian(data, upper_length, lower_length)
    # Merge indicator onto prices so both share one reindexed integer axis
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    ax.plot(plot_data.index, plot_data[close_col].values)
    # NOTE(review): assumes donchian columns are ordered upper, middle, lower --
    # TODO confirm against volatility_model.donchian output
    ax.plot(
        plot_data.index,
        plot_data[df_ta.columns[0]].values,
        linewidth=0.7,
        label="Upper",
    )
    ax.plot(plot_data.index, plot_data[df_ta.columns[1]].values, linewidth=0.7, ls="--")
    ax.plot(
        plot_data.index,
        plot_data[df_ta.columns[2]].values,
        linewidth=0.7,
        label="Lower",
    )
    # Shade the channel between the first and third indicator columns
    ax.fill_between(
        plot_data.index,
        plot_data[df_ta.columns[0]].values,
        plot_data[df_ta.columns[2]].values,
        alpha=0.1,
    )
    ax.set_title(f"{symbol} donchian")
    ax.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax.set_ylabel("Price ($)")
    ax.legend([symbol, df_ta.columns[0], df_ta.columns[1], df_ta.columns[2]])
    theme.style_primary_axis(
        ax,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render the figure ourselves when we created the axes
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "donchian",
        df_ta,
    )
@log_start_end(log=logger)
def view_kc(
    data: pd.DataFrame,
    window: int = 20,
    scalar: float = 2,
    mamode: str = "ema",
    offset: int = 0,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Keltner Channels Indicator

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of ohlc prices
    window: int
        Length of window
    scalar: float
        Scalar value
    mamode: str
        Type of filter
    offset: int
        Offset value
    symbol: str
        Ticker symbol
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_ta = volatility_model.kc(
        data,
        window,
        scalar,
        mamode,
        offset,
    )
    # Merge indicator onto prices so both share one reindexed integer axis
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    ax.plot(plot_data.index, plot_data[close_col].values)
    # NOTE(review): assumes kc columns are ordered upper, middle, lower --
    # TODO confirm against volatility_model.kc output
    ax.plot(
        plot_data.index,
        plot_data[df_ta.columns[0]].values,
        linewidth=0.7,
        label="Upper",
    )
    ax.plot(plot_data.index, plot_data[df_ta.columns[1]].values, linewidth=0.7, ls="--")
    ax.plot(
        plot_data.index,
        plot_data[df_ta.columns[2]].values,
        linewidth=0.7,
        label="Lower",
    )
    # Shade the channel between the first and third indicator columns
    ax.fill_between(
        plot_data.index,
        plot_data[df_ta.columns[0]].values,
        plot_data[df_ta.columns[2]].values,
        alpha=0.1,
    )
    ax.set_title(f"{symbol} Keltner Channels")
    ax.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax.set_ylabel("Price")
    ax.legend([symbol, df_ta.columns[0], df_ta.columns[1], df_ta.columns[2]])
    theme.style_primary_axis(
        ax,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render the figure ourselves when we created the axes
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "kc",
        df_ta,
    )
@log_start_end(log=logger)
def display_atr(
    data: pd.DataFrame,
    symbol: str = "",
    window: int = 14,
    mamode: str = "sma",
    offset: int = 0,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots ATR

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    symbol : str
        Ticker symbol
    window : int
        Length of window used to calculate the ATR
    mamode : str
        Type of moving average used to smooth the ATR
    offset : int
        Offset value
    export : str
        Format of export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df_ta = volatility_model.atr(data, window=window, mamode=mamode, offset=offset)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2, 1, figsize=plot_autoscale(), sharex=True, dpi=PLOT_DPI
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    # Merge indicator onto prices so both share one reindexed integer axis
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # NOTE(review): column 0 is the reindexed "date" column, so column 1 is
    # assumed to be the price series -- TODO confirm merged column order
    ax1.plot(plot_data.index, plot_data.iloc[:, 1].values, color=theme.get_colors()[0])
    ax1.set_title(f"{symbol} ATR")
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Share Price ($)")
    theme.style_primary_axis(
        ax=ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    ax2.plot(
        plot_data.index, plot_data[df_ta.columns[0]].values, color=theme.get_colors()[1]
    )
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax2.set_ylabel("ATR")
    theme.style_primary_axis(
        ax=ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render the figure ourselves when we created the axes
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "atr",
        df_ta,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
import mplfinance as mpf
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.technical_analysis import overlap_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
reindex_dates,
lambda_long_number_format_y_axis,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def view_ma(
    data: pd.Series,
    window: List[int] = None,
    offset: int = 0,
    ma_type: str = "EMA",
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots MA technical indicator

    Parameters
    ----------
    data: pd.Series
        Series of prices
    window: List[int]
        Lengths of the moving-average windows, by default [20, 50]
    offset: int
        Offset variable
    ma_type: str
        Type of moving average. One of "EMA", "SMA", "WMA", "HMA" or "ZLMA"
    symbol: str
        Ticker
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.ta.ma_chart(data=df["Adj Close"], symbol="AAPL", ma_type="EMA", window=[20, 50, 100])

    >>> from openbb_terminal.sdk import openbb
    >>> spuk_index = openbb.economy.index(indices = ["^SPUK"])
    >>> openbb.ta.ma_chart(data = spuk_index["^SPUK"], symbol = "S&P UK Index", ma_type = "EMA", window = [20, 50, 100])
    """
    # Dispatch table instead of an if/elif chain: an unrecognized ma_type used
    # to leave df_ta unbound and raise a NameError on the join below
    ma_funcs = {
        "EMA": overlap_model.ema,
        "SMA": overlap_model.sma,
        "WMA": overlap_model.wma,
        "HMA": overlap_model.hma,
        "ZLMA": overlap_model.zlma,
    }
    ma_key = ma_type.upper()
    ma_func = ma_funcs.get(ma_key)
    if ma_func is None:
        console.print(f"[red]Moving average type {ma_type} not recognized[/red]\n")
        return

    # Define a dataframe for adding the moving-average series to it
    price_df = pd.DataFrame(data)
    price_df.index.name = "date"
    l_legend = [symbol]
    if not window:
        window = [20, 50]

    for win in window:
        df_ta = ma_func(data, win, offset)
        l_legend.append(f"{ma_key} {win}")
        price_df = price_df.join(df_ta)

    plot_data = reindex_dates(price_df)
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Column 0 is the reindexed "date" column; column 1 is the price series
    ax.plot(plot_data.index, plot_data.iloc[:, 1].values)
    ax.set_xlim([plot_data.index[0], plot_data.index[-1]])
    ax.set_ylabel(f"{symbol} Price")
    # Remaining columns are the joined moving-average series
    for idx in range(2, plot_data.shape[1]):
        ax.plot(plot_data.iloc[:, idx])
    ax.set_title(f"{symbol} {ma_key}")
    ax.legend(l_legend)
    theme.style_primary_axis(
        ax,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        f"{ma_type.lower()}{'_'.join([str(win) for win in window])}",
        price_df,
    )
@log_start_end(log=logger)
def view_vwap(
    data: pd.DataFrame,
    symbol: str = "",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    offset: int = 0,
    interval: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots VWAP technical indicator

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of OHLC prices
    symbol : str
        Ticker
    start_date: Optional[str]
        Initial date, format YYYY-MM-DD
    end_date: Optional[str]
        Final date, format YYYY-MM-DD
    offset : int
        Offset variable
    interval : str
        Interval of data
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    # Intraday data may come tz-aware; strip tz so .date() comparisons work
    data.index = data.index.tz_localize(None)
    if start_date is None:
        start = data.index[0].date()
        console.print(f"No start date specified. Start date: {start}")
    else:
        # Normalize the YYYY-MM-DD string to a date object so the comparisons
        # and strftime calls below also work for user-supplied dates
        start = pd.to_datetime(start_date).date()
    if end_date is None:
        end = data.index[-1].date()
        console.print(f"No end date specified. End date: {end}")
    else:
        end = pd.to_datetime(end_date).date()
    day_df = data[(start <= data.index.date) & (data.index.date <= end)]
    if len(day_df) == 0:
        console.print(
            f"[red]No data found between {start.strftime('%Y-%m-%d')} and {end.strftime('%Y-%m-%d')}\n[/red]"
        )
        return
    df_vwap = overlap_model.vwap(day_df, offset)
    candle_chart_kwargs = {
        "type": "candle",
        "style": theme.mpf_style,
        "volume": True,
        "xrotation": theme.xticks_rotation,
        "scale_padding": {"left": 0.3, "right": 1.2, "top": 0.8, "bottom": 0.8},
        "update_width_config": {
            "candle_linewidth": 0.6,
            "candle_width": 0.8,
            "volume_linewidth": 0.8,
            "volume_width": 0.8,
        },
        "warn_too_much_data": 10000,
    }
    # This plot has 2 axes
    if external_axes is None:
        candle_chart_kwargs["returnfig"] = True
        candle_chart_kwargs["figratio"] = (10, 7)
        candle_chart_kwargs["figscale"] = 1.10
        candle_chart_kwargs["figsize"] = plot_autoscale()
        candle_chart_kwargs["addplot"] = mpf.make_addplot(
            df_vwap, width=theme.line_width
        )
        fig, ax = mpf.plot(day_df, **candle_chart_kwargs)
        fig.suptitle(
            f"{symbol} {interval} VWAP",
            x=0.055,
            y=0.965,
            horizontalalignment="left",
        )
        lambda_long_number_format_y_axis(day_df, "Volume", ax)
        theme.visualize_output(force_tight_layout=False)
    elif is_valid_axes_count(external_axes, 3):
        # Caller supplies candle, volume and VWAP overlay axes respectively
        (ax1, ax2, ax3) = external_axes
        candle_chart_kwargs["ax"] = ax1
        candle_chart_kwargs["volume"] = ax2
        candle_chart_kwargs["addplot"] = mpf.make_addplot(
            df_vwap, width=theme.line_width, ax=ax3
        )
        mpf.plot(day_df, **candle_chart_kwargs)
    else:
        return
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "VWAP",
        df_vwap,
    )
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.technical_analysis import trend_indicators_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
reindex_dates,
is_valid_axes_count,
)
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def display_adx(
    data: pd.DataFrame,
    window: int = 14,
    scalar: int = 100,
    drift: int = 1,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots ADX indicator

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe with OHLC price data
    window : int
        Length of window
    scalar : int
        Scalar variable
    drift : int
        Drift variable
    symbol : str
        Ticker
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df_ta = trend_indicators_model.adx(
        data=data,
        window=window,
        scalar=scalar,
        drift=drift,
    )
    # Merge indicator onto prices so both share one reindexed integer axis
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 2 axes
    if not external_axes:
        _, axes = plt.subplots(
            2, 1, sharex=True, figsize=plot_autoscale(), dpi=PLOT_DPI
        )
        ax1, ax2 = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    # NOTE(review): requires a literal "Close" column (no ta_helpers
    # check_columns fallback, unlike display_aroon) -- KeyError otherwise
    ax1.plot(plot_data.index, plot_data["Close"].values)
    ax1.set_title(f"Average Directional Movement Index (ADX) on {symbol}")
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Price")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # NOTE(review): assumes adx columns are ordered ADX, +DI, -DI (matches the
    # legend below) -- TODO confirm against trend_indicators_model.adx
    ax2.plot(plot_data.index, plot_data[df_ta.columns[0]].values)
    ax2.plot(plot_data.index, plot_data[df_ta.columns[1]].values, color=theme.up_color)
    ax2.plot(
        plot_data.index, plot_data[df_ta.columns[2]].values, color=theme.down_color
    )
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    # Conventional ADX trend-strength threshold line
    ax2.axhline(25, ls="--")
    ax2.legend(
        [
            f"ADX ({df_ta.columns[0]})",
            f"+DI ({df_ta.columns[1]})",
            f"-DI ({df_ta.columns[2]})",
        ]
    )
    ax2.set_ylim([0, 100])
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render the figure ourselves when we created the axes
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "adx",
        df_ta,
    )
@log_start_end(log=logger)
def display_aroon(
    data: pd.DataFrame,
    window: int = 25,
    scalar: int = 100,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Aroon indicator

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe with OHLC price data
    window: int
        Length of window
    scalar: int
        Scalar variable
    symbol: str
        Ticker
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    df_ta = trend_indicators_model.aroon(
        data=data,
        window=window,
        scalar=scalar,
    )
    # Merge indicator onto prices so both share one reindexed integer axis
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 3 axes
    if not external_axes:
        _, axes = plt.subplots(
            3, 1, sharex=True, figsize=plot_autoscale(), dpi=PLOT_DPI
        )
        ax1, ax2, ax3 = axes
    elif is_valid_axes_count(external_axes, 3):
        (ax1, ax2, ax3) = external_axes
    else:
        return
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    ax1.plot(plot_data.index, plot_data[close_col].values)
    ax1.set_title(f"Aroon on {symbol}")
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Price")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # NOTE(review): assumes aroon columns are ordered DOWN, UP, OSC (matches
    # the legends below) -- TODO confirm against trend_indicators_model.aroon
    ax2.plot(plot_data.index, plot_data[df_ta.columns[0]].values, theme.down_color)
    ax2.plot(plot_data.index, plot_data[df_ta.columns[1]].values, theme.up_color)
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    # Midpoint reference between the 0-100 aroon bounds
    ax2.axhline(50, ls="--")
    ax2.legend([f"Aroon DOWN ({df_ta.columns[0]})", f"Aroon UP ({df_ta.columns[1]})"])
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    ax3.plot(plot_data.index, plot_data[df_ta.columns[2]].values)
    ax3.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax3.legend([f"Aroon OSC ({df_ta.columns[2]})"])
    ax3.set_ylim([-100, 100])
    theme.style_primary_axis(
        ax3,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render the figure ourselves when we created the axes
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "aroon",
        df_ta,
    )
__docformat__ = "numpy"
import logging
import os
from typing import Optional, Union, List
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.technical_analysis import custom_indicators_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
reindex_dates,
is_intraday,
is_valid_axes_count,
)
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def fibonacci_retracement(
    data: pd.DataFrame,
    limit: int = 120,
    start_date: Optional[Union[str, None]] = None,
    end_date: Optional[Union[str, None]] = None,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Calculated fibonacci retracement levels

    Parameters
    ----------
    data: pd.DataFrame
        OHLC data
    limit: int
        Days to lookback
    start_date: Optional[str, None]
        User picked date for starting retracement
    end_date: Optional[str, None]
        User picked date for ending retracement
    symbol: str
        Ticker symbol
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load(symbol="aapl")
    >>> openbb.ta.fib_chart(data=df)
    """
    # Model returns the level table plus the min/max anchor dates and prices
    (
        df_fib,
        min_date,
        max_date,
        min_pr,
        max_pr,
    ) = custom_indicators_model.calculate_fib_levels(data, limit, start_date, end_date)
    levels = df_fib.Price
    plot_data = reindex_dates(data)
    # This plot has 2 axes
    if external_axes is None:
        _, ax1 = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        # Secondary y-axis mirrors the price scale on the right-hand side
        ax2 = ax1.twinx()
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    ax1.plot(plot_data[close_col])
    # Intraday data needs a finer date format to match reindex_dates labels
    if is_intraday(data):
        date_format = "%b %d %H:%M"
    else:
        date_format = "%Y-%m-%d"
    # Locate the reindexed positions of the min/max anchor dates by their
    # formatted string labels
    min_date_index = plot_data[
        plot_data["date"] == min_date.strftime(date_format)
    ].index
    max_date_index = plot_data[
        plot_data["date"] == max_date.strftime(date_format)
    ].index
    # Trend line between the min and max anchor points
    ax1.plot(
        [min_date_index, max_date_index],
        [min_pr, max_pr],
    )
    for i in levels:
        ax1.axhline(y=i, alpha=0.5)
    # Shade the bands between consecutive retracement levels; assumes df_fib
    # has exactly 7 levels (6 bands)
    for i in range(6):
        ax1.fill_between(plot_data.index, levels[i], levels[i + 1], alpha=0.15)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_title(f"Fibonacci Support for {symbol.upper()}")
    ax1.set_yticks(levels)
    # NOTE(review): 0.235 looks like a typo for the standard 0.236 ratio --
    # TODO confirm against calculate_fib_levels
    ax1.set_yticklabels([0, 0.235, 0.382, 0.5, 0.618, 0.65, 1])
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    ax2.set_ylim(ax1.get_ylim())
    ax2.set_ylabel("Price")
    theme.style_primary_axis(ax2)
    # Only render the figure ourselves when we created the axes
    if external_axes is None:
        theme.visualize_output()
    print_rich_table(
        df_fib,
        headers=["Fib Level", "Price"],
        show_index=False,
        title="Fibonacci retracement levels",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "fib",
        df_fib,
    )
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.technical_analysis import volume_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
reindex_dates,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_ad(
    data: pd.DataFrame,
    use_open: bool = False,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots AD technical indicator

    Renders a three-panel figure (price, volume, accumulation/distribution
    line) and exports the raw indicator values when requested.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    use_open : bool
        Whether to use open prices in calculation
    symbol : str
        Ticker symbol
    export: str
        Format to export data as
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    # Scale volume and A/D to millions so the axis tick labels stay readable
    divisor = 1_000_000
    df_vol = data["Volume"] / divisor
    df_vol.name = "Adj Volume"
    df_ta = volume_model.ad(data, use_open)
    # check if AD exists in dataframe; pandas_ta names the column "AD" or
    # "ADo" depending on whether open prices were used in the calculation
    if "AD" in df_ta.columns:
        df_cal = df_ta["AD"] / divisor
    elif "ADo" in df_ta.columns:
        df_cal = df_ta["ADo"] / divisor
    else:
        console.print("AD not found in dataframe")
        return
    df_cal.name = "Adj AD"
    # Join price, scaled volume, raw indicator and scaled indicator on date
    plot_data = pd.merge(data, df_vol, how="outer", left_index=True, right_index=True)
    plot_data = pd.merge(
        plot_data, df_ta, how="outer", left_index=True, right_index=True
    )
    plot_data = pd.merge(
        plot_data, df_cal, how="outer", left_index=True, right_index=True
    )
    plot_data = reindex_dates(plot_data)
    # This plot has 3 axes
    if external_axes is None:
        _, axes = plt.subplots(
            3,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        ax1, ax2, ax3 = axes
    elif is_valid_axes_count(external_axes, 3):
        (ax1, ax2, ax3) = external_axes
    else:
        return
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    # Panel 1: closing price
    ax1.plot(plot_data.index, plot_data[close_col].values)
    ax1.set_title(f"{symbol} AD", x=0.04, y=1)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Price")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Panel 2: volume bars colored by candle direction (Open vs Close)
    ax2.set_ylabel("Volume [M]")
    bar_colors = [
        theme.down_color if x[1].Open < x[1].Close else theme.up_color
        for x in plot_data.iterrows()
    ]
    ax2.bar(
        plot_data.index,
        plot_data["Adj Volume"].values,
        color=bar_colors,
        width=theme.volume_bar_width,
    )
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Panel 3: A/D line with a zero reference line
    ax3.set_ylabel("A/D [M]")
    ax3.plot(plot_data.index, plot_data["Adj AD"])
    ax3.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax3.axhline(0, linestyle="--")
    theme.style_primary_axis(
        ax3,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "ad",
        df_ta,
    )
@log_start_end(log=logger)
def display_adosc(
    data: pd.DataFrame,
    fast: int = 3,
    slow: int = 10,
    use_open: bool = False,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots AD Osc Indicator

    Renders a three-panel figure (price, volume, A/D oscillator) and exports
    the raw indicator values when requested.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    fast: int
        Length of fast window
    slow : int
        Length of slow window
    use_open : bool
        Whether to use open prices in calculation
    symbol : str
        Stock ticker
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    # Scale volume and the oscillator to millions for readable tick labels
    divisor = 1_000_000
    df_vol = data["Volume"] / divisor
    df_vol.name = "Adj Volume"
    df_ta = volume_model.adosc(data, use_open, fast, slow)
    df_cal = df_ta[df_ta.columns[0]] / divisor
    df_cal.name = "Adj ADOSC"
    # Join price, scaled volume, raw indicator and scaled indicator on date
    plot_data = pd.merge(data, df_vol, how="outer", left_index=True, right_index=True)
    plot_data = pd.merge(
        plot_data, df_ta, how="outer", left_index=True, right_index=True
    )
    plot_data = pd.merge(
        plot_data, df_cal, how="outer", left_index=True, right_index=True
    )
    plot_data = reindex_dates(plot_data)
    # This plot has 3 axes
    if external_axes is None:
        _, axes = plt.subplots(
            3,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        ax1, ax2, ax3 = axes
    elif is_valid_axes_count(external_axes, 3):
        (ax1, ax2, ax3) = external_axes
    else:
        return
    # BUG FIX: detect the close column like the sibling functions instead of
    # hard-coding "Adj Close", which raised KeyError on frames that only
    # carry a "Close" column.
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    # Panel 1: closing price
    ax1.set_title(f"{symbol} AD Oscillator")
    ax1.plot(plot_data.index, plot_data[close_col].values)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Price")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Panel 2: volume bars colored by candle direction (Open vs Close)
    ax2.set_ylabel("Volume [M]")
    bar_colors = [
        theme.down_color if x[1].Open < x[1].Close else theme.up_color
        for x in plot_data.iterrows()
    ]
    ax2.bar(
        plot_data.index,
        plot_data["Adj Volume"],
        color=bar_colors,
        width=theme.volume_bar_width,
    )
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Panel 3: the A/D oscillator itself
    ax3.set_ylabel("AD Osc [M]")
    ax3.plot(plot_data.index, plot_data["Adj ADOSC"], label="AD Osc")
    ax3.set_xlim(plot_data.index[0], plot_data.index[-1])
    theme.style_primary_axis(
        ax3,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "adosc",
        df_ta,
    )
@log_start_end(log=logger)
def display_obv(
    data: pd.DataFrame,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots OBV technical indicator

    Renders a three-panel figure (price, volume, on-balance volume) and
    exports the raw indicator values when requested.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    symbol : str
        Ticker
    export: str
        Format to export data as
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    # Express volume and OBV in millions so axis labels stay readable
    divisor = 1_000_000
    volume_scaled = data["Volume"] / divisor
    volume_scaled.name = "Adj Volume"
    df_ta = volume_model.obv(data)
    obv_scaled = df_ta[df_ta.columns[0]] / divisor
    obv_scaled.name = "Adj OBV"
    # Join price, scaled volume, raw OBV and scaled OBV on the date index
    merged = data.merge(volume_scaled, how="outer", left_index=True, right_index=True)
    merged = merged.merge(df_ta, how="outer", left_index=True, right_index=True)
    merged = merged.merge(obv_scaled, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(merged)
    # This plot has 3 axes
    if external_axes is None:
        _, axes = plt.subplots(
            3,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        ax1, ax2, ax3 = axes
    elif is_valid_axes_count(external_axes, 3):
        (ax1, ax2, ax3) = external_axes
    else:
        return
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    index_list = plot_data.index.to_list()
    date_ticks = plot_data["date"].to_list()
    # Panel 1: closing price
    ax1.plot(plot_data.index, plot_data[close_col].values)
    ax1.set_title(f"{symbol} OBV")
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Price")
    theme.style_primary_axis(ax1, data_index=index_list, tick_labels=date_ticks)
    # Panel 2: volume bars colored by candle direction (Open vs Close)
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax2.set_ylabel("Volume [M]")
    candle_colors = [
        theme.down_color if row.Open < row.Close else theme.up_color
        for _, row in plot_data.iterrows()
    ]
    ax2.bar(
        plot_data.index,
        plot_data["Adj Volume"],
        color=candle_colors,
        alpha=0.8,
        width=theme.volume_bar_width,
    )
    theme.style_primary_axis(ax2, data_index=index_list, tick_labels=date_ticks)
    # Panel 3: on-balance volume line
    ax3.set_ylabel("OBV [M]")
    ax3.plot(plot_data.index, plot_data["Adj OBV"])
    ax3.set_xlim(plot_data.index[0], plot_data.index[-1])
    theme.style_primary_axis(ax3, data_index=index_list, tick_labels=date_ticks)
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "obv",
        df_ta,
    )
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import mplfinance as mpf
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.technical_analysis import momentum_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
reindex_dates,
is_valid_axes_count,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def display_cci(
    data: pd.DataFrame,
    window: int = 14,
    scalar: float = 0.0015,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots CCI Indicator

    Renders price on the top panel and the commodity channel index with
    +/-100 overbought/oversold bands on the bottom panel.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of OHLC
    window : int
        Length of window
    scalar : float
        Scalar variable
    symbol : str
        Stock ticker
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df_ta = momentum_model.cci(data, window, scalar)
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2, 1, figsize=plot_autoscale(), sharex=True, dpi=PLOT_DPI
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    ax1.set_title(f"{symbol} CCI")
    ax1.plot(
        plot_data.index,
        plot_data[close_col].values,
    )
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Share Price ($)")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Bottom panel: CCI line with shaded regions above +100 and below -100
    ax2.plot(plot_data.index, plot_data[df_ta.columns[0]].values)
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax2.axhspan(100, ax2.get_ylim()[1], facecolor=theme.down_color, alpha=0.2)
    ax2.axhspan(ax2.get_ylim()[0], -100, facecolor=theme.up_color, alpha=0.2)
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Twin axis carries only the dashed threshold lines so the primary axis
    # can show the OVERSOLD/OVERBOUGHT text labels at +/-100
    ax3 = ax2.twinx()
    ax3.set_ylim(ax2.get_ylim())
    ax3.axhline(100, color=theme.down_color, ls="--")
    ax3.axhline(-100, color=theme.up_color, ls="--")
    theme.style_twin_axis(ax3)
    ax2.set_yticks([-100, 100])
    ax2.set_yticklabels(["OVERSOLD", "OVERBOUGHT"])
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "cci",
        df_ta,
    )
@log_start_end(log=logger)
def display_macd(
    data: pd.Series,
    n_fast: int = 12,
    n_slow: int = 26,
    n_signal: int = 9,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots MACD signal

    Renders price on the top panel and the MACD line, signal line and
    histogram on the bottom panel.

    Parameters
    ----------
    data : pd.Series
        Values to input
    n_fast : int
        Fast period
    n_slow : int
        Slow period
    n_signal : int
        Signal period
    symbol : str
        Stock ticker
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df_ta = momentum_model.macd(data, n_fast, n_slow, n_signal)
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2, 1, figsize=plot_autoscale(), sharex=True, dpi=PLOT_DPI
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    # Positional columns below assume reindex_dates prepends a "date" column,
    # so col 1 = price, col 2 = MACD line, col 3 = histogram, col 4 = signal
    # line (matching the legend labels built from plot_data.columns) —
    # NOTE(review): verify against reindex_dates if column layout changes.
    ax1.set_title(f"{symbol} MACD")
    ax1.plot(plot_data.index, plot_data.iloc[:, 1].values)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Share Price ($)")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    ax2.plot(plot_data.index, plot_data.iloc[:, 2].values)
    ax2.plot(
        plot_data.index,
        plot_data.iloc[:, 4].values,
        color=theme.down_color,
    )
    ax2.bar(
        plot_data.index,
        plot_data.iloc[:, 3].values,
        width=theme.volume_bar_width,
        color=theme.up_color,
    )
    ax2.legend(
        [
            f"MACD Line {plot_data.columns[2]}",
            f"Signal Line {plot_data.columns[4]}",
            f"Histogram {plot_data.columns[3]}",
        ],
        loc=2,
        prop={"size": 6},
    )
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "macd",
        df_ta,
    )
@log_start_end(log=logger)
def display_rsi(
    data: pd.Series,
    window: int = 14,
    scalar: float = 100.0,
    drift: int = 1,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots RSI Indicator

    Renders price on the top panel and the relative strength index with
    30/70 oversold/overbought bands on the bottom panel.

    Parameters
    ----------
    data : pd.Series
        Values to input
    window : int
        Length of window
    scalar : float
        Scalar variable
    drift : int
        Drift variable
    symbol : str
        Stock ticker
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # RSI is computed on a single price series; reject whole OHLC frames
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a dataframe[/red]\n")
        return
    df_ta = momentum_model.rsi(data, window, scalar, drift)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2, 1, figsize=plot_autoscale(), sharex=True, dpi=PLOT_DPI
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # Top panel: the price series (col 0 is the "date" column after reindex)
    ax1.plot(plot_data.index, plot_data.iloc[:, 1].values)
    ax1.set_title(f"{symbol} RSI{window}")
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Share Price ($)")
    theme.style_primary_axis(
        ax=ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Bottom panel: RSI with shaded 0-30 (oversold) and 70-100 (overbought)
    ax2.plot(plot_data.index, plot_data[df_ta.columns[0]].values)
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax2.axhspan(0, 30, facecolor=theme.up_color, alpha=0.2)
    ax2.axhspan(70, 100, facecolor=theme.down_color, alpha=0.2)
    ax2.set_ylim([0, 100])
    theme.style_primary_axis(
        ax=ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Twin axis carries the dashed threshold lines; the primary axis shows
    # the OVERSOLD/OVERBOUGHT text labels at 30/70
    ax3 = ax2.twinx()
    ax3.set_ylim(ax2.get_ylim())
    ax3.axhline(30, color=theme.up_color, ls="--")
    ax3.axhline(70, color=theme.down_color, ls="--")
    ax2.set_yticks([30, 70])
    ax2.set_yticklabels(["OVERSOLD", "OVERBOUGHT"])
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "rsi",
        df_ta,
    )
@log_start_end(log=logger)
def display_stoch(
    data: pd.DataFrame,
    fastkperiod: int = 14,
    slowdperiod: int = 3,
    slowkperiod: int = 3,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots stochastic oscillator signal

    Renders price on the top panel and the %K/%D stochastic oscillator lines
    with 20/80 oversold/overbought bands on the bottom panel.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of OHLC prices
    fastkperiod : int
        Fast k period
    slowdperiod : int
        Slow d period
    slowkperiod : int
        Slow k period
    symbol : str
        Stock ticker symbol
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    df_ta = momentum_model.stoch(
        data,
        fastkperiod,
        slowdperiod,
        slowkperiod,
    )
    # This plot has 3 axes
    if not external_axes:
        _, axes = plt.subplots(
            2, 1, sharex=True, figsize=plot_autoscale(), dpi=PLOT_DPI
        )
        ax1, ax2 = axes
        ax3 = ax2.twinx()
    elif is_valid_axes_count(external_axes, 3):
        (ax1, ax2, ax3) = external_axes
    else:
        return
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    ax1.plot(plot_data.index, plot_data[close_col].values)
    # BUG FIX: the title previously read "Stochastic Relative Strength Index
    # (STOCH RSI)", but this function plots the plain stochastic oscillator
    # (momentum_model.stoch), not STOCH RSI.
    ax1.set_title(f"Stochastic Oscillator (STOCH) on {symbol}")
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Share Price ($)")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Bottom panel: %K (solid) and %D (dashed)
    ax2.plot(plot_data.index, plot_data[df_ta.columns[0]].values)
    ax2.plot(plot_data.index, plot_data[df_ta.columns[1]].values, ls="--")
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Twin axis carries the shaded bands and dashed 20/80 threshold lines
    ax3.set_ylim(ax2.get_ylim())
    ax3.axhspan(80, 100, facecolor=theme.down_color, alpha=0.2)
    ax3.axhspan(0, 20, facecolor=theme.up_color, alpha=0.2)
    ax3.axhline(80, color=theme.down_color, ls="--")
    ax3.axhline(20, color=theme.up_color, ls="--")
    theme.style_twin_axis(ax3)
    ax2.set_yticks([20, 80])
    ax2.set_yticklabels(["OVERSOLD", "OVERBOUGHT"])
    ax2.legend(
        [f"%K {df_ta.columns[0]}", f"%D {df_ta.columns[1]}"],
        loc=2,
        prop={"size": 6},
    )
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "stoch",
        df_ta,
    )
@log_start_end(log=logger)
def display_fisher(
    data: pd.DataFrame,
    window: int = 14,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Fisher Indicator

    Renders price on the top panel and the Fisher transform with its signal
    line and +/-2 standard-deviation bands on the bottom panel.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of OHLC prices
    window : int
        Length of window
    symbol : str
        Ticker string
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    df_ta = momentum_model.fisher(data, window)
    if df_ta.empty:
        return
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 3 axes
    if not external_axes:
        _, axes = plt.subplots(
            2, 1, sharex=True, figsize=plot_autoscale(), dpi=PLOT_DPI
        )
        ax1, ax2 = axes
        ax3 = ax2.twinx()
    elif is_valid_axes_count(external_axes, 3):
        (ax1, ax2, ax3) = external_axes
    else:
        return
    ax1.set_title(f"{symbol} Fisher Transform")
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return
    ax1.plot(plot_data.index, plot_data[close_col].values)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Price")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Bottom panel: Fisher line and its signal line
    ax2.plot(
        plot_data.index,
        plot_data[df_ta.columns[0]].values,
        label="Fisher",
    )
    ax2.plot(
        plot_data.index,
        plot_data[df_ta.columns[1]].values,
        label="Signal",
    )
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Twin axis shades the regions beyond +/-2 and draws dashed thresholds
    ax3.set_ylim(ax2.get_ylim())
    ax3.axhspan(2, ax2.get_ylim()[1], facecolor=theme.down_color, alpha=0.2)
    ax3.axhspan(ax2.get_ylim()[0], -2, facecolor=theme.up_color, alpha=0.2)
    ax3.axhline(2, color=theme.down_color, ls="--")
    ax3.axhline(-2, color=theme.up_color, ls="--")
    theme.style_twin_axis(ax3)
    ax2.set_yticks([-2, 0, 2])
    ax2.set_yticklabels(["-2 STDEV", "0", "+2 STDEV"])
    ax2.legend(loc=2, prop={"size": 6})
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "fisher",
        df_ta,
    )
@log_start_end(log=logger)
def display_cg(
    data: pd.Series,
    window: int = 14,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots center of gravity Indicator

    Renders price on the top panel and the center-of-gravity oscillator with
    a one-bar-lagged signal line on the bottom panel.

    Parameters
    ----------
    data : pd.Series
        Series of values
    window : int
        Length of window
    symbol : str
        Stock ticker
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df_ta = momentum_model.cg(data, window)
    plot_data = pd.merge(data, df_ta, how="outer", left_index=True, right_index=True)
    plot_data = reindex_dates(plot_data)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2, 1, figsize=plot_autoscale(), sharex=True, dpi=PLOT_DPI
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    ax1.set_title(f"{symbol} Centre of Gravity")
    ax1.plot(plot_data.index, plot_data[data.name].values)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Share Price ($)")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    ax2.plot(plot_data.index, plot_data[df_ta.columns[0]].values, label="CG")
    # shift cg 1 bar forward for signal
    # NOTE(review): np.roll wraps around, so the first signal point carries
    # the LAST CG value — a spurious artifact at the left edge; confirm
    # whether a NaN-padded shift is intended instead.
    signal = np.roll(plot_data[df_ta.columns[0]].values, 1)
    ax2.plot(plot_data.index, signal, label="Signal")
    ax2.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax2.legend()
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "cg",
        df_ta,
    )
@log_start_end(log=logger)
def display_clenow_momentum(
    data: pd.Series,
    symbol: str = "",
    window: int = 90,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Prints table and plots clenow momentum

    Shows the R^2, fit coefficient and combined factor of the exponential
    regression, then plots log-price with the fitted segment overlaid.

    Parameters
    ----------
    data : pd.Series
        Series of values
    symbol : str
        Symbol that the data corresponds to
    window : int
        Length of window
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.ta.clenow_chart(df["Close"])
    """
    r2, coef, fit_data = momentum_model.clenow_momentum(data, window)
    df = pd.DataFrame.from_dict(
        {
            "R^2": f"{r2:.5f}",
            "Fit Coef": f"{coef:.5f}",
            "Factor": f"{coef * r2:.5f}",
        },
        orient="index",
    )
    print_rich_table(
        df,
        show_index=True,
        headers=[""],
        title=f"Clenow Exponential Regression Factor on {symbol}",
        show_header=False,
    )
    # This plot has 1 axis (the old comment wrongly claimed 2)
    if external_axes is None:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        # BUG FIX: unpack the single axis; previously the list itself was
        # bound to ax1 and the subsequent ax1.plot call raised AttributeError
        (ax1,) = external_axes
    else:
        return
    ax1.plot(data.index, np.log(data.values))
    # Overlay the exponential fit over the last `window` observations
    ax1.plot(data.index[-window:], fit_data, linewidth=2)
    ax1.set_title(f"Clenow Momentum Exponential Regression on {symbol}")
    ax1.set_xlim(data.index[0], data.index[-1])
    ax1.set_ylabel("Log Price")
    theme.style_primary_axis(
        ax1,
    )
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "clenow",
        # BUG FIX: pass the results dataframe like every other export_data
        # call in this module; it was previously omitted
        df,
    )
@log_start_end(log=logger)  # added for consistency with the other plotters
def display_demark(
    data: pd.DataFrame,
    symbol: str = "",
    min_to_show: int = 5,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot demark sequential indicator

    Draws an OHLC chart with TD-sequential up/down counts rendered as text
    markers above the highs and below the lows.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame of values
    symbol : str
        Symbol that the data corresponds to
    min_to_show: int
        Minimum value to show
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axes are expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.ta.demark_chart(df)
    """
    close_col = ta_helpers.check_columns(data, high=False, low=False)
    if close_col is None:
        return
    demark_df = momentum_model.demark_seq(data[close_col])
    demark_df.index = data.index
    stock_data = data.copy()
    stock_data["up"] = demark_df.TD_SEQ_UPa
    stock_data["down"] = demark_df.TD_SEQ_DNa
    # MPLfinance can do series of markers :)
    # Only counts above the threshold are rendered; others become None
    markersUP = (
        stock_data["up"]
        .apply(lambda x: f"${x}$" if x > min_to_show else None)
        .to_list()
    )
    markersDOWN = (
        stock_data["down"]
        .apply(lambda x: f"${x}$" if x > min_to_show else None)
        .to_list()
    )
    # Offset markers slightly below the lows / above the highs
    adp = [
        mpf.make_addplot(
            0.98 * stock_data["Low"],
            type="scatter",
            markersize=30,
            marker=markersDOWN,
            color="r",
        ),
        mpf.make_addplot(
            1.012 * stock_data["High"],
            type="scatter",
            markersize=30,
            marker=markersUP,
            color="b",
        ),
    ]
    # Stuff for mplfinance
    candle_chart_kwargs = {
        "type": "ohlc",
        "style": theme.mpf_style,
        "volume": False,
        "addplot": adp,
        "xrotation": theme.xticks_rotation,
        "scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
        "update_width_config": {
            "candle_linewidth": 0.6,
            "candle_width": 0.8,
            "volume_linewidth": 0.8,
            "volume_width": 0.8,
        },
        "warn_too_much_data": 10000,
    }
    if external_axes is None:
        candle_chart_kwargs["returnfig"] = True
        candle_chart_kwargs["figratio"] = (10, 7)
        candle_chart_kwargs["figscale"] = 1.10
        candle_chart_kwargs["figsize"] = plot_autoscale()
        candle_chart_kwargs["warn_too_much_data"] = 100_000
        fig, _ = mpf.plot(stock_data, **candle_chart_kwargs)
        fig.suptitle(
            f"{symbol} Demark Sequential",
            x=0.055,
            y=0.965,
            horizontalalignment="left",
        )
        theme.visualize_output(force_tight_layout=False)
    else:
        if len(external_axes) != 1:
            logger.error("Expected list of one axis item.")
            console.print("[red]Expected list of 1 axis items.\n[/red]")
            # BUG FIX: bail out instead of falling through with invalid axes
            return
        # BUG FIX: take the axis out of the list; previously the list itself
        # was passed to mpf.plot as the ax argument
        ax1 = external_axes[0]
        candle_chart_kwargs["ax"] = ax1
        mpf.plot(stock_data, **candle_chart_kwargs)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "demark",
        stock_data,
    )
# BUG FIX: was `___docformat__` (three leading underscores), which tooling
# that inspects the standard `__docformat__` attribute would not recognize.
__docformat__ = "numpy"
import logging
import pandas as pd
import pandas_ta as ta
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console

logger = logging.getLogger(__name__)

# Default moving-average window pairs consumed by callers of this module
WINDOW_LENGTHS = [20, 50]
WINDOW_LENGTHS2 = [10, 20]
@log_start_end(log=logger)
def ema(data: pd.Series, length: int = 50, offset: int = 0) -> pd.DataFrame:
    """Gets exponential moving average (EMA) for stock

    Parameters
    ----------
    data: pd.Series
        Series of prices indexed by date
    length: int
        Length of EMA window
    offset: int
        Length of offset

    Returns
    -------
    pd.DataFrame
        Dataframe containing prices and EMA
    """
    # Guard against callers passing a whole OHLC frame instead of one column
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a DataFrame.[/red]\n")
        return pd.DataFrame()
    ema_values = ta.ema(data, length=length, offset=offset)
    return pd.DataFrame(ema_values).dropna()
@log_start_end(log=logger)
def sma(data: pd.Series, length: int = 50, offset: int = 0) -> pd.DataFrame:
    """Gets simple moving average (SMA) for stock

    Parameters
    ----------
    data: pd.Series
        Series of dates and prices
    length: int
        Length of SMA window
    offset: int
        Length of offset

    Returns
    -------
    pd.DataFrame
        Dataframe containing prices and SMA
    """
    # SMA operates on a single price series; reject whole OHLC frames
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a DataFrame.[/red]\n")
        return pd.DataFrame()
    return pd.DataFrame(ta.sma(data, length=length, offset=offset)).dropna()
@log_start_end(log=logger)
def wma(data: pd.Series, length: int = 50, offset: int = 0) -> pd.DataFrame:
    """Gets weighted moving average (WMA) for stock

    Parameters
    ----------
    data: pd.Series
        Series of dates and prices
    length: int
        Length of WMA window
    offset: int
        Length of offset

    Returns
    -------
    df_ta: pd.DataFrame
        Dataframe containing prices and WMA
    """
    # WMA operates on a single price series; reject whole OHLC frames
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a DataFrame.[/red]\n")
        return pd.DataFrame()
    return pd.DataFrame(ta.wma(data, length=length, offset=offset)).dropna()
@log_start_end(log=logger)
def hma(data: pd.Series, length: int = 50, offset: int = 0) -> pd.DataFrame:
    """Gets hull moving average (HMA) for stock

    Parameters
    ----------
    data: pd.Series
        Series of dates and prices
    length: int
        Length of HMA window
    offset: int
        Length of offset

    Returns
    -------
    df_ta: pd.DataFrame
        Dataframe containing prices and HMA
    """
    # HMA operates on a single price series; reject whole OHLC frames
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a DataFrame.[/red]\n")
        return pd.DataFrame()
    return pd.DataFrame(ta.hma(data, length=length, offset=offset)).dropna()
@log_start_end(log=logger)
def zlma(data: pd.Series, length: int = 50, offset: int = 0) -> pd.DataFrame:
    """Gets zero-lagged exponential moving average (ZLEMA) for stock

    Parameters
    ----------
    data: pd.Series
        Series of dates and prices
    length: int
        Length of ZLEMA window
    offset: int
        Length of offset

    Returns
    -------
    df_ta: pd.DataFrame
        Dataframe containing prices and ZLEMA
    """
    # ZLEMA operates on a single price series; reject whole OHLC frames
    if isinstance(data, pd.DataFrame):
        console.print("[red]Please send a series and not a DataFrame.[/red]\n")
        return pd.DataFrame()
    return pd.DataFrame(ta.zlma(data, length=length, offset=offset)).dropna()
@log_start_end(log=logger)
def vwap(data: pd.DataFrame, offset: int = 0) -> pd.DataFrame:
    """Gets volume weighted average price (VWAP)

    BUG FIX: the annotation/docstring previously claimed `pd.Series`, but the
    body indexes "High", "Low", "Close" and "Volume" columns — a DataFrame of
    OHLCV data is required.

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe with "High", "Low", "Close" and "Volume" columns, indexed
        by datetime
    offset: int
        Length of offset

    Returns
    -------
    pd.DataFrame
        Dataframe with VWAP data
    """
    df_vwap = ta.vwap(
        high=data["High"],
        low=data["Low"],
        close=data["Close"],
        volume=data["Volume"],
        offset=offset,
    )
    return pd.DataFrame(df_vwap)
__docformat__ = "numpy"
import logging
from typing import Dict, Tuple
import pandas as pd
import requests
from tqdm import tqdm
import yfinance as yf
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_allocation(
    category: str, benchmark_info: Dict, portfolio_trades: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get category allocation for benchmark and portfolio

    Parameters
    ----------
    category: str
        Chosen category: Asset, Sector, Country or Region
    benchmark_info: Dict
        Dictionary containing Yahoo Finance information
    portfolio_trades: pd.DataFrame
        Object containing trades made within the portfolio

    Returns
    -------
    pd.DataFrame
        DataFrame with the top 10 of the benchmark's asset allocations
    pd.DataFrame
        DataFrame with the portfolio's asset allocations
    """
    # Dispatch table keyed by the supported category names
    handlers = {
        "Asset": get_assets_allocation,
        "Sector": get_sectors_allocation,
        "Country": get_countries_allocation,
        "Region": get_regions_allocation,
    }
    handler = handlers.get(category)
    if handler is not None:
        return handler(benchmark_info, portfolio_trades)
    console.print(
        "Category not available. Choose from: Asset, Sector, Country or Region."
    )
    return pd.DataFrame(), pd.DataFrame()
@log_start_end(log=logger)
def get_assets_allocation(
    benchmark_info: Dict, portfolio_trades: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get assets allocation for benchmark and portfolio [Source: Yahoo Finance]

    Parameters
    ----------
    benchmark_info: Dict
        Dictionary containing Yahoo Finance information
    portfolio_trades: pd.DataFrame
        Object containing trades made within the portfolio

    Returns
    -------
    pd.DataFrame
        DataFrame with the top 10 of the benchmark's asset allocations
    pd.DataFrame
        DataFrame with the portfolio's asset allocations
    """
    # Benchmark side: top holdings reported by Yahoo Finance
    benchmark_allocation = pd.DataFrame(benchmark_info["holdings"]).rename(
        columns={"symbol": "Symbol", "holdingPercent": "Benchmark"}
    )
    benchmark_allocation = benchmark_allocation.drop(columns=["holdingName"])
    # Portfolio side: per-ticker value share; cash positions are excluded
    # from the numerator while the denominator is the full portfolio value
    non_cash_trades = portfolio_trades[portfolio_trades["Type"] != "CASH"]
    portfolio_allocation = (
        non_cash_trades.groupby(by="Ticker")
        .agg({"Portfolio Value": "sum"})
        .div(portfolio_trades["Portfolio Value"].sum())
        .sort_values(by="Portfolio Value", ascending=False)
        .reset_index()
        .rename(columns={"Ticker": "Symbol", "Portfolio Value": "Portfolio"})
        .fillna(0)
    )
    return benchmark_allocation, portfolio_allocation
@log_start_end(log=logger)
def get_sectors_allocation(
    benchmark_info: Dict, portfolio_trades: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get sector allocation for benchmark and portfolio [Source: Yahoo Finance]

    Parameters
    ----------
    benchmark_info: Dict
        Dictionary containing Yahoo Finance information
    portfolio_trades: pd.DataFrame
        Object containing trades made within the portfolio

    Returns
    -------
    pd.DataFrame
        DataFrame with the benchmark's sector allocations
    pd.DataFrame
        DataFrame with the portfolio's sector allocations
    """
    # Flatten the list of single-entry {sector: weight} dicts that Yahoo
    # Finance reports into one Series, sorted by weight
    benchmark_sectors_allocation = (
        pd.DataFrame.from_dict(
            data={
                sector_name: allocation
                for sector in benchmark_info["sectorWeightings"]
                for sector_name, allocation in sector.items()
            },
            orient="index",
        )
        .squeeze()
        .sort_values(ascending=False)
    )

    # Prettify sector allocations of benchmark to align with Portfolio Excel
    # (e.g. "basic_materials" -> "Basic Materials")
    prettified = [
        sector.replace("_", " ").title()
        for sector in benchmark_sectors_allocation.index
    ]

    benchmark_sectors_allocation.index = prettified
    benchmark_sectors_allocation = pd.DataFrame(benchmark_sectors_allocation)
    benchmark_sectors_allocation.reset_index(inplace=True)
    benchmark_sectors_allocation.columns = ["Sector", "Benchmark"]

    # Define portfolio sector allocation
    # Aggregate sector value for stocks and crypto
    portfolio_sectors_allocation = (
        portfolio_trades[portfolio_trades["Type"].isin(["STOCK", "CRYPTO"])]
        .groupby(by="Sector")
        .agg({"Portfolio Value": "sum"})
    )

    # Aggregate sector value for ETFs
    # Start by getting value by isin/symbol
    etf_ticker_value = (
        portfolio_trades[portfolio_trades["Type"].isin(["ETF"])]
        .groupby(by="Ticker")
        .agg({"Portfolio Value": "sum"})
    )
    etf_global_sector_alloc = pd.DataFrame()

    if not etf_ticker_value.empty:
        # Loop through each etf and multiply sector weights by current value
        for item in tqdm(etf_ticker_value.index.values, desc="Loading ETF data"):
            # TODO: This can be improved by caching this info similar to what is done in stocks
            etf_info = yf.Ticker(item).info

            try:
                etf_sector_weight = pd.DataFrame.from_dict(
                    data={
                        sector_name: allocation
                        for sector in etf_info["sectorWeightings"]
                        for sector_name, allocation in sector.items()
                    },
                    orient="index",
                    columns=["Portfolio Value"],
                )
            except Exception:
                # If ETF has no sectors like VIX for example or it was not found, add to Other
                etf_sector_weight = pd.DataFrame.from_dict(
                    data={"Other": 1}, orient="index", columns=["Portfolio Value"]
                )

            # Scale the ETF's sector weights by the value held in that ETF
            etf_ticker_sector_alloc = (
                etf_sector_weight * etf_ticker_value["Portfolio Value"][item]
            )

            # Aggregate etf sector allocation by value
            etf_global_sector_alloc = pd.concat(
                [etf_global_sector_alloc, etf_ticker_sector_alloc], axis=1
            )

        # Collapse the per-ETF columns into one total value per sector
        etf_global_sector_alloc.fillna(0, inplace=True)
        etf_global_sector_alloc = etf_global_sector_alloc.sum(axis=1)
        etf_global_sector_alloc = pd.DataFrame(
            etf_global_sector_alloc, columns=["Portfolio Value"]
        )
        console.print("\n")

    # Rename columns to match stock and crypto classification
    etf_global_sector_alloc.index.name = "Sector"
    prettified = [
        sector.replace("_", " ").title() for sector in etf_global_sector_alloc.index
    ]
    etf_global_sector_alloc.index = prettified

    # Aggregate sector allocation for stocks and crypto with ETFs
    portfolio_sectors_allocation = pd.merge(
        portfolio_sectors_allocation,
        etf_global_sector_alloc,
        how="outer",
        left_index=True,
        right_index=True,
    ).sum(axis=1)
    portfolio_sectors_allocation = pd.DataFrame(
        portfolio_sectors_allocation, columns=["Portfolio Value"]
    )

    # Convert absolute values into weights of total portfolio value
    portfolio_sectors_allocation = portfolio_sectors_allocation.div(
        portfolio_trades["Portfolio Value"].sum()
    ).sort_values(by="Portfolio Value", ascending=False)
    portfolio_sectors_allocation.fillna(0, inplace=True)
    portfolio_sectors_allocation = pd.DataFrame(portfolio_sectors_allocation)
    portfolio_sectors_allocation.reset_index(inplace=True)
    portfolio_sectors_allocation.columns = ["Sector", "Portfolio"]

    return benchmark_sectors_allocation, portfolio_sectors_allocation
@log_start_end(log=logger)
def get_countries_allocation(
    benchmark_info: Dict, portfolio_trades: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get countries allocation for benchmark and portfolio [Source: Yahoo Finance]

    Parameters
    ----------
    benchmark_info: Dict
        Dictionary containing Yahoo Finance information
    portfolio_trades: pd.DataFrame
        Object containing trades made within the portfolio

    Returns
    -------
    pd.DataFrame
        DataFrame with the benchmark's country allocations
    pd.DataFrame
        DataFrame with the portfolio's country allocations
    """
    # Benchmark breakdown is scraped by symbol; portfolio breakdown is
    # aggregated from the trades table
    return (
        get_symbol_allocation(
            symbol=benchmark_info["symbol"], category="Country", col_name="Benchmark"
        ),
        get_portfolio_allocation(
            category="Country", portfolio_trades=portfolio_trades
        ),
    )
@log_start_end(log=logger)
def get_regions_allocation(
    benchmark_info: Dict, portfolio_trades: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get regions allocation for benchmark and portfolio [Source: Yahoo Finance]

    Parameters
    ----------
    benchmark_info: Dict
        Dictionary containing Yahoo Finance information
    portfolio_trades: pd.DataFrame
        Object containing trades made within the portfolio

    Returns
    -------
    pd.DataFrame
        DataFrame with the benchmark's regional allocations
    pd.DataFrame
        DataFrame with the portfolio's regional allocations
    """
    # Benchmark breakdown is scraped by symbol; portfolio breakdown is
    # aggregated from the trades table
    return (
        get_symbol_allocation(
            symbol=benchmark_info["symbol"], category="Region", col_name="Benchmark"
        ),
        get_portfolio_allocation(
            category="Region", portfolio_trades=portfolio_trades
        ),
    )
def get_symbol_allocation(
    symbol: str, category: str, col_name: str = "Weight"
) -> pd.DataFrame:
    """Get benchmark allocation [Source: Fidelity]

    Scrapes the Fidelity portfolio-composition page for `symbol` and
    extracts the table holding the requested breakdown.

    Parameters
    ----------
    symbol: str
        ETF symbol to get allocation
    category: str
        Chosen category: Country or Region
    col_name: str
        Name given to the weights column of the returned DataFrame

    Returns
    -------
    pd.DataFrame
        DataFrame with two columns: [category, col_name]. Empty when the
        category is unknown or no matching table was found.
    """
    # Marker values used to recognise the relevant table among all the
    # tables on the page
    if category == "Region":
        category_list = [
            "North America",
            "Europe",
            "Asia",
            "Latin America",
            "Africa",
            "Middle East",
        ]
    elif category == "Country":
        category_list = [
            "United States",
            "United Kingdom",
            "Japan",
            "Switzerland",
            "China",
        ]
    else:
        # Previously an unknown category crashed with NameError; return an
        # empty frame with the expected columns instead
        return pd.DataFrame(columns=[category, col_name])

    # Collect data from Fidelity about the portfolio composition of the benchmark
    url = (
        "https://screener.fidelity.com/ftgw/etf/goto/snapshot/"
        f"portfolioComposition.jhtml?symbols={symbol}"
    )
    html = requests.get(url, timeout=10).content
    df_list = pd.read_html(html)

    # Find the first table that contains one of the marker values.
    # NOTE: using `None` as the sentinel fixes the old `item_list = 0`
    # sentinel, which was falsy when the match was the first table, and the
    # explicit outer break stops the scan at the first match.
    item_index = None
    for index, item in enumerate(df_list):
        if any(category_item in item.values for category_item in category_list):
            item_index = index
            break

    if item_index is not None:
        # Rows look like (idx, name, "12.34%"); convert to fractions
        allocation = {
            row[1]: float(row[2].strip("%")) / 100
            for _, row in df_list[item_index].dropna(axis="columns").iterrows()
        }
        allocation_df = pd.DataFrame.from_dict(allocation, orient="index")
        allocation_df.reset_index(inplace=True)
        allocation_df.columns = [category, col_name]
    else:
        allocation_df = pd.DataFrame(columns=[category, col_name])

    return allocation_df
@log_start_end(log=logger)
def get_portfolio_allocation(
    category: str, portfolio_trades: pd.DataFrame
) -> pd.DataFrame:
    """Get portfolio allocation

    Parameters
    ----------
    category: str
        Chosen category: Country or Region
    portfolio_trades: pd.DataFrame
        Object containing trades made within the portfolio

    Returns
    -------
    pd.DataFrame
        DataFrame with two columns: [category, "Portfolio"]
    """
    # Define portfolio allocation
    # Stocks and crypto carry the category directly in the trades table
    if not portfolio_trades[category].isnull().any():
        allocation = (
            portfolio_trades[portfolio_trades["Type"].isin(["STOCK", "CRYPTO"])]
            .groupby(by=category)
            .agg({"Portfolio Value": "sum"})
        )
    else:
        allocation = pd.DataFrame()

    # Aggregate sector value for ETFs
    # Start by getting value by symbol
    etf_ticker_value = (
        portfolio_trades[portfolio_trades["Type"].isin(["ETF"])]
        .groupby(by="Ticker")
        .agg({"Portfolio Value": "sum"})
    )

    etf_global_alloc = pd.DataFrame(columns=[category, "Portfolio Value"])
    if not etf_ticker_value.empty:
        no_info = []

        # Loop through each etf and multiply sector weights by current value
        for item in tqdm(etf_ticker_value.index.values, desc="Loading ETF data"):
            etf_weight = get_symbol_allocation(
                symbol=item, category=category, col_name="Portfolio Value"
            )

            if etf_weight.empty:
                # No breakdown available for this ETF: bucket it as "Other"
                etf_weight = pd.DataFrame.from_dict(
                    data={"Other": 1}, orient="index", columns=["Portfolio Value"]
                )
                etf_weight.index.name = category
                no_info.append(item)
            else:
                etf_weight.set_index(category, inplace=True)

            # Aggregate etf allocation by value
            etf_ticker_alloc = etf_weight
            etf_ticker_alloc["Portfolio Value"] = (
                etf_ticker_alloc["Portfolio Value"]
                * etf_ticker_value["Portfolio Value"][item]
            )
            etf_global_alloc = pd.concat([etf_global_alloc, etf_ticker_alloc], axis=1)

        # Collapse the per-ETF columns into one total value per category
        etf_global_alloc.fillna(0, inplace=True)
        etf_global_alloc = etf_global_alloc.sum(axis=1)
        etf_global_alloc = pd.DataFrame(etf_global_alloc, columns=["Portfolio Value"])

        console.print("")
        if no_info:
            console.print(
                f"[red]No data found for: {', '.join(no_info)}. Included in 'Other'.[/red]\n"
            )

    # Aggregate allocation for stocks and crypto with ETFs
    allocation = pd.merge(
        allocation,
        etf_global_alloc,
        how="outer",
        left_index=True,
        right_index=True,
    ).sum(axis=1)
    allocation = pd.DataFrame(allocation, columns=["Portfolio Value"])

    # Convert absolute values into weights of total portfolio value
    allocation = allocation.div(portfolio_trades["Portfolio Value"].sum()).sort_values(
        by="Portfolio Value", ascending=False
    )
    allocation.fillna(0, inplace=True)
    allocation.reset_index(inplace=True)
    allocation.columns = [category, "Portfolio"]

    return allocation
__docformat__ = "numpy"
import logging
from datetime import datetime
from typing import Tuple
import pandas as pd
import numpy as np
from openbb_terminal.decorators import log_start_end
from openbb_terminal.core.config.paths import USER_PORTFOLIO_DATA_DIRECTORY
from openbb_terminal.rich_config import console
from openbb_terminal.portfolio.statics import PERIODS
from openbb_terminal.portfolio.portfolio_helper import filter_df_by_period
logger = logging.getLogger(__name__)
# pylint: disable=too-many-return-statements, too-many-lines, too-many-statements
# pylint: disable=C0302
# Timestamp captured once at import time and reused for all period math.
# NOTE(review): long-lived sessions keep using the import-time date for the
# mtd/qtd/ytd windows below — confirm this is acceptable.
now = datetime.now()

# Approximate number of trading days covered by each window keyword.
# Calendar-anchored windows (mtd/qtd/ytd) are computed from `now`; fixed
# windows assume 21 trading days per month (252 per year).
PERIODS_DAYS = {
    "mtd": (now - datetime(now.year, now.month, 1)).days,
    "qtd": (
        now
        - datetime(
            now.year,
            # First month of the current calendar quarter
            1 if now.month < 4 else 4 if now.month < 7 else 7 if now.month < 10 else 10,
            1,
        )
    ).days,
    "ytd": (now - datetime(now.year, 1, 1)).days,
    "all": 1,  # minimal window, so "all" never exceeds the sample length
    "3m": 3 * 21,
    "6m": 6 * 21,
    "1y": 12 * 21,
    "3y": 3 * 12 * 21,
    "5y": 5 * 12 * 21,
    "10y": 10 * 12 * 21,
}

# Default location for saved portfolio holdings files
DEFAULT_HOLDINGS_PATH = USER_PORTFOLIO_DATA_DIRECTORY / "holdings"
@log_start_end(log=logger)
def rolling_volatility(
    portfolio_returns: pd.Series, window: str = "1y"
) -> pd.DataFrame:
    """Get rolling volatility

    Parameters
    ----------
    portfolio_returns : pd.Series
        Series of portfolio returns
    window : str
        Rolling window size to use

    Returns
    -------
    pd.DataFrame
        Rolling volatility DataFrame
    """
    length = PERIODS_DAYS[window]
    sample_length = len(portfolio_returns)
    # Refuse windows longer than the available history
    if length > sample_length:
        console.print(
            f"[red]Window length ({window}->{length}) is larger than returns length ({sample_length}).\
\nTry a smaller window.[/red]"
        )
        return pd.DataFrame()

    # std needs at least 2 observations
    window_size = max(2, length)
    return portfolio_returns.rolling(window_size).std()
@log_start_end(log=logger)
def sharpe_ratio(portfolio_returns: pd.Series, risk_free_rate: float) -> float:
    """Get sharpe ratio

    Parameters
    ----------
    portfolio_returns : pd.Series
        Series of portfolio returns
    risk_free_rate : float
        Value to use for risk free rate

    Returns
    -------
    float
        Sharpe ratio. NaN when the series has fewer than two observations,
        since the sample standard deviation is undefined then.
    """
    # Excess return over the risk-free rate, divided by return volatility
    # (sample standard deviation, ddof=1)
    mean = portfolio_returns.mean() - risk_free_rate
    sigma = portfolio_returns.std()
    return mean / sigma
@log_start_end(log=logger)
def rolling_sharpe(
    portfolio_returns: pd.DataFrame, risk_free_rate: float, window: str = "1y"
) -> pd.DataFrame:
    """Get rolling sharpe ratio

    Parameters
    ----------
    portfolio_returns : pd.Series
        Series of portfolio returns
    risk_free_rate : float
        Risk free rate
    window : str
        Rolling window to use
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y

    Returns
    -------
    pd.DataFrame
        Rolling sharpe ratio DataFrame
    """
    length = PERIODS_DAYS[window]
    sample_length = len(portfolio_returns)
    # Refuse windows longer than the available history
    if length > sample_length:
        console.print(
            f"[red]Window length ({window}->{length}) is larger than returns length ({sample_length}).\
\nTry a smaller window.[/red]"
        )
        return pd.DataFrame()

    # std needs at least 2 observations; per-window Sharpe ratio
    window_size = max(2, length)
    return portfolio_returns.rolling(window_size).apply(
        lambda returns: (returns.mean() - risk_free_rate) / returns.std()
    )
@log_start_end(log=logger)
def sortino_ratio(portfolio_returns: pd.Series, risk_free_rate: float) -> float:
    """Get sortino ratio

    Parameters
    ----------
    portfolio_returns : pd.Series
        Series of portfolio returns
    risk_free_rate : float
        Value to use for risk free rate

    Returns
    -------
    float
        Sortino ratio
    """
    # Like the Sharpe ratio, but penalise only downside volatility: the
    # denominator is the std dev of the negative returns alone
    downside_returns = portfolio_returns[portfolio_returns < 0]
    excess_return = portfolio_returns.mean() - risk_free_rate
    return excess_return / downside_returns.std()
@log_start_end(log=logger)
def rolling_sortino(
    portfolio_returns: pd.Series, risk_free_rate: float, window: str = "1y"
) -> pd.DataFrame:
    """Get rolling sortino ratio

    Parameters
    ----------
    portfolio_returns : pd.Series
        Series of portfolio returns
    risk_free_rate : float
        Risk free rate
    window : str
        Rolling window to use

    Returns
    -------
    pd.DataFrame
        Rolling sortino ratio DataFrame
    """
    length = PERIODS_DAYS[window]
    sample_length = len(portfolio_returns)
    # Refuse windows longer than the available history
    if length > sample_length:
        console.print(
            f"[red]Window length ({window}->{length}) is larger than returns length ({sample_length}).\
\nTry a smaller window.[/red]"
        )
        return pd.DataFrame()

    # std needs at least 2 observations; the denominator only considers the
    # negative (downside) returns within each window
    window_size = max(2, length)
    return portfolio_returns.rolling(window_size).apply(
        lambda returns: (returns.mean() - risk_free_rate) / returns[returns < 0].std()
    )
@log_start_end(log=logger)
def rolling_beta(
    portfolio_returns: pd.Series,
    benchmark_returns: pd.Series,
    window: str = "1y",
) -> pd.DataFrame:
    """Get rolling beta using portfolio and benchmark returns

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    benchmark_returns: pd.Series
        Series of benchmark returns
    window: string
        Interval used for rolling values.
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y.

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolio's rolling beta
    """
    length = PERIODS_DAYS[window]
    sample_length = len(portfolio_returns)
    # Refuse windows longer than the available history
    if length > sample_length:
        console.print(
            f"[red]Window length ({window}->{length}) is larger than returns length ({sample_length}).\
\nTry a smaller window.[/red]"
        )
        return pd.DataFrame()

    # Rolling covariance structure of the two series: `.cov()` on a rolling
    # two-column frame yields a 2x2 covariance matrix per date; `.unstack()`
    # flattens it so entries can be addressed by (column, column) pairs.
    covs = (
        pd.DataFrame({"Portfolio": portfolio_returns, "Benchmark": benchmark_returns})
        .dropna(axis=0)
        .rolling(max(2, length))  # needs at least 2 observations.
        .cov()
        .unstack()
        .dropna()
    )
    # Beta = Cov(portfolio, benchmark) / Var(benchmark)
    rolling_beta_num = covs["Portfolio"]["Benchmark"] / covs["Benchmark"]["Benchmark"]
    return rolling_beta_num
@log_start_end(log=logger)
def maximum_drawdown(portfolio_returns: pd.Series) -> float:
    """Get maximum drawdown

    Parameters
    ----------
    portfolio_returns : pd.Series
        Series of portfolio returns

    Returns
    -------
    float
        Maximum drawdown
    """
    # Build the compounded wealth curve, track its running peak, and take
    # the worst fractional drop from any peak (a non-positive number).
    wealth = portfolio_returns.add(1).cumprod()
    running_peak = wealth.cummax()
    drawdowns = wealth.div(running_peak) - 1
    return drawdowns.min()
@log_start_end(log=logger)
def cumulative_returns(data: pd.Series) -> pd.Series:
    """Calculate cumulative returns

    Parameters
    ----------
    data : pd.Series
        Series of portfolio returns

    Returns
    -------
    pd.Series
        Cumulative investment returns series
    """
    # NOTE(review): returns are shifted one period (first value forced to 0)
    # before compounding, so each point reflects growth up to the *previous*
    # period — presumably to anchor the series at 0; confirm with callers.
    shifted = data.shift(periods=1, fill_value=0)
    return shifted.add(1).cumprod() - 1
@log_start_end(log=logger)
def get_gaintopain_ratio(
    historical_trade_data: pd.DataFrame,
    benchmark_trades: pd.DataFrame,
    benchmark_returns: pd.DataFrame,
) -> pd.DataFrame:
    """Get gain-to-pain ratio

    For each standard reporting period, the total return is divided by the
    maximum drawdown over that period, for both portfolio and benchmark.

    Parameters
    ----------
    historical_trade_data: pd.DataFrame
        Dataframe of historical data for the portfolios trade
    benchmark_trades: pd.DataFrame
        Dataframe of the benchmark's trades
    benchmark_returns: pd.DataFrame
        Dataframe of benchmark returns

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolio's gain-to-pain ratio
    """
    benchmark_trades = benchmark_trades.set_index("Date")
    vals = list()
    for period in PERIODS:
        period_historical_trade_data = filter_df_by_period(
            historical_trade_data, period
        )
        period_bench_trades = filter_df_by_period(benchmark_trades, period)
        period_bench_return = filter_df_by_period(benchmark_returns, period)
        if not period_historical_trade_data.empty:
            if not period_bench_trades.empty:
                # Benchmark total return from mirrored trades, over its
                # worst drawdown in the period
                benchmark_values = (
                    period_bench_trades["Benchmark Value"].sum()
                    / period_bench_trades["Benchmark Investment"].sum()
                    - 1
                ) / maximum_drawdown(period_bench_return)
            else:
                # No benchmark trades in the period: compound the return
                # series instead
                benchmark_values = ((1 + period_bench_return).cumprod() - 1).iloc[
                    -1
                ] / maximum_drawdown(period_bench_return)
            # Portfolio total (money-weighted) return: end value over
            # (initial value + net new investment) minus 1, over the
            # portfolio's worst drawdown in the period
            vals.append(
                [
                    round(
                        (
                            period_historical_trade_data["End Value"]["Total"].iloc[-1]
                            / (
                                period_historical_trade_data["Initial Value"][
                                    "Total"
                                ].iloc[0]
                                + period_historical_trade_data["Investment"][
                                    "Total"
                                ].iloc[-1]
                                - period_historical_trade_data["Investment"][
                                    "Total"
                                ].iloc[0]
                            )
                            - 1
                        )
                        / maximum_drawdown(
                            period_historical_trade_data["Returns"]["Total"]
                        ),
                        3,
                    ),
                    round(
                        benchmark_values,
                        3,
                    ),
                ]
            )
        else:
            # No data in this period
            vals.append(["-", "-"])
    gtr_period_df = pd.DataFrame(
        vals, index=PERIODS, columns=["Portfolio", "Benchmark"]
    )

    return gtr_period_df
@log_start_end(log=logger)
def calculate_beta(portfolio_returns: pd.Series, benchmark_returns: pd.Series) -> float:
    """Calculate the beta using portfolio and benchmark return values

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    benchmark_returns: pd.Series
        Series of benchmark returns

    Returns
    -------
    float
        The calculated beta value
    """
    # Align the two series by trimming the head of the longer one so the
    # covariance is computed over samples of equal length
    axis_diff = len(portfolio_returns) - len(benchmark_returns)
    axis_diff_bench = 0
    if axis_diff < 0:
        axis_diff_bench = -axis_diff
        axis_diff = 0
    covariance = np.cov(
        portfolio_returns[axis_diff:], benchmark_returns[axis_diff_bench:]
    )[0][1]
    # Beta = Cov(portfolio, benchmark) / Var(benchmark). The previous
    # implementation divided by the *portfolio* variance, which is not the
    # beta formula (both np.cov and Series.var use ddof=1, so they agree).
    variance = benchmark_returns[axis_diff_bench:].var()
    return covariance / variance
@log_start_end(log=logger)
def get_tracking_error(
    portfolio_returns: pd.Series, benchmark_returns: pd.Series, window: str = "252d"
) -> Tuple[pd.DataFrame, pd.Series]:
    """Get tracking error, or active risk, using portfolio and benchmark returns

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    benchmark_returns: pd.Series
        Series of benchmark returns
    window: string
        Interval used for rolling values in days.
        Examples: 1d, 5d, 10d

    Returns
    -------
    pd.DataFrame
        DataFrame of tracking errors during different time periods
    pd.Series
        Series of rolling tracking error
    """
    # Tracking error is the volatility of the active (excess) return
    active_returns = portfolio_returns - benchmark_returns
    tracker_rolling = active_returns.rolling(window).std()

    vals = []
    for period in PERIODS:
        period_active = filter_df_by_period(active_returns, period)
        if period_active.empty:
            vals.append(["-"])
        else:
            vals.append([round(period_active.std(), 3)])
    tracker_period_df = pd.DataFrame(vals, index=PERIODS, columns=["Tracking Error"])

    return tracker_period_df, tracker_rolling
@log_start_end(log=logger)
def get_information_ratio(
    portfolio_returns: pd.Series,
    historical_trade_data: pd.DataFrame,
    benchmark_trades: pd.DataFrame,
    benchmark_returns: pd.Series,
) -> pd.DataFrame:
    """Calculate information ratio, which measures the active return of an investment
    compared to the benchmark relative to the volatility of the active return

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    historical_trade_data: pd.DataFrame
        Dataframe of historical data for the portfolio's trade
    benchmark_trades: pd.DataFrame
        Dataframe of the benchmark's trades
    benchmark_returns: pd.Series
        Series of benchmark returns

    Returns
    -------
    pd.DataFrame
        DataFrame of the information ratio during different time periods
    """
    # Denominator: tracking error (std dev of the active return) per period
    tracking_err_df, _ = get_tracking_error(portfolio_returns, benchmark_returns)

    benchmark_trades = benchmark_trades.set_index("Date")
    vals = list()
    for periods in PERIODS:
        period_historical_trade_data = filter_df_by_period(
            historical_trade_data, periods
        )
        period_bench_trades = filter_df_by_period(benchmark_trades, periods)
        period_bench_return = filter_df_by_period(benchmark_returns, periods)
        if not period_historical_trade_data.empty:
            if not period_bench_trades.empty:
                # Benchmark total return from mirrored trades when available
                period_bench_total_return = (
                    period_bench_trades["Benchmark Value"].sum()
                    / period_bench_trades["Benchmark Investment"].sum()
                    - 1
                )
            else:
                # Otherwise compound the benchmark return series
                period_bench_total_return = (
                    (1 + period_bench_return).cumprod() - 1
                ).iloc[-1]
            # Active (excess) money-weighted portfolio return over the
            # period's tracking error
            vals.append(
                [
                    round(
                        (
                            (
                                period_historical_trade_data["End Value"]["Total"].iloc[
                                    -1
                                ]
                                / (
                                    period_historical_trade_data["Initial Value"][
                                        "Total"
                                    ].iloc[0]
                                    + period_historical_trade_data["Investment"][
                                        "Total"
                                    ].iloc[-1]
                                    - period_historical_trade_data["Investment"][
                                        "Total"
                                    ].iloc[0]
                                )
                                - 1
                            )
                            - period_bench_total_return
                        )
                        / tracking_err_df.loc[periods, "Tracking Error"],
                        3,
                    )
                ]
            )
        else:
            vals.append(["-"])
    ir_period_df = pd.DataFrame(vals, index=PERIODS, columns=["Information Ratio"])

    return ir_period_df
@log_start_end(log=logger)
def get_tail_ratio(
    portfolio_returns: pd.Series, benchmark_returns: pd.Series, window: str = "252d"
) -> Tuple[pd.DataFrame, pd.Series, pd.Series]:
    """Return the portfolios tail ratio

    The tail ratio compares the right tail of the return distribution with
    the left tail: the 95th percentile divided by the absolute value of the
    5th percentile.

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    benchmark_returns: pd.Series
        Series of benchmark returns
    window: string
        Interval used for rolling values in days.
        Examples: 1d, 5d, 10d

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolios and the benchmarks tail ratio during different time periods
    pd.Series
        Series of the portfolios rolling tail ratio
    pd.Series
        Series of the benchmarks rolling tail ratio
    """
    # Rolling tail ratios over the requested window
    returns_r = portfolio_returns.rolling(window)
    benchmark_returns_r = benchmark_returns.rolling(window)
    portfolio_tr = returns_r.quantile(0.95) / abs(returns_r.quantile(0.05))
    benchmark_tr = benchmark_returns_r.quantile(0.95) / abs(
        benchmark_returns_r.quantile(0.05)
    )

    # Tail ratio per standard reporting period
    vals = list()
    for periods in PERIODS:
        period_return = filter_df_by_period(portfolio_returns, periods)
        period_bench_return = filter_df_by_period(benchmark_returns, periods)
        if not period_return.empty:
            vals.append(
                [
                    round(
                        period_return.quantile(0.95)
                        / abs(period_return.quantile(0.05)),
                        3,
                    ),
                    round(
                        period_bench_return.quantile(0.95)
                        / abs(period_bench_return.quantile(0.05)),
                        3,
                    ),
                ]
            )
        else:
            # No data in this period
            vals.append(["-", "-"])
    tailr_period_df = pd.DataFrame(
        vals, index=PERIODS, columns=["Portfolio", "Benchmark"]
    )

    return tailr_period_df, portfolio_tr, benchmark_tr
@log_start_end(log=logger)
def get_common_sense_ratio(
    portfolio_returns: pd.Series,
    historical_trade_data: pd.DataFrame,
    benchmark_trades: pd.DataFrame,
    benchmark_returns: pd.Series,
) -> pd.DataFrame:
    """Get common sense ratio

    Computed as tail ratio multiplied by the gain-to-pain ratio, for both
    the portfolio and the benchmark.

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    historical_trade_data: pd.DataFrame
        Dataframe of historical data for the portfolios trade
    benchmark_trades: pd.DataFrame
        Dataframe of the benchmarks trades
    benchmark_returns: pd.Series
        Series of benchmark returns

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolios and the benchmarks common sense ratio during different time periods
    """
    tail_ratio_df, _, _ = get_tail_ratio(portfolio_returns, benchmark_returns)
    gaintopain_ratio_df = get_gaintopain_ratio(
        historical_trade_data, benchmark_trades, benchmark_returns
    )

    # NOTE(review): periods without data are reported as the string "-" by
    # the two helpers above; multiplying such entries here would raise a
    # TypeError — confirm callers only reach this with populated periods.
    vals = list()
    for period in PERIODS:
        vals.append(
            [
                round(
                    tail_ratio_df.loc[period, "Portfolio"]
                    * gaintopain_ratio_df.loc[period, "Portfolio"],
                    3,
                ),
                round(
                    tail_ratio_df.loc[period, "Benchmark"]
                    * gaintopain_ratio_df.loc[period, "Benchmark"],
                    3,
                ),
            ]
        )
    csr_period_df = pd.DataFrame(
        vals, index=PERIODS, columns=["Portfolio", "Benchmark"]
    )

    return csr_period_df
@log_start_end(log=logger)
def jensens_alpha(
    portfolio_returns: pd.Series,
    historical_trade_data: pd.DataFrame,
    benchmark_trades: pd.DataFrame,
    benchmark_returns: pd.Series,
    risk_free_rate: float = 0,
    window: str = "1y",
) -> Tuple[pd.DataFrame, pd.Series]:
    """Get jensen's alpha

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    historical_trade_data: pd.DataFrame
        Dataframe of historical data for the portfolios trade
    benchmark_trades: pd.DataFrame
        Dataframe of the benchmarks trades
    benchmark_returns: pd.Series
        Series of benchmark returns
    risk_free_rate: float
        Risk free rate
    window: str
        Interval used for rolling values.
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y.

    Returns
    -------
    pd.DataFrame
        DataFrame of jensens's alpha during different time periods
    pd.Series
        Series of jensens's alpha data
    """
    # --- Rolling alpha ----------------------------------------------------
    length = PERIODS_DAYS[window]
    periods_d = PERIODS_DAYS
    # Rolling cumulative (compounded) returns over the window for both the
    # portfolio and the benchmark
    period_cum_returns = (1.0 + portfolio_returns).rolling(window=length).agg(
        lambda x: x.prod()
    ) - 1
    period_cum_bench_returns = (1.0 + benchmark_returns).rolling(window=length).agg(
        lambda x: x.prod()
    ) - 1
    # Risk-free return scaled to the window length (252 trading days/year)
    rfr_cum_returns = risk_free_rate * length / 252
    beta = rolling_beta(portfolio_returns, benchmark_returns, window)
    # Jensen's alpha: realised return minus the CAPM-expected return
    ja_rolling = period_cum_returns - (
        rfr_cum_returns + beta * (period_cum_bench_returns - rfr_cum_returns)
    )

    # --- Alpha per standard reporting period --------------------------------
    benchmark_trades = benchmark_trades.set_index("Date")
    vals = list()
    for periods in PERIODS:
        period_return = filter_df_by_period(portfolio_returns, periods)
        period_bench_return = filter_df_by_period(benchmark_returns, periods)
        period_historical_trade_data = filter_df_by_period(
            historical_trade_data, periods
        )
        period_bench_trades = filter_df_by_period(benchmark_trades, periods)
        if not period_return.empty:
            beta = calculate_beta(period_return, period_bench_return)
            # Money-weighted cumulative portfolio return for the period:
            # end value over (initial value + net new investment) minus 1
            period_cum_returns = (
                period_historical_trade_data["End Value"]["Total"].iloc[-1]
                / (
                    period_historical_trade_data["Initial Value"]["Total"].iloc[0]
                    + period_historical_trade_data["Investment"]["Total"].iloc[-1]
                    - period_historical_trade_data["Investment"]["Total"].iloc[0]
                )
                - 1
            )
            if not period_bench_trades.empty:
                # Benchmark return from mirrored trades when available
                period_bench_total_return = (
                    period_bench_trades["Benchmark Value"].sum()
                    / period_bench_trades["Benchmark Investment"].sum()
                    - 1
                )
            else:
                # Otherwise compound the benchmark return series
                period_bench_total_return = (
                    (1 + period_bench_return).cumprod() - 1
                ).iloc[-1]
            rfr_cum_returns = risk_free_rate * periods_d[periods] / 252
            vals.append(
                [
                    round(
                        period_cum_returns
                        - (
                            rfr_cum_returns
                            + beta * (period_bench_total_return - rfr_cum_returns)
                        ),
                        3,
                    )
                ]
            )
        else:
            vals.append(["-"])
    ja_period_df = pd.DataFrame(vals, index=PERIODS, columns=["Portfolio"])

    return ja_period_df, ja_rolling
@log_start_end(log=logger)
def get_calmar_ratio(
    portfolio_returns: pd.Series,
    historical_trade_data: pd.DataFrame,
    benchmark_trades: pd.DataFrame,
    benchmark_returns: pd.Series,
    window: str = "3y",
) -> Tuple[pd.DataFrame, pd.Series]:
    """Get calmar ratio

    Parameters
    ----------
    portfolio_returns: pd.Series
        Series of portfolio returns
    historical_trade_data: pd.DataFrame
        Dataframe of historical data for the portfolios trade
    benchmark_trades: pd.DataFrame
        Dataframe of the benchmarks trades
    benchmark_returns: pd.Series
        Series of benchmark returns
    window: str
        Interval used for rolling values.
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y.

    Returns
    -------
    pd.DataFrame
        DataFrame of calmar ratio of the benchmark and portfolio during different time periods
    pd.Series
        Series of calmar ratio data
    """
    periods_d = PERIODS_DAYS
    # Translate the window keyword into a number of observations. The
    # previous implementation passed the raw keyword (e.g. "3y") to
    # `rolling` and then called `int("3y")`, which raised a ValueError for
    # every valid window.
    length = periods_d[window]
    # Rolling cumulative (compounded) return over the window
    period_cum_returns = (1.0 + portfolio_returns).rolling(
        window=max(2, length)
    ).agg(lambda x: x.prod()) - 1
    # Annualize the rolling cumulative return (252 trading days per year),
    # consistent with the per-period computation below
    annual_return = (1 + period_cum_returns) ** (252 / length) - 1
    cr_rolling = annual_return / maximum_drawdown(portfolio_returns)

    benchmark_trades = benchmark_trades.set_index("Date")
    vals = list()
    for periods in PERIODS:
        period_return = filter_df_by_period(portfolio_returns, periods)
        period_historical_trade_data = filter_df_by_period(
            historical_trade_data, periods
        )
        period_bench_trades = filter_df_by_period(benchmark_trades, periods)
        period_bench_return = filter_df_by_period(benchmark_returns, periods)
        if (not period_return.empty) and (periods_d[periods] != 0):
            # Money-weighted cumulative portfolio return for the period
            period_cum_returns = (
                period_historical_trade_data["End Value"]["Total"].iloc[-1]
                / (
                    period_historical_trade_data["Initial Value"]["Total"].iloc[0]
                    + period_historical_trade_data["Investment"]["Total"].iloc[-1]
                    - period_historical_trade_data["Investment"]["Total"].iloc[0]
                )
                - 1
            )
            if not period_bench_trades.empty:
                # Benchmark return from mirrored trades when available
                period_bench_total_return = (
                    period_bench_trades["Benchmark Value"].sum()
                    / period_bench_trades["Benchmark Investment"].sum()
                    - 1
                )
            else:
                # Otherwise compound the benchmark return series
                period_bench_total_return = (
                    (1 + period_bench_return).cumprod() - 1
                ).iloc[-1]
            # Annualize both returns over the actual number of observations
            annual_return = (1 + period_cum_returns) ** (
                1 / (len(period_return) / 252)
            ) - 1
            annual_bench_return = (1 + period_bench_total_return) ** (
                1 / (len(period_bench_return) / 252)
            ) - 1
            drawdown = maximum_drawdown(period_return)
            bench_drawdown = maximum_drawdown(period_bench_return)
            if (drawdown != 0) and (bench_drawdown != 0):
                vals.append(
                    [
                        round(annual_return / drawdown, 3),
                        round(annual_bench_return / bench_drawdown, 3),
                    ]
                )
            else:
                vals.append(["-", "-"])
        else:
            vals.append(["-", "-"])
    cr_period_df = pd.DataFrame(vals, index=PERIODS, columns=["Portfolio", "Benchmark"])

    return cr_period_df, cr_rolling
@log_start_end(log=logger)
def get_kelly_criterion(
    portfolio_returns: pd.Series, portfolio_trades: pd.DataFrame
) -> pd.DataFrame:
    """Get kelly criterion

    Parameters
    ----------
    portfolio_returns: pd.Series
        DataFrame of portfolio returns
    portfolio_trades: pd.DataFrame
        DataFrame of the portfolio trades with trade return in %

    Returns
    -------
    pd.DataFrame
        DataFrame of kelly criterion of the portfolio during different time periods
    """
    # Work on a copy: the previous implementation wrote the parsed dates
    # back into the caller's DataFrame
    portfolio_trades = portfolio_trades.copy()
    portfolio_trades["Date"] = pd.to_datetime(portfolio_trades["Date"])
    portfolio_trades = portfolio_trades.set_index("Date")

    vals: list = list()
    for period in PERIODS:
        period_return = filter_df_by_period(portfolio_returns, period)
        period_portfolio_tr = filter_df_by_period(portfolio_trades, period)
        if (not period_return.empty) and (not period_portfolio_tr.empty):
            non_cash_trades = period_portfolio_tr[
                period_portfolio_tr["Type"].str.upper() != "CASH"
            ]
            if non_cash_trades.empty:
                # Previously a period with only CASH trades raised
                # ZeroDivisionError; report "no value" instead
                vals.append(["-"])
                continue
            # w: share of winning return periods; r: share of winning
            # trades among all non-cash trades
            w = len(period_return[period_return > 0]) / len(period_return)
            r = len(
                period_portfolio_tr[period_portfolio_tr["Portfolio % Return"] > 0]
            ) / len(non_cash_trades)
            if r != 0:
                # Kelly fraction: w - (1 - w) / r
                vals.append([round(w - (1 - w) / r, 3)])
            else:
                vals.append(["-"])
        else:
            vals.append(["-"])

    kc_period_df = pd.DataFrame(vals, index=PERIODS, columns=["Kelly %"])

    return kc_period_df
@log_start_end(log=logger)
def get_payoff_ratio(portfolio_trades: pd.DataFrame) -> pd.DataFrame:
    """Get payoff ratio

    Average absolute gain of winning trades divided by the average absolute
    loss of losing trades, per standard reporting period.

    Parameters
    ----------
    portfolio_trades: pd.DataFrame
        DataFrame of the portfolio trades with trade return in % and abs values

    Returns
    -------
    pd.DataFrame
        DataFrame of payoff ratio of the portfolio during different time periods
    """
    # Work on a copy so the caller's DataFrame is not mutated
    portfolio_trades = portfolio_trades.copy()
    portfolio_trades["Date"] = pd.to_datetime(portfolio_trades["Date"])
    portfolio_trades = portfolio_trades.set_index("Date")

    no_losses = False

    vals = list()
    for period in PERIODS:
        period_portfolio_tr = filter_df_by_period(portfolio_trades, period)

        # Bug fix: check the *period-filtered* trades, not the full history;
        # previously empty periods were misreported as "no losing trades".
        if not period_portfolio_tr.empty:
            portfolio_wins = period_portfolio_tr[
                period_portfolio_tr["Portfolio % Return"] > 0
            ]
            portfolio_loses = period_portfolio_tr[
                period_portfolio_tr["Portfolio % Return"] < 0
            ]
            if portfolio_loses.empty:
                vals.append(["-"])
                no_losses = True
                continue
            avg_w = portfolio_wins["Abs Portfolio Return"].mean()
            avg_l = portfolio_loses["Abs Portfolio Return"].mean()
            # mean() of an empty win selection is NaN; the old identity check
            # (`avg_w is not np.nan`) never detected it, so report "0" here
            # as originally intended
            vals.append(["0"] if np.isnan(avg_w) else [round(avg_w / abs(avg_l), 3)])
        else:
            vals.append(["-"])

    if no_losses:
        console.print(
            "During some time periods there were no losing trades.",
            "Thus some values could not be calculated.",
        )

    pr_period_ratio = pd.DataFrame(
        vals, index=PERIODS, columns=["Payoff Ratio"]
    ).fillna("-")

    return pr_period_ratio
@log_start_end(log=logger)
def get_profit_factor(portfolio_trades: pd.DataFrame) -> pd.DataFrame:
    """Get profit factor

    Gross profit of winning trades divided by the absolute gross loss of
    losing trades, per standard reporting period.

    Parameters
    ----------
    portfolio_trades: pd.DataFrame
        DataFrame of the portfolio trades with trade return in % and abs values

    Returns
    -------
    pd.DataFrame
        DataFrame of profit factor of the portfolio during different time periods
    """
    # Work on a copy so the caller's DataFrame is not mutated
    portfolio_trades = portfolio_trades.copy()
    portfolio_trades["Date"] = pd.to_datetime(portfolio_trades["Date"])
    portfolio_trades = portfolio_trades.set_index("Date")

    no_losses = False

    vals = list()
    for period in PERIODS:
        period_portfolio_tr = filter_df_by_period(portfolio_trades, period)

        # Bug fix: check the *period-filtered* trades, not the full history;
        # previously empty periods were misreported as "no losing trades".
        if not period_portfolio_tr.empty:
            portfolio_wins = period_portfolio_tr[
                period_portfolio_tr["Portfolio % Return"] > 0
            ]
            portfolio_loses = period_portfolio_tr[
                period_portfolio_tr["Portfolio % Return"] < 0
            ]
            if portfolio_loses.empty:
                vals.append(["-"])
                no_losses = True
                continue
            gross_profit = portfolio_wins["Abs Portfolio Return"].sum()
            gross_loss = portfolio_loses["Abs Portfolio Return"].sum()
            vals.append([round(gross_profit / abs(gross_loss), 3)])
        else:
            vals.append(["-"])

    if no_losses:
        console.print(
            "During some time periods there were no losing trades.",
            "Thus some values could not be calculated.",
        )

    pf_period_df = pd.DataFrame(vals, index=PERIODS, columns=["Profit Factor"]).fillna(
        "-"
    )

    return pf_period_df
__docformat__ = "numpy"
# Benchmarks selectable for portfolio comparison, keyed by ticker with a
# human-readable description as the value.
BENCHMARK_CHOICES = {
    "SPY": "SPDR S&P 500 ETF Trust (SPY)",
    "IVV": "iShares Core S&P 500 ETF (IVV)",
    "VTI": "Vanguard Total Stock Market ETF (VTI)",
    "VOO": "Vanguard S&P 500 ETF (VOO)",
    "QQQ": "Invesco QQQ Trust (QQQ)",
    "VTV": "Vanguard Value ETF (VTV)",
    "VEA": "Vanguard FTSE Developed Markets ETF (VEA)",
    "IEFA": "iShares Core MSCI EAFE ETF (IEFA)",
    "AGG": "iShares Core U.S. Aggregate Bond ETF (AGG)",
    "BND": "Vanguard Total Bond Market ETF (BND)",
    "VWO": "Vanguard FTSE Emerging Markets ETF (VWO)",
    "VUG": "Vanguard Growth ETF (VUG)",
    "IEMG": "iShares Core MSCI Emerging Markets ETF (IEMG)",
    "IJR": "iShares Core S&P Small-Cap ETF (IJR)",
    "GLD": "SPDR Gold Shares (GLD)",
    "IWF": "iShares Russell 1000 Growth ETF (IWF)",
    "IJH": "iShares Core S&P Mid-Cap ETF (IJH)",
    "VIG": "Vanguard Dividend Appreciation ETF (VIG)",
    "IWM": "iShares Russell 2000 ETF (IWM)",
    "IWD": "iShares Russell 1000 Value ETF (IWD)",
    "VO": "Vanguard Mid-Cap ETF (VO)",
    "EFA": "iShares MSCI EAFE ETF (EFA)",
    "VXUS": "Vanguard Total International Stock ETF (VXUS)",
    "VGT": "Vanguard Information Technology ETF (VGT)",
    "VYM": "Vanguard High Dividend Yield Index ETF (VYM)",
    "BNDX": "Vanguard Total International Bond ETF (BNDX)",
    "VNQ": "Vanguard Real Estate ETF (VNQ)",
    "VB": "Vanguard Small Cap ETF (VB)",
    "XLK": "Technology Select Sector SPDR Fund (XLK)",
    "ITOT": "iShares Core S&P Total U.S. Stock Market ETF (ITOT)",
    "VCIT": "Vanguard Intermediate-Term Corporate Bond ETF (VCIT)",
    "VCSH": "Vanguard Short-Term Corporate Bond ETF (VCSH)",
    "XLE": "Energy Select Sector SPDR Fund (XLE)",
    "XLV": "Health Care Select Sector SPDR Fund (XLV)",
    "BSV": "Vanguard Short-Term Bond ETF (BSV)",
    "XLF": "Financial Select Sector SPDR Fund (XLF)",
    "SCHD": "Schwab US Dividend Equity ETF (SCHD)",
    "RSP": "Invesco S&P 500® Equal Weight ETF (RSP)",
    "LQD": "iShares iBoxx $ Investment Grade Corporate Bond ETF (LQD)",
    "IVW": "iShares S&P 500 Growth ETF (IVW)",
    "VEU": "Vanguard FTSE All-World ex-US Index Fund (VEU)",
    "TIP": "iShares TIPS Bond ETF (TIP)",
    "IAU": "iShares Gold Trust (IAU)",
    "SCHX": "Schwab U.S. Large-Cap ETF (SCHX)",
    "IXUS": "iShares Core MSCI Total International Stock ETF (IXUS)",
    "IWR": "iShares Russell Midcap ETF (IWR)",
    "IWB": "iShares Russell 1000 ETF (IWB)",
    "DIA": "SPDR Dow Jones Industrial Average ETF Trust (DIA)",
    "EEM": "iShares MSCI Emerging Markets ETF (EEM)",
    "USMV": "iShares MSCI USA Min Vol Factor ETF (USMV)",
    "SCHF": "Schwab International Equity ETF (SCHF)",
    "IVE": "iShares S&P 500 Value ETF (IVE)",
    "MUB": "iShares National Muni Bond ETF (MUB)",
    "VV": "Vanguard Large Cap ETF (VV)",
    "VBR": "Vanguard Small Cap Value ETF (VBR)",
    "ESGU": "iShares ESG Aware MSCI USA ETF (ESGU)",
    "VT": "Vanguard Total World Stock ETF (VT)",
    "DGRO": "iShares Core Dividend Growth ETF (DGRO)",
    "SHY": "iShares 1-3 Year Treasury Bond ETF (SHY)",
    "DVY": "iShares Select Dividend ETF (DVY)",
    "QUAL": "iShares MSCI USA Quality Factor ETF (QUAL)",
    "SCHB": "Schwab U.S. Broad Market ETF (SCHB)",
    "MBB": "iShares MBS ETF (MBB)",
    "SDY": "SPDR S&P Dividend ETF (SDY)",
    "IGSB": "iShares 1-5 Year Investment Grade Corporate Bond ETF (IGSB)",
    "VTIP": "Vanguard Short-Term Inflation-Protected Securities ETF (VTIP)",
    "JPST": "JPMorgan Ultra-Short Income ETF (JPST)",
    "TLT": "iShares 20+ Year Treasury Bond ETF (TLT)",
    "ACWI": "iShares MSCI ACWI ETF (ACWI)",
    "MDY": "SPDR S&P Midcap 400 ETF Trust (MDY)",
    "IUSB": "iShares Core Total USD Bond Market ETF (IUSB)",
    "SHV": "iShares Short Treasury Bond ETF (SHV)",
    "VGK": "Vanguard FTSE Europe ETF (VGK)",
    "XLY": "Consumer Discretionary Select Sector SPDR Fund (XLY)",
    "BIL": "SPDR Bloomberg 1-3 Month T-Bill ETF (BIL)",
    "GOVT": "iShares U.S. Treasury Bond ETF (GOVT)",
    "VHT": "Vanguard Health Care ETF (VHT)",
    "VOE": "Vanguard Mid-Cap Value ETF (VOE)",
    "XLP": "Consumer Staples Select Sector SPDR Fund (XLP)",
    "SCHP": "Schwab U.S. TIPS ETF (SCHP)",
    "IEF": "iShares 7-10 Year Treasury Bond ETF (IEF)",
    "PFF": "iShares Preferred & Income Securities ETF (PFF)",
    "XLU": "Utilities Select Sector SPDR Fund (XLU)",
    "VTEB": "Vanguard Tax-Exempt Bond ETF (VTEB)",
    "EFV": "iShares MSCI EAFE Value ETF (EFV)",
    "SCHG": "Schwab U.S. Large-Cap Growth ETF (SCHG)",
    "EMB": "iShares J.P. Morgan USD Emerging Markets Bond ETF (EMB)",
    "DFAC": "Dimensional U.S. Core Equity 2 ETF (DFAC)",
    "SCHA": "Schwab U.S. Small-Cap ETF (SCHA)",
    "GDX": "VanEck Gold Miners ETF (GDX)",
    "VMBS": "Vanguard Mortgage-Backed Securities ETF (VMBS)",
    "TQQQ": "ProShares UltraPro QQQ (TQQQ)",
    "VGSH": "Vanguard Short-Term Treasury ETF (VGSH)",
    "HYG": "iShares iBoxx $ High Yield Corporate Bond ETF (HYG)",
    "XLI": "Industrial Select Sector SPDR Fund (XLI)",
    "IWS": "iShares Russell Mid-Cap Value ETF (IWS)",
    "VXF": "Vanguard Extended Market ETF (VXF)",
    "SPLG": "SPDR Portfolio S&P 500 ETF (SPLG)",
    "SPYV": "SPDR Portfolio S&P 500 Value ETF (SPYV)",
    "IWN": "iShares Russell 2000 Value ETF (IWN)",
}

# Time windows accepted by the period-filtering helpers across the
# portfolio code (month/quarter/year-to-date, fixed look-backs, everything).
PERIODS = ["mtd", "qtd", "ytd", "3m", "6m", "1y", "3y", "5y", "10y", "all"]

# Mapping of country name to the region bucket used for allocation grouping.
REGIONS = {
    "Afghanistan": "Middle East",
    "Anguilla": "North America",
    "Argentina": "Latin America",
    "Australia": "Asia",
    "Austria": "Europe",
    "Azerbaijan": "Europe",
    "Bahamas": "North America",
    "Bangladesh": "Asia",
    "Barbados": "North America",
    "Belgium": "Europe",
    "Belize": "North America",
    "Bermuda": "North America",
    "Botswana": "Africa",
    "Brazil": "Latin America",
    "British Virgin Islands": "North America",
    "Cambodia": "Asia",
    "Canada": "North America",
    "Cayman Islands": "North America",
    "Chile": "Latin America",
    "China": "Asia",
    "Colombia": "Latin America",
    "Costa Rica": "North America",
    "Cyprus": "Europe",
    "Czech Republic": "Europe",
    "Denmark": "Europe",
    "Dominican Republic": "North America",
    "Egypt": "Middle East",
    "Estonia": "Europe",
    "Falkland Islands": "Latin America",
    "Finland": "Europe",
    "France": "Europe",
    "French Guiana": "Europe",
    "Gabon": "Africa",
    "Georgia": "Europe",
    "Germany": "Europe",
    "Ghana": "Africa",
    "Gibraltar": "Europe",
    "Greece": "Europe",
    "Greenland": "North America",
    "Guernsey": "Europe",
    "Hong Kong": "Asia",
    "Hungary": "Europe",
    "Iceland": "Europe",
    "India": "Asia",
    "Indonesia": "Asia",
    "Ireland": "Europe",
    "Isle of Man": "Europe",
    "Israel": "Middle East",
    "Italy": "Europe",
    "Ivory Coast": "Africa",
    "Japan": "Asia",
    "Jersey": "Europe",
    "Jordan": "Middle East",
    "Kazakhstan": "Asia",
    "Kyrgyzstan": "Asia",
    "Latvia": "Europe",
    "Liechtenstein": "Europe",
    "Lithuania": "Europe",
    "Luxembourg": "Europe",
    "Macau": "Asia",
    "Macedonia": "Europe",
    "Malaysia": "Asia",
    "Malta": "Europe",
    "Mauritius": "Africa",
    "Mexico": "Latin America",
    "Monaco": "Europe",
    "Mongolia": "Asia",
    "Montenegro": "Europe",
    "Morocco": "Africa",
    "Mozambique": "Africa",
    "Myanmar": "Asia",
    "Namibia": "Africa",
    "Netherlands": "Europe",
    "Netherlands Antilles": "Europe",
    "New Zealand": "Asia",
    "Nigeria": "Africa",
    "Norway": "Europe",
    "Panama": "North America",
    "Papua New Guinea": "Asia",
    "Peru": "Latin America",
    "Philippines": "Asia",
    "Poland": "Europe",
    "Portugal": "Europe",
    "Qatar": "Middle East",
    "Reunion": "Africa",
    "Romania": "Europe",
    "Russia": "Asia",
    "Saudi Arabia": "Middle East",
    "Senegal": "Africa",
    "Singapore": "Asia",
    "Slovakia": "Europe",
    "Slovenia": "Europe",
    "South Africa": "Africa",
    "South Korea": "Asia",
    "Spain": "Europe",
    "Suriname": "Latin America",
    "Sweden": "Europe",
    "Switzerland": "Europe",
    "Taiwan": "Asia",
    "Tanzania": "Africa",
    "Thailand": "Asia",
    "Turkey": "Middle East",
    "Ukraine": "Europe",
    "United Arab Emirates": "Middle East",
    "United Kingdom": "Europe",
    "United States": "North America",
    "Uruguay": "Latin America",
    "Vietnam": "Asia",
    "Zambia": "Africa",
}
__docformat__ = "numpy"
import logging
from datetime import datetime, date
import os
from pathlib import Path
import csv
from typing import List
from dateutil.relativedelta import relativedelta
import yfinance as yf
import pandas as pd
from openbb_terminal.core.config.paths import USER_PORTFOLIO_DATA_DIRECTORY
from openbb_terminal.rich_config import console
from openbb_terminal.portfolio.statics import REGIONS
logger = logging.getLogger(__name__)
# pylint: disable=too-many-return-statements, too-many-lines, too-many-statements
# pylint: disable=C0302
# Timestamp captured once at import time; the day counts below are therefore
# relative to module load, not to each call — NOTE(review): confirm this
# staleness is acceptable for long-running sessions.
now = datetime.now()
# Number of days covered by each supported period string. The calendar
# periods (mtd/qtd/ytd) are computed exactly from `now`; the fixed windows
# approximate a month as 21 trading days.
PERIODS_DAYS = {
    "mtd": (now - datetime(now.year, now.month, 1)).days,
    "qtd": (
        now
        - datetime(
            now.year,
            # First month of the current quarter: 1, 4, 7 or 10
            1 if now.month < 4 else 4 if now.month < 7 else 7 if now.month < 10 else 10,
            1,
        )
    ).days,
    "ytd": (now - datetime(now.year, 1, 1)).days,
    "all": 1,  # presumably a sentinel meaning "no fixed window" — TODO confirm
    "3m": 3 * 21,
    "6m": 6 * 21,
    "1y": 12 * 21,
    "3y": 3 * 12 * 21,
    "5y": 5 * 12 * 21,
    "10y": 10 * 12 * 21,
}
# Default directory for saved holdings files
DEFAULT_HOLDINGS_PATH = USER_PORTFOLIO_DATA_DIRECTORY / "holdings"
def is_ticker(ticker: str) -> bool:
    """Return True when *ticker* resolves to a quotable Yahoo Finance symbol.

    Parameters
    ----------
    ticker : str
        The string to be tested

    Returns
    -------
    bool
        Whether the string is a ticker
    """
    # A quotable symbol exposes a previous close in its info payload
    return "previousClose" in yf.Ticker(ticker).info
# TODO: Is this being used anywhere?
def beta_word(beta: float) -> str:
    """Describe a beta value in words.

    Parameters
    ----------
    beta : float
        The beta for a portfolio

    Returns
    -------
    str
        The description of the beta, e.g. "very high" or "moderately low"
    """
    # Qualifier scales with the distance from the market beta of 1
    distance = abs(1 - beta)
    if distance > 3:
        part = "extremely "
    elif distance > 2:
        part = "very "
    elif distance > 1:
        part = ""
    else:
        part = "moderately "

    # BUG FIX: the conditional must select only the word; the previous
    # `part + "high" if beta > 1 else "low"` dropped the qualifier for
    # betas at or below 1 due to operator precedence.
    return part + ("high" if beta > 1 else "low")
def clean_name(name: str) -> str:
    """Turn a beta-prefixed column name into an upper-case ticker.

    Parameters
    ----------
    name : str
        The value to be cleaned

    Returns
    -------
    str
        A cleaned value
    """
    stripped = name.replace("beta_", "")
    return stripped.upper()
def filter_df_by_period(df: pd.DataFrame, period: str = "all") -> pd.DataFrame:
    """Filter a DatetimeIndex-ed dataframe down to the selected time period.

    Parameters
    ----------
    df: pd.DataFrame
        Dataframe to be filtered in terms of time
    period : str
        Period in which to filter dataframe.
        Possible choices are: mtd, qtd, ytd, 3m, 6m, 1y, 3y, 5y, 10y, all

    Returns
    -------
    pd.DataFrame
        The dataframe restricted to the requested window (unknown periods
        and "all" return the dataframe unchanged)
    """
    current = datetime.now()
    if period == "mtd":
        return df[df.index.strftime("%Y-%m") == current.strftime("%Y-%m")]
    if period == "qtd":
        year = current.strftime("%Y")
        months = df.index.strftime("%Y-%m")
        if current.month < 4:
            # BUG FIX: previously only an upper bound was applied here, so
            # "quarter to date" in Q1 included every month of earlier years.
            return df[(months >= f"{year}-01") & (months < f"{year}-04")]
        if current.month < 7:
            return df[(months >= f"{year}-04") & (months < f"{year}-07")]
        if current.month < 10:
            return df[(months >= f"{year}-07") & (months < f"{year}-10")]
        return df[months >= f"{year}-10"]
    if period == "ytd":
        return df[df.index.strftime("%Y") == current.strftime("%Y")]

    # Fixed calendar look-backs; pd.DateOffset performs the same
    # day-clamping month/year arithmetic as dateutil's relativedelta.
    lookbacks = {
        "3m": pd.DateOffset(months=3),
        "6m": pd.DateOffset(months=6),
        "1y": pd.DateOffset(years=1),
        "3y": pd.DateOffset(years=3),
        "5y": pd.DateOffset(years=5),
        "10y": pd.DateOffset(years=10),
    }
    if period in lookbacks:
        return df[df.index >= (current - lookbacks[period])]
    return df
def make_equal_length(df1: pd.DataFrame, df2: pd.DataFrame):
    """Trim the longer of two dataframes to the shorter one's index.

    Parameters
    ----------
    df1: pd.DataFrame
        The first DataFrame that needs to be compared.
    df2: pd.DataFrame
        The second DataFrame that needs to be compared.

    Returns
    -------
    df1 and df2
        Both DataFrames returned
    """
    length_one, length_two = len(df1.index), len(df2.index)
    if length_one > length_two:
        df1 = df1.loc[df2.index]
    elif length_two > length_one:
        df2 = df2.loc[df1.index]
    return df1, df2
def get_region_from_country(country: str) -> str:
    """Map a country name to its region bucket via the REGIONS table.

    Parameters
    ----------
    country: str
        The country to assign region.

    Returns
    -------
    str
        Region to which country belongs.

    Raises
    ------
    KeyError
        If the country is not present in the REGIONS mapping.
    """
    return REGIONS[country]
def get_info_update_file(ticker: str, file_path: Path, writemode: str) -> List[str]:
    """Fetch ticker info (Sector, Industry, Country, Region) from Yahoo Finance
    and persist it to *file_path* for later lookups.

    Parameters
    ----------
    ticker: str
        The ticker to get information.
    file_path: Path
        The CSV file in which to save the information.
    writemode: str
        The mode to write into the file, 'w' (write headers first) or 'a' (append).

    Returns
    -------
    List[str]
        [sector, industry, country, region], or four empty strings when
        Yahoo Finance reports no sector for the ticker.
    """
    # Pull ticker info from yfinance
    yf_ticker_info = yf.Ticker(ticker).info

    if "sector" not in yf_ticker_info:
        # Ticker does not have a valid sector; flag it inline for the caller's output
        console.print(f"F:{ticker}", end="")
        return ["", "", "", ""]

    # NOTE(review): assumes "industry" and "country" are always present when
    # "sector" is — confirm against yfinance responses.
    ticker_info_list = [
        yf_ticker_info["sector"],
        # Replace the em dash with a plain hyphen for UTF-8 readability
        yf_ticker_info["industry"].replace("—", "-"),
        yf_ticker_info["country"],
        get_region_from_country(yf_ticker_info["country"]),
    ]

    with open(file_path, writemode, newline="") as f:
        writer = csv.writer(f)
        if writemode != "a":
            # File did not exist or was empty, so write the header row first
            writer.writerow(["Ticker", "Sector", "Industry", "Country", "Region"])
        writer.writerow([ticker] + ticker_info_list)
        # BUG FIX: removed the redundant f.close() — the with-statement
        # already closes the file.

    return ticker_info_list
def get_info_from_ticker(ticker: str) -> list:
    """Get info (Sector, Industry, Country and Region) for a ticker, using a
    local CSV cache and falling back to Yahoo Finance on a miss.

    Parameters
    ----------
    ticker: str
        The ticker to get information.

    Returns
    -------
    List[str]
        List with ticker information.
    """
    cache_path = Path(str(USER_PORTFOLIO_DATA_DIRECTORY), "tickers_info.csv")

    cache_usable = cache_path.is_file() and os.stat(cache_path).st_size > 0
    if not cache_usable:
        # Cache file missing or empty: create it (header row included)
        return get_info_update_file(ticker, cache_path, "w")

    cached = pd.read_csv(cache_path)
    matches = cached.loc[cached["Ticker"] == ticker]
    if len(matches) > 0:
        # Cache hit: return the stored row without the ticker column
        return list(matches.iloc[0].drop("Ticker"))

    # Cache miss: fetch from Yahoo Finance and append to the cache
    return get_info_update_file(ticker, cache_path, "a")
def _months_back(from_date: date, months: int) -> date:
    """Shift *from_date* back by *months* calendar months, clamping the day
    to the target month's length (e.g. Mar 31 - 1 month -> Feb 28/29), the
    same behavior as dateutil's relativedelta."""
    import calendar

    total = from_date.year * 12 + (from_date.month - 1) - months
    year, month_index = divmod(total, 12)
    last_day = calendar.monthrange(year, month_index + 1)[1]
    return date(year, month_index + 1, min(from_date.day, last_day))


def get_start_date_from_period(period: str) -> date:
    """Get start date of a time period based on the period string.

    Parameters
    ----------
    period: str
        Period to get start date from. One of: 10y, 5y, 3y, 1y, 6m, 3m,
        ytd, qtd, mtd.

    Returns
    -------
    date
        Start date of the period.

    Raises
    ------
    ValueError
        If *period* is not supported (previously an unknown period caused
        an unhelpful UnboundLocalError on the return statement).
    """
    today = date.today()

    months_back_by_period = {
        "10y": 120,
        "5y": 60,
        "3y": 36,
        "1y": 12,
        "6m": 6,
        "3m": 3,
    }
    if period in months_back_by_period:
        return _months_back(today, months_back_by_period[period])
    if period == "ytd":
        return date(today.year, 1, 1)
    if period == "qtd":
        # First month of the current quarter: 1, 4, 7 or 10
        quarter_start = 3 * ((today.month - 1) // 3) + 1
        return date(today.year, quarter_start, 1)
    if period == "mtd":
        return date(today.year, today.month, 1)
    raise ValueError(f"Unsupported period: {period}")
__docformat__ = "numpy"
import logging
from typing import Tuple, Union
import numpy as np
import scipy
import pandas as pd
from sklearn.metrics import r2_score
from openbb_terminal.common.quantitative_analysis import qa_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.portfolio.statics import PERIODS
from openbb_terminal.portfolio import portfolio_helper, metrics_model
from openbb_terminal.portfolio.portfolio_engine import PortfolioEngine
# pylint: disable=E1136,W0201,R0902,C0302
# pylint: disable=unsupported-assignment-operation,redefined-outer-name,too-many-public-methods, consider-using-f-string
logger = logging.getLogger(__name__)
pd.options.mode.chained_assignment = None
@log_start_end(log=logger)
def generate_portfolio(
    transactions_file_path: str,
    benchmark_symbol: str = "SPY",
    full_shares: bool = False,
    risk_free_rate: float = 0,
) -> PortfolioEngine:
    """Build a PortfolioEngine from a transactions file.

    Parameters
    ----------
    transactions_file_path : str
        Path to transactions file
    benchmark_symbol : str
        Benchmark ticker to download data
    full_shares : bool
        Whether to mimic the portfolio trades exactly (partial shares) or round down the
        quantity to the nearest number
    risk_free_rate : float
        Risk free rate in float format

    Returns
    -------
    PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    """
    engine = PortfolioEngine(
        PortfolioEngine.read_transactions(transactions_file_path)
    )
    engine.generate_portfolio_data()
    engine.set_benchmark(symbol=benchmark_symbol, full_shares=full_shares)
    engine.set_risk_free_rate(risk_free_rate)
    return engine
@log_start_end(log=logger)
def get_transactions(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Return the transactions held by a PortfolioEngine.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        Portfolio transactions

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.show(p)
    """
    transactions = portfolio_engine.get_transactions()
    return transactions
@log_start_end(log=logger)
def set_benchmark(
    portfolio_engine: PortfolioEngine, symbol: str, full_shares: bool = False
):
    """Attach a benchmark to the portfolio, downloading its data.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    symbol: str
        Benchmark symbol to download data
    full_shares: bool
        Whether to mimic the portfolio trades exactly (partial shares) or round down the
        quantity to the nearest number

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.bench(p, symbol="SPY")
    """
    # Pure delegation to the engine, which owns the benchmark state
    portfolio_engine.set_benchmark(symbol=symbol, full_shares=full_shares)
@log_start_end(log=logger)
def set_risk_free_rate(portfolio_engine: PortfolioEngine, risk_free_rate: float):
    """Store the risk-free rate on the portfolio engine.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    risk_free_rate: float
        Risk free rate in float format
    """
    # Pure delegation to the engine, which owns the rate used in ratio calculations
    portfolio_engine.set_risk_free_rate(risk_free_rate=risk_free_rate)
def get_holdings_value(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get the absolute value held in each asset, plus a total, per day.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of holdings value

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.holdv(p)
    """
    holdings = portfolio_engine.historical_trade_data["End Value"][
        portfolio_engine.tickers_list
    ]
    holdings["Total Value"] = holdings.sum(axis=1)
    # Daily data: keep only the date part of the index
    holdings.index = holdings.index.date
    return holdings
def get_holdings_percentage(
    portfolio_engine: PortfolioEngine,
) -> pd.DataFrame:
    """Get each asset's share of total holdings, in percent.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of holdings percentage

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.holdp(p)
    """
    holdings = portfolio_engine.historical_trade_data["End Value"][
        portfolio_engine.tickers_list
    ]
    # Normalize each row by that day's total value
    holdings = holdings.divide(holdings.sum(axis=1), axis=0) * 100
    # Order columns by overall magnitude, largest first
    ordered_columns = holdings.sum().sort_values(ascending=False).index
    return holdings[ordered_columns]
@log_start_end(log=logger)
def get_yearly_returns(
    portfolio_engine: PortfolioEngine,
    window: str = "all",
) -> pd.DataFrame:
    """Get yearly returns

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        interval to compare cumulative returns and benchmark

    Returns
    -------
    pd.DataFrame
        DataFrame with yearly portfolio and benchmark returns and their difference

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.yret(p)
    """
    portfolio_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.returns, window
    )
    benchmark_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.benchmark_returns, window
    )

    # BUG FIX: build the index from the same sorted year list used to order
    # the value lists. The original indexed with `list(set(...))`, whose
    # iteration order is not guaranteed, risking value/year misalignment,
    # and indexed the Benchmark series with portfolio years regardless.
    years = sorted(set(portfolio_returns.index.year))

    portfolio_yearly = []
    benchmark_yearly = []
    for year in years:
        yearly_portfolio = portfolio_returns[portfolio_returns.index.year == year]
        portfolio_yearly.append(
            (100 * metrics_model.cumulative_returns(yearly_portfolio)).values[-1]
        )
        yearly_benchmark = benchmark_returns[benchmark_returns.index.year == year]
        benchmark_yearly.append(
            (100 * metrics_model.cumulative_returns(yearly_benchmark)).values[-1]
        )

    df = pd.DataFrame(
        {
            "Portfolio": pd.Series(portfolio_yearly, index=years),
            "Benchmark": pd.Series(benchmark_yearly, index=years),
            "Difference": pd.Series(
                np.array(portfolio_yearly) - np.array(benchmark_yearly),
                index=years,
            ),
        }
    )

    return df
@log_start_end(log=logger)
def get_monthly_returns(
    portfolio_engine: PortfolioEngine,
    window: str = "all",
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get monthly returns

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        interval to compare cumulative returns and benchmark

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Monthly returns of the portfolio and of the benchmark, one row per
        year and one column per month (0 where there is no data).
        BUG FIX: the original annotation claimed a single pd.DataFrame even
        though a tuple was returned.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.mret(p)
    """
    portfolio_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.returns, window
    )
    benchmark_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.benchmark_returns, window
    )

    month_labels = [
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
    ]
    years = sorted(set(portfolio_returns.index.year))

    def monthly_values(returns: pd.Series, year: int) -> list:
        """Cumulative return (in %) per calendar month of *year*; 0 when empty."""
        yearly = returns[returns.index.year == year]
        row = []
        for month in range(1, 13):
            monthly = yearly[yearly.index.month == month]
            if monthly.empty:
                row.append(0)
            else:
                row.append(
                    (100 * metrics_model.cumulative_returns(monthly)).values[-1]
                )
        return row

    # BUG FIX: both frames are indexed by the same sorted year list that the
    # rows are built from; the original indexed the benchmark frame with
    # benchmark years while its rows iterated portfolio years, which could
    # misalign (or fail) when the two ranges differed.
    monthly_returns = pd.DataFrame(
        [monthly_values(portfolio_returns, year) for year in years],
        index=years,
        columns=month_labels,
    )
    bench_monthly_returns = pd.DataFrame(
        [monthly_values(benchmark_returns, year) for year in years],
        index=years,
        columns=month_labels,
    )

    return monthly_returns, bench_monthly_returns
@log_start_end(log=logger)
def get_daily_returns(
    portfolio_engine: PortfolioEngine,
    window: str = "all",
) -> pd.DataFrame:
    """Get daily portfolio and benchmark returns side by side.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        interval to compare cumulative returns and benchmark

    Returns
    -------
    pd.DataFrame
        DataFrame with daily returns

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.dret(p)
    """
    portfolio_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.returns, window
    )
    benchmark_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.benchmark_returns, window
    )

    combined = portfolio_returns.to_frame().join(benchmark_returns)
    # Daily data: keep only the date part of the index
    combined.index = combined.index.date
    combined.columns = ["portfolio", "benchmark"]
    return combined
def join_allocation(
    portfolio: pd.DataFrame, benchmark: pd.DataFrame, column: str
) -> pd.DataFrame:
    """Join portfolio and benchmark allocations on a shared column.

    Parameters
    ----------
    portfolio: pd.DataFrame
        Portfolio allocation
    benchmark: pd.DataFrame
        Benchmark allocation
    column: str
        Column to join DataFrames

    Returns
    -------
    pd.DataFrame
        DataFrame with portfolio and benchmark allocations
    """
    combined = portfolio.merge(benchmark, on=column, how="left")
    combined["Difference"] = combined["Portfolio"] - combined["Benchmark"]
    # Render missing and zero allocations as a dash
    combined = combined.replace(np.nan, "-")
    combined = combined.replace(0, "-")
    return combined
def get_distribution_returns(
    portfolio_engine: PortfolioEngine,
    window: str = "all",
) -> pd.DataFrame:
    """Get the distribution of daily portfolio and benchmark returns.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        interval to compare cumulative returns and benchmark

    Returns
    -------
    pd.DataFrame
        DataFrame of returns distribution

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.distr(p)
    """
    portfolio_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.returns, window
    )
    benchmark_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.benchmark_returns, window
    )

    joined = pd.DataFrame(portfolio_returns).join(pd.DataFrame(benchmark_returns))
    joined.columns.values[0] = "portfolio"
    joined.columns.values[1] = "benchmark"
    return joined
@log_start_end(log=logger)
def get_maximum_drawdown(
    portfolio_engine: PortfolioEngine, is_returns: bool = False
) -> Tuple[pd.Series, pd.Series]:
    """Calculate the drawdown (MDD) of the portfolio's historical value series.
    Note that the calculation is done on cumulative values (or prices). The
    definition of drawdown is
    DD = (current value - rolling maximum) / rolling maximum

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    is_returns: bool
        Flag to indicate that the engine's portfolio_value holds returns,
        which are first compounded into a value series

    Returns
    -------
    pd.Series
        Holdings series
    pd.Series
        Drawdown series

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.maxdd(p)
    """
    holdings: pd.Series = portfolio_engine.portfolio_value
    if is_returns:
        # Compound a returns series into a cumulative (price-like) value series
        holdings = (1 + holdings).cumprod()

    # DD = (current value - rolling maximum) / rolling maximum
    rolling_max = holdings.cummax()
    drawdown = (holdings - rolling_max) / rolling_max

    return holdings, drawdown
def get_rolling_volatility(
    portfolio_engine: PortfolioEngine, window: str = "1y"
) -> pd.DataFrame:
    """Get rolling volatility for the portfolio and its benchmark.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        Rolling window size to use
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y

    Returns
    -------
    pd.DataFrame
        Rolling volatility DataFrame

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.rvol(p)
    """
    rolling_vol = metrics_model.rolling_volatility(portfolio_engine.returns, window)
    if rolling_vol.empty:
        return pd.DataFrame()

    bench_rolling_vol = metrics_model.rolling_volatility(
        portfolio_engine.benchmark_returns, window
    )
    if bench_rolling_vol.empty:
        return pd.DataFrame()

    joined = pd.DataFrame(rolling_vol).join(pd.DataFrame(bench_rolling_vol))
    joined.columns.values[0] = "portfolio"
    joined.columns.values[1] = "benchmark"
    return joined
def get_rolling_sharpe(
    portfolio_engine: PortfolioEngine, risk_free_rate: float = 0, window: str = "1y"
) -> pd.DataFrame:
    """Get rolling sharpe ratio for the portfolio and its benchmark.

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    risk_free_rate : float
        Risk free rate
    window : str
        Rolling window to use
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y

    Returns
    -------
    pd.DataFrame
        Rolling sharpe ratio DataFrame (empty if either series is too short
        for the requested window)

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.rsharpe(p)
    """
    portfolio_rsharpe = metrics_model.rolling_sharpe(
        portfolio_engine.returns, risk_free_rate, window
    )
    if portfolio_rsharpe.empty:
        return pd.DataFrame()

    benchmark_rsharpe = metrics_model.rolling_sharpe(
        portfolio_engine.benchmark_returns, risk_free_rate, window
    )
    if benchmark_rsharpe.empty:
        return pd.DataFrame()

    df = pd.DataFrame(portfolio_rsharpe).join(pd.DataFrame(benchmark_rsharpe))
    # Rename the two joined columns in place
    df.columns.values[0] = "portfolio"
    df.columns.values[1] = "benchmark"

    return df
@log_start_end(log=logger)
def get_rolling_sortino(
    portfolio_engine: PortfolioEngine,
    risk_free_rate: float = 0,
    window: str = "1y",
) -> pd.DataFrame:
    """Get rolling sortino

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    risk_free_rate : float
        Value to use for risk free rate in sharpe/other calculations
    window : str
        interval for window to consider
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y

    Returns
    -------
    pd.DataFrame
        Rolling sortino ratio DataFrame with "portfolio" and "benchmark" columns

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.rsort(p)
    """
    # Bail out early (empty frame) if either side produces no data for the window.
    portfolio_rsortino = metrics_model.rolling_sortino(
        portfolio_engine.returns, risk_free_rate, window
    )
    if portfolio_rsortino.empty:
        return pd.DataFrame()
    benchmark_rsortino = metrics_model.rolling_sortino(
        portfolio_engine.benchmark_returns, risk_free_rate, window
    )
    if benchmark_rsortino.empty:
        return pd.DataFrame()
    # Join on the date index and relabel the two columns positionally.
    df = pd.DataFrame(portfolio_rsortino).join(pd.DataFrame(benchmark_rsortino))
    df.columns.values[0] = "portfolio"
    df.columns.values[1] = "benchmark"
    return df
@log_start_end(log=logger)
def get_rolling_beta(
    portfolio_engine: PortfolioEngine,
    window: str = "1y",
) -> pd.DataFrame:
    """Compute the portfolio's rolling beta against its benchmark.

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        Interval used for rolling values.
        Possible options: mtd, qtd, ytd, 1d, 5d, 10d, 1m, 3m, 6m, 1y, 3y, 5y, 10y.

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolio's rolling beta

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.rbeta(p)
    """
    # Delegate straight to the metrics model; no post-processing is needed here.
    return metrics_model.rolling_beta(
        portfolio_engine.returns, portfolio_engine.benchmark_returns, window
    )
@log_start_end(log=logger)
def get_summary(
    portfolio_engine: PortfolioEngine,
    window: str = "all",
    risk_free_rate: float = 0,
) -> pd.DataFrame:
    """Get portfolio and benchmark returns summary

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        interval to compare cumulative returns and benchmark
    risk_free_rate : float
        Risk free rate for calculations

    Returns
    -------
    pd.DataFrame
        DataFrame with portfolio and benchmark returns summary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.summary(p)
    """
    portfolio_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.returns, window
    )
    benchmark_returns = portfolio_helper.filter_df_by_period(
        portfolio_engine.benchmark_returns, window
    )
    # Each entry is [portfolio value, benchmark value] for one metric row.
    metrics = {
        "Volatility": [portfolio_returns.std(), benchmark_returns.std()],
        "Skew": [
            scipy.stats.skew(portfolio_returns),
            scipy.stats.skew(benchmark_returns),
        ],
        "Kurtosis": [
            scipy.stats.kurtosis(portfolio_returns),
            scipy.stats.kurtosis(benchmark_returns),
        ],
        "Maximum Drawdown": [
            metrics_model.maximum_drawdown(portfolio_returns),
            metrics_model.maximum_drawdown(benchmark_returns),
        ],
        "Sharpe ratio": [
            metrics_model.sharpe_ratio(portfolio_returns, risk_free_rate),
            metrics_model.sharpe_ratio(benchmark_returns, risk_free_rate),
        ],
        "Sortino ratio": [
            metrics_model.sortino_ratio(portfolio_returns, risk_free_rate),
            metrics_model.sortino_ratio(benchmark_returns, risk_free_rate),
        ],
        # NOTE(review): R2 is a single portfolio-vs-benchmark value, so both
        # columns intentionally hold the same number and "Difference" is 0.
        "R2 Score": [
            r2_score(portfolio_returns, benchmark_returns),
            r2_score(portfolio_returns, benchmark_returns),
        ],
    }
    summary = pd.DataFrame(
        metrics.values(), index=metrics.keys(), columns=["Portfolio", "Benchmark"]
    )
    # Difference must be computed while values are still numeric; the percent
    # formatting below converts the affected rows to strings.
    summary["Difference"] = summary["Portfolio"] - summary["Benchmark"]
    summary.loc["Volatility"] = summary.loc["Volatility"].apply("{:.2%}".format)
    summary.loc["Maximum Drawdown"] = summary.loc["Maximum Drawdown"].apply(
        "{:.2%}".format
    )
    summary.loc["R2 Score"] = summary.loc["R2 Score"].apply("{:.2%}".format)
    return summary
@log_start_end(log=logger)
def get_assets_allocation(
    portfolio_engine: PortfolioEngine,
    tables: bool = False,
    limit: int = 10,
    recalculate: bool = False,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]:
    """Display portfolio asset allocation compared to the benchmark

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    tables : bool
        Whether to include separate allocation tables
    limit : int
        The amount of assets you wish to show, by default this is set to 10
    recalculate : bool
        Flag to force recalculate allocation if already exists

    Returns
    -------
    Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]
        DataFrame with combined allocation plus individual allocation if tables is `True`.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.alloc.assets(p)
    """
    # Ensure the engine has an up-to-date asset allocation before reading it.
    portfolio_engine.calculate_allocation(category="Asset", recalculate=recalculate)

    # Truncate both sides to the requested number of rows before joining.
    portfolio_side = portfolio_engine.portfolio_assets_allocation.iloc[:limit]
    benchmark_side = portfolio_engine.benchmark_assets_allocation.iloc[:limit]
    merged = join_allocation(portfolio_side, benchmark_side, "Symbol")

    if tables:
        return merged, portfolio_side, benchmark_side
    return merged
@log_start_end(log=logger)
def get_sectors_allocation(
    portfolio_engine: PortfolioEngine,
    limit: int = 10,
    tables: bool = False,
    recalculate: bool = False,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]:
    """Display portfolio sector allocation compared to the benchmark

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    limit : int
        The amount of assets you wish to show, by default this is set to 10
    tables : bool
        Whether to include separate allocation tables
    recalculate : bool
        Flag to force recalculate allocation if already exists

    Returns
    -------
    Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]
        DataFrame with combined allocation plus individual allocation if tables is `True`.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.alloc.sectors(p)
    """
    # Ensure the engine has an up-to-date sector allocation before reading it.
    portfolio_engine.calculate_allocation(category="Sector", recalculate=recalculate)
    benchmark_allocation = portfolio_engine.benchmark_sectors_allocation.iloc[:limit]
    portfolio_allocation = portfolio_engine.portfolio_sectors_allocation.iloc[:limit]
    combined = join_allocation(portfolio_allocation, benchmark_allocation, "Sector")
    if tables:
        return combined, portfolio_allocation, benchmark_allocation
    return combined
@log_start_end(log=logger)
def get_countries_allocation(
    portfolio_engine: PortfolioEngine,
    limit: int = 10,
    tables: bool = False,
    recalculate: bool = False,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]:
    """Display portfolio country allocation compared to the benchmark

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    limit : int
        The amount of assets you wish to show, by default this is set to 10
    tables : bool
        Whether to include separate allocation tables
    recalculate : bool
        Flag to force recalculate allocation if already exists

    Returns
    -------
    Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]
        DataFrame with combined allocation plus individual allocation if tables is `True`.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.alloc.countries(p)
    """
    # Ensure the engine has an up-to-date country allocation before reading it.
    portfolio_engine.calculate_allocation(category="Country", recalculate=recalculate)
    benchmark_allocation = portfolio_engine.benchmark_countries_allocation.iloc[:limit]
    portfolio_allocation = portfolio_engine.portfolio_countries_allocation.iloc[:limit]
    combined = join_allocation(portfolio_allocation, benchmark_allocation, "Country")
    if tables:
        return combined, portfolio_allocation, benchmark_allocation
    return combined
@log_start_end(log=logger)
def get_regions_allocation(
    portfolio_engine: PortfolioEngine,
    limit: int = 10,
    tables: bool = False,
    recalculate: bool = False,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]:
    """Display portfolio region allocation compared to the benchmark

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    limit : int
        The amount of assets you wish to show, by default this is set to 10
    tables : bool
        Whether to include separate allocation tables
    recalculate : bool
        Flag to force recalculate allocation if already exists

    Returns
    -------
    Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]
        DataFrame with combined allocation plus individual allocation if tables is `True`.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.alloc.regions(p)
    """
    # Ensure the engine has an up-to-date region allocation before reading it.
    portfolio_engine.calculate_allocation(category="Region", recalculate=recalculate)
    benchmark_allocation = portfolio_engine.benchmark_regions_allocation.iloc[:limit]
    portfolio_allocation = portfolio_engine.portfolio_regions_allocation.iloc[:limit]
    combined = join_allocation(portfolio_allocation, benchmark_allocation, "Region")
    if tables:
        return combined, portfolio_allocation, benchmark_allocation
    return combined
@log_start_end(log=logger)
def get_r2_score(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get R2 Score for portfolio and benchmark selected

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame with R2 Score between portfolio and benchmark for different periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.rsquare(p)
    """
    # One R2 value per standard look-back period, rounded to 3 decimals.
    scores = [
        round(
            r2_score(
                portfolio_helper.filter_df_by_period(portfolio_engine.returns, period),
                portfolio_helper.filter_df_by_period(
                    portfolio_engine.benchmark_returns, period
                ),
            ),
            3,
        )
        for period in PERIODS
    ]
    return pd.DataFrame(scores, index=PERIODS, columns=["R2 Score"])
@log_start_end(log=logger)
def get_skewness(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get skewness for portfolio and benchmark selected

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame with skewness for portfolio and benchmark for different periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.skew(p)
    """
    # One row per look-back period: [portfolio skew, benchmark skew], each
    # rounded to 3 decimals.
    vals = list()
    for period in PERIODS:
        vals.append(
            [
                round(
                    scipy.stats.skew(
                        portfolio_helper.filter_df_by_period(
                            portfolio_engine.returns, period
                        )
                    ),
                    3,
                ),
                round(
                    scipy.stats.skew(
                        portfolio_helper.filter_df_by_period(
                            portfolio_engine.benchmark_returns, period
                        )
                    ),
                    3,
                ),
            ]
        )
    return pd.DataFrame(vals, index=PERIODS, columns=["Portfolio", "Benchmark"])
@log_start_end(log=logger)
def get_kurtosis(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get kurtosis for portfolio and benchmark selected

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame with kurtosis for portfolio and benchmark for different periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.kurtosis(p)
    """
    # One row per look-back period: [portfolio kurtosis, benchmark kurtosis].
    vals = list()
    for period in PERIODS:
        vals.append(
            [
                round(
                    scipy.stats.kurtosis(
                        portfolio_helper.filter_df_by_period(
                            portfolio_engine.returns, period
                        )
                    ),
                    3,
                ),
                round(
                    # BUG FIX: the benchmark column previously called
                    # scipy.stats.skew, reporting skewness in a kurtosis table.
                    scipy.stats.kurtosis(
                        portfolio_helper.filter_df_by_period(
                            portfolio_engine.benchmark_returns, period
                        )
                    ),
                    3,
                ),
            ]
        )
    return pd.DataFrame(vals, index=PERIODS, columns=["Portfolio", "Benchmark"])
@log_start_end(log=logger)
def get_stats(portfolio_engine: PortfolioEngine, window: str = "all") -> pd.DataFrame:
    """Get stats for portfolio and benchmark selected based on a certain interval

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : str
        interval to consider. Choices are: mtd, qtd, ytd, 3m, 6m, 1y, 3y, 5y, 10y, all

    Returns
    -------
    pd.DataFrame
        DataFrame with overall stats for portfolio and benchmark for a certain period
    """
    # Descriptive statistics (count, mean, std, quartiles, ...) for each side,
    # filtered down to the requested window first.
    portfolio_stats = (
        portfolio_helper.filter_df_by_period(portfolio_engine.returns, window)
        .describe()
        .to_frame()
    )
    benchmark_stats = portfolio_helper.filter_df_by_period(
        portfolio_engine.benchmark_returns, window
    ).describe()

    combined = portfolio_stats.join(benchmark_stats)
    combined.columns = ["Portfolio", "Benchmark"]
    return combined
@log_start_end(log=logger)
def get_volatility(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get volatility for portfolio and benchmark selected

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame with volatility for portfolio and benchmark for different periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.volatility(p)
    """

    def _window_volatility(returns) -> float:
        # std scaled by sqrt of the number of observations in the window.
        return round(returns.std() * (len(returns) ** 0.5), 3)

    rows = []
    for period in PERIODS:
        rows.append(
            [
                _window_volatility(
                    portfolio_helper.filter_df_by_period(
                        portfolio_engine.returns, period
                    )
                ),
                _window_volatility(
                    portfolio_helper.filter_df_by_period(
                        portfolio_engine.benchmark_returns, period
                    )
                ),
            ]
        )
    return pd.DataFrame(
        rows,
        index=PERIODS,
        columns=["Portfolio [%]", "Benchmark [%]"],
    )
@log_start_end(log=logger)
def get_sharpe_ratio(
    portfolio_engine: PortfolioEngine, risk_free_rate: float = 0
) -> pd.DataFrame:
    """Get sharpe ratio for portfolio and benchmark selected

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    risk_free_rate : float
        Risk free rate value

    Returns
    -------
    pd.DataFrame
        DataFrame with sharpe ratio for portfolio and benchmark for different periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.sharpe(p)
    """

    def _sharpe(returns) -> float:
        # Rounded sharpe ratio of one window of returns.
        return round(metrics_model.sharpe_ratio(returns, risk_free_rate), 3)

    rows = [
        [
            _sharpe(
                portfolio_helper.filter_df_by_period(portfolio_engine.returns, period)
            ),
            _sharpe(
                portfolio_helper.filter_df_by_period(
                    portfolio_engine.benchmark_returns, period
                )
            ),
        ]
        for period in PERIODS
    ]
    return pd.DataFrame(rows, index=PERIODS, columns=["Portfolio", "Benchmark"])
@log_start_end(log=logger)
def get_sortino_ratio(
    portfolio_engine: PortfolioEngine, risk_free_rate: float = 0
) -> pd.DataFrame:
    """Get sortino ratio for portfolio and benchmark selected

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    risk_free_rate : float
        Risk free rate value

    Returns
    -------
    pd.DataFrame
        DataFrame with sortino ratio for portfolio and benchmark for different periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.sortino(p)
    """

    def _sortino(returns) -> float:
        # Rounded sortino ratio of one window of returns.
        return round(metrics_model.sortino_ratio(returns, risk_free_rate), 3)

    rows = [
        [
            _sortino(
                portfolio_helper.filter_df_by_period(portfolio_engine.returns, period)
            ),
            _sortino(
                portfolio_helper.filter_df_by_period(
                    portfolio_engine.benchmark_returns, period
                )
            ),
        ]
        for period in PERIODS
    ]
    return pd.DataFrame(rows, index=PERIODS, columns=["Portfolio", "Benchmark"])
@log_start_end(log=logger)
def get_maximum_drawdown_ratio(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get maximum drawdown ratio for portfolio and benchmark selected

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame with maximum drawdown for portfolio and benchmark for different periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.maxdrawdown(p)
    """

    def _max_drawdown(returns) -> float:
        # Rounded maximum drawdown of one window of returns.
        return round(metrics_model.maximum_drawdown(returns), 3)

    rows = [
        [
            _max_drawdown(
                portfolio_helper.filter_df_by_period(portfolio_engine.returns, period)
            ),
            _max_drawdown(
                portfolio_helper.filter_df_by_period(
                    portfolio_engine.benchmark_returns, period
                )
            ),
        ]
        for period in PERIODS
    ]
    return pd.DataFrame(rows, index=PERIODS, columns=["Portfolio", "Benchmark"])
@log_start_end(log=logger)
def get_gaintopain_ratio(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get Pain-to-Gain ratio based on historical data

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolio's gain-to-pain ratio

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.gaintopain(p)
    """
    # Pure delegation; the metrics model does the period breakdown.
    return metrics_model.get_gaintopain_ratio(
        portfolio_engine.historical_trade_data,
        portfolio_engine.benchmark_trades,
        portfolio_engine.benchmark_returns,
    )
@log_start_end(log=logger)
def get_tracking_error(
    portfolio_engine: PortfolioEngine, window: int = 252
) -> Tuple[pd.DataFrame, pd.Series]:
    """Get tracking error

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : int
        Interval used for rolling values

    Returns
    -------
    pd.DataFrame
        DataFrame of tracking errors during different time windows
    pd.Series
        Series of rolling tracking error

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.trackerr(p)
    """
    # The metrics model returns both the per-period table and the rolling series.
    period_table, rolling_series = metrics_model.get_tracking_error(
        portfolio_engine.returns, portfolio_engine.benchmark_returns, window
    )
    return period_table, rolling_series
@log_start_end(log=logger)
def get_information_ratio(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get information ratio

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of the information ratio during different time periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.information(p)
    """
    # Pure delegation; the metrics model does the period breakdown.
    return metrics_model.get_information_ratio(
        portfolio_engine.returns,
        portfolio_engine.historical_trade_data,
        portfolio_engine.benchmark_trades,
        portfolio_engine.benchmark_returns,
    )
@log_start_end(log=logger)
def get_tail_ratio(
    portfolio_engine: PortfolioEngine, window: int = 252
) -> Tuple[pd.DataFrame, pd.Series, pd.Series]:
    """Get tail ratio

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : int
        Interval used for rolling values

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolios and the benchmarks tail ratio during different time windows
    pd.Series
        Series of the portfolios rolling tail ratio
    pd.Series
        Series of the benchmarks rolling tail ratio

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.tail(p)
    """
    # The metrics model returns the period table plus both rolling series.
    period_table, portfolio_rolling, benchmark_rolling = metrics_model.get_tail_ratio(
        portfolio_engine.returns, portfolio_engine.benchmark_returns, window
    )
    return period_table, portfolio_rolling, benchmark_rolling
@log_start_end(log=logger)
def get_common_sense_ratio(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get common sense ratio

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of the portfolios and the benchmarks common sense ratio during different time periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.commonsense(p)
    """
    # Pure delegation; the metrics model does the period breakdown.
    return metrics_model.get_common_sense_ratio(
        portfolio_engine.returns,
        portfolio_engine.historical_trade_data,
        portfolio_engine.benchmark_trades,
        portfolio_engine.benchmark_returns,
    )
@log_start_end(log=logger)
def get_jensens_alpha(
    portfolio_engine: PortfolioEngine, risk_free_rate: float = 0, window: str = "1y"
) -> Tuple[pd.DataFrame, pd.Series]:
    """Get jensen's alpha

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    risk_free_rate : float
        Risk free rate
    window : str
        Interval used for rolling values

    Returns
    -------
    pd.DataFrame
        DataFrame of jensens's alpha during different time windows
    pd.Series
        Series of jensens's alpha data

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.jensens(p)
    """
    # The metrics model returns both the per-period table and the rolling series.
    period_table, rolling_series = metrics_model.jensens_alpha(
        portfolio_engine.returns,
        portfolio_engine.historical_trade_data,
        portfolio_engine.benchmark_trades,
        portfolio_engine.benchmark_returns,
        risk_free_rate,
        window,
    )
    return period_table, rolling_series
@log_start_end(log=logger)
def get_calmar_ratio(
    portfolio_engine: PortfolioEngine, window: int = 756
) -> Tuple[pd.DataFrame, pd.Series]:
    """Get calmar ratio

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    window : int
        Interval used for rolling values

    Returns
    -------
    pd.DataFrame
        DataFrame of calmar ratio of the benchmark and portfolio during different time periods
    pd.Series
        Series of calmar ratio data

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.calmar(p)
    """
    # The metrics model returns both the per-period table and the rolling series.
    period_table, rolling_series = metrics_model.get_calmar_ratio(
        portfolio_engine.returns,
        portfolio_engine.historical_trade_data,
        portfolio_engine.benchmark_trades,
        portfolio_engine.benchmark_returns,
        window,
    )
    return period_table, rolling_series
@log_start_end(log=logger)
def get_kelly_criterion(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get kelly criterion

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of kelly criterion of the portfolio during different time periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.kelly(p)
    """
    # Pure delegation; the metrics model does the period breakdown.
    return metrics_model.get_kelly_criterion(
        portfolio_engine.returns, portfolio_engine.portfolio_trades
    )
@log_start_end(log=logger)
def get_payoff_ratio(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get payoff ratio

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of payoff ratio of the portfolio during different time periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.payoff(p)
    During some time periods there were no losing trades. Thus some values could not be calculated.
    """
    # Pure delegation; the metrics model does the period breakdown.
    pr_period_ratio = metrics_model.get_payoff_ratio(portfolio_engine.portfolio_trades)
    return pr_period_ratio
@log_start_end(log=logger)
def get_profit_factor(portfolio_engine: PortfolioEngine) -> pd.DataFrame:
    """Get profit factor

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.

    Returns
    -------
    pd.DataFrame
        DataFrame of profit factor of the portfolio during different time periods

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.metric.profitfactor(p)
    During some time periods there were no losing trades. Thus some values could not be calculated.
    """
    # Pure delegation; the metrics model does the period breakdown.
    return metrics_model.get_profit_factor(portfolio_engine.portfolio_trades)
@log_start_end(log=logger)
def get_performance_vs_benchmark(
    portfolio_engine: PortfolioEngine,
    show_all_trades: bool = False,
) -> pd.DataFrame:
    """Get portfolio performance vs the benchmark

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    show_all_trades: bool
        Whether to also show all trades made and their performance (default is False)

    Returns
    -------
    pd.DataFrame
        DataFrame with portfolio performance vs the benchmark:
        per-trade rows with an "Alpha" column when show_all_trades is True,
        otherwise a totals table (Investment / Value / % Return / Abs Return)
        with Portfolio, Benchmark and Difference columns.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.perf(p)
    """
    portfolio_trades = portfolio_engine.portfolio_trades
    benchmark_trades = portfolio_engine.benchmark_trades
    # NOTE(review): these are references to engine-owned DataFrames, so the
    # index reassignment below mutates the engine's state in place — confirm
    # callers do not rely on the original index. The shared datetime index is
    # what lets pd.concat align portfolio and benchmark rows further down.
    portfolio_trades.index = pd.to_datetime(portfolio_trades["Date"].values)
    benchmark_trades.index = pd.to_datetime(benchmark_trades["Date"].values)
    if show_all_trades:
        # Combine DataFrames
        combined = pd.concat(
            [
                portfolio_trades[
                    ["Date", "Ticker", "Portfolio Value", "Portfolio % Return"]
                ],
                benchmark_trades[["Benchmark Value", "Benchmark % Return"]],
            ],
            axis=1,
        )
        # Calculate alpha
        combined["Alpha"] = (
            combined["Portfolio % Return"] - combined["Benchmark % Return"]
        )
        combined["Date"] = pd.to_datetime(combined["Date"]).dt.date
        return combined
    # Calculate total value and return
    total_investment_difference = (
        portfolio_trades["Portfolio Investment"].sum()
        - benchmark_trades["Benchmark Investment"].sum()
    )
    total_value_difference = (
        portfolio_trades["Portfolio Value"].sum()
        - benchmark_trades["Benchmark Value"].sum()
    )
    # Aggregate return = total value over total invested, minus 1.
    total_portfolio_return = (
        portfolio_trades["Portfolio Value"].sum()
        / portfolio_trades["Portfolio Investment"].sum()
    ) - 1
    total_benchmark_return = (
        benchmark_trades["Benchmark Value"].sum()
        / benchmark_trades["Benchmark Investment"].sum()
    ) - 1
    total_abs_return_difference = (
        portfolio_trades["Portfolio Value"].sum()
        - portfolio_trades["Portfolio Investment"].sum()
    ) - (
        benchmark_trades["Benchmark Value"].sum()
        - benchmark_trades["Benchmark Investment"].sum()
    )
    totals = pd.DataFrame.from_dict(
        {
            "Total Investment": [
                portfolio_trades["Portfolio Investment"].sum(),
                benchmark_trades["Benchmark Investment"].sum(),
                total_investment_difference,
            ],
            "Total Value": [
                portfolio_trades["Portfolio Value"].sum(),
                benchmark_trades["Benchmark Value"].sum(),
                total_value_difference,
            ],
            "Total % Return": [
                f"{total_portfolio_return:.2%}",
                f"{total_benchmark_return:.2%}",
                f"{total_portfolio_return - total_benchmark_return:.2%}",
            ],
            "Total Abs Return": [
                portfolio_trades["Portfolio Value"].sum()
                - portfolio_trades["Portfolio Investment"].sum(),
                benchmark_trades["Benchmark Value"].sum()
                - benchmark_trades["Benchmark Investment"].sum(),
                total_abs_return_difference,
            ],
        },
        orient="index",
        columns=["Portfolio", "Benchmark", "Difference"],
    )
    # Exact zeros are rendered as "-" for display purposes.
    return totals.replace(0, "-")
@log_start_end(log=logger)
def get_var(
    portfolio_engine: PortfolioEngine,
    use_mean: bool = False,
    adjusted_var: bool = False,
    student_t: bool = False,
    percentile: float = 99.9,
) -> pd.DataFrame:
    """Get portfolio VaR

    Parameters
    ----------
    portfolio_engine : PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    use_mean : bool
        if one should use the data mean return
    adjusted_var : bool
        if one should have VaR adjusted for skew and kurtosis (Cornish-Fisher-Expansion)
    student_t : bool
        If one should use the student-t distribution
    percentile : float
        var percentile (%)

    Returns
    -------
    pd.DataFrame
        DataFrame with portfolio VaR

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.var(p)
    """
    # Reuse the quantitative-analysis VaR implementation on the portfolio's
    # return series; portfolio=True selects its portfolio-specific output.
    portfolio_returns = portfolio_engine.returns
    return qa_model.get_var(
        data=portfolio_returns,
        use_mean=use_mean,
        adjusted_var=adjusted_var,
        student_t=student_t,
        percentile=percentile,
        portfolio=True,
    )
@log_start_end(log=logger)
def get_es(
    portfolio_engine: PortfolioEngine,
    use_mean: bool = False,
    distribution: str = "normal",
    percentile: float = 99.9,
) -> pd.DataFrame:
    """Get portfolio expected shortfall

    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    use_mean: bool
        if one should use the data mean return
    distribution: str
        choose distribution to use: logistic, laplace, normal
    percentile: float
        es percentile (%)

    Returns
    -------
    pd.DataFrame
        DataFrame with portfolio expected shortfall

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.es(p)
    """
    # Reuse the quantitative-analysis ES implementation on the portfolio's
    # return series; portfolio=True selects its portfolio-specific output.
    return qa_model.get_es(
        data=portfolio_engine.returns,
        use_mean=use_mean,
        distribution=distribution,
        percentile=percentile,
        portfolio=True,
    )
@log_start_end(log=logger)
def get_omega(
    portfolio_engine: PortfolioEngine,
    threshold_start: float = 0,
    threshold_end: float = 1.5,
) -> pd.DataFrame:
    """Get omega ratio
    Parameters
    ----------
    portfolio_engine: PortfolioEngine
        PortfolioEngine class instance, this will hold transactions and perform calculations.
        Use `portfolio.load` to create a PortfolioEngine.
    threshold_start: float
        annualized target return threshold start of plotted threshold range
    threshold_end: float
        annualized target return threshold end of plotted threshold range
    Returns
    -------
    pd.DataFrame
        DataFrame with portfolio omega ratio
    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.load("openbb_terminal/miscellaneous/portfolio_examples/holdings/example.csv")
    >>> output = openbb.portfolio.om(p)
    """
    # Thin wrapper: omega is computed by the quantitative-analysis model over
    # the engine's daily return series, across the given threshold range.
    return qa_model.get_omega(
        data=portfolio_engine.returns,
        threshold_start=threshold_start,
        threshold_end=threshold_end,
    ) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_model.py | 0.926166 | 0.396594 | portfolio_model.py | pypi
__docformat__ = "numpy"
import argparse
import logging
from pathlib import Path
from typing import List
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.core.config.paths import USER_PORTFOLIO_DATA_DIRECTORY
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.decorators import log_start_end
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.portfolio.portfolio_analysis import (
portfolio_model,
portfolio_view,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
portfolios_path = USER_PORTFOLIO_DATA_DIRECTORY / "portfolios"
# Extensions already include the dot; the glob patterns below must therefore
# NOT add another one.
port_types = [".csv", ".json", ".xlsx"]
# Map file name -> full path for every portfolio file found in the user data
# directory.
# BUG FIX: the pattern was f"*.{port_type}", which expands to "*..csv" (the
# extension already carries a dot) and matches nothing.
possible_paths = {
    portpath.name: portpath
    for port_type in port_types
    for portpath in portfolios_path.rglob(f"*{port_type}")
}
# Also pick up the example portfolios shipped next to this module; entries
# with the same file name override the user-directory ones.
possible_paths.update(
    {
        portpath.name: portpath
        for port_type in port_types
        for portpath in (Path(__file__).parent / "portfolios").rglob(f"*{port_type}")
    }
)
class PortfolioAnalysisController(BaseController):
    """Menu controller for /portfolio/pa/: view, load and group portfolio files."""
    # Commands exposed in this submenu.
    CHOICES_COMMANDS = [
        "view",
        "load",
        "group",
    ]
    PATH = "/portfolio/pa/"
    def __init__(self, queue: List[str] = None):
        """Start with no portfolio loaded; `load` populates name and frame."""
        super().__init__(queue)
        self.portfolio_name = ""
        self.portfolio = pd.DataFrame()
        # Optional prompt-toolkit tab completion for the menu commands.
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = {c: {} for c in self.controller_choices}
            choices["support"] = self.SUPPORT_CHOICES
            choices["about"] = self.ABOUT_CHOICES
            self.completer = NestedCompleter.from_nested_dict(choices)
    def print_help(self):
        """Print help"""
        help_text = f"""[cmds]
    view view available portfolios
    load load portfolio from a file[/cmds]
[param]Portfolio: [/param]{self.portfolio_name}[cmds]
    group view holdings grouped by parameter[/cmds]
"""
        console.print(text=help_text, menu="Portfolio - Portfolio Analysis")
    @log_start_end(log=logger)
    def call_load(self, other_args):
        """Process load command"""
        parser = argparse.ArgumentParser(
            prog="load",
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description="Function to get portfolio from predefined "
            "csv/json/xlsx file inside portfolios folder",
            epilog="usage: load file_name",
        )
        parser.add_argument(
            "-s",
            "--sector",
            action="store_true",
            default=False,
            help="Add sector to dataframe",
            dest="sector",
        )
        parser.add_argument(
            "-c",
            "--country",
            action="store_true",
            default=False,
            help="Add country to dataframe",
            dest="country",
        )
        # store_false: passing the flag turns last-price fetching OFF.
        parser.add_argument(
            "--no_last_price",
            action="store_false",
            default=True,
            help="Don't add last price from yfinance",
            dest="last_price",
        )
        parser.add_argument(
            "--nan",
            action="store_true",
            default=False,
            help="Show nan entries",
            dest="show_nan",
        )
        parser.add_argument(
            "-p",
            "--path",
            default="my_portfolio.csv",
            choices=possible_paths,
            help="Path to portfolio file",
            dest="path",
        )
        # Treat a bare first token as the -p/--path value.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-p")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            self.portfolio_name = ns_parser.path
            self.portfolio = portfolio_model.load_portfolio(
                full_path=possible_paths[ns_parser.path],
                sector=ns_parser.sector,
                country=ns_parser.country,
                last_price=ns_parser.last_price,
                show_nan=ns_parser.show_nan,
            )
            if not self.portfolio.empty:
                console.print(f"Successfully loaded: {self.portfolio_name}\n")
    @log_start_end(log=logger)
    def call_group(self, other_args):
        """Process group command"""
        parser = argparse.ArgumentParser(
            prog="group",
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description="Displays portfolio grouped by a given column",
        )
        # Treat a bare first token as the -g/--group value.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-g")
        # NOTE(review): choices come from the portfolio loaded *now*, so until
        # `load` has run this is an empty DataFrame and every value is
        # rejected — confirm that is the intended UX.
        parser.add_argument(
            "-g",
            "--group",
            type=str,
            dest="group",
            default="Ticker",
            choices=self.portfolio.columns,
            help="Column to group by",
        )
        parser.add_argument(
            "-a",
            "--allocation",
            action="store_true",
            default=False,
            help="Add allocation column in % to dataframe",
            dest="allocation",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # The 'value' column only exists when load fetched last prices.
            if "value" in self.portfolio.columns:
                portfolio_view.display_group_holdings(
                    portfolio=self.portfolio,
                    group_column=ns_parser.group,
                    allocation=ns_parser.allocation,
                )
            else:
                console.print(
                    "'value' column not in portfolio. "
                    "Either add manually or load without --no_last_price flag\n"
                )
    @log_start_end(log=logger)
    def call_view(self, other_args):
        """Process view command"""
        parser = argparse.ArgumentParser(
            prog="view",
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description="Show available portfolios to load.",
        )
        # NOTE(review): single-dash long option ("-format") is unusual for
        # argparse; kept as-is to preserve the existing CLI.
        parser.add_argument(
            "-format",
            choices=["csv", "json", "xlsx", "all"],
            help="Format of portfolios to view. 'csv' will show all csv files available, etc.",
            default="all",
            dest="file_format",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            available_ports = list(possible_paths)
            if ns_parser.file_format != "all":
                available_ports = [
                    port
                    for port in available_ports
                    if port.endswith(ns_parser.file_format)
                ]
            console.print("\nAvailable Portfolios:\n")
            for port in available_ports:
                console.print(port) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_analysis/pa_controller.py | 0.591841 | 0.162247 | pa_controller.py | pypi
__docformat__ = "numpy"
import difflib
from typing import List
from datetime import datetime, timedelta
import yfinance as yf
import pandas as pd
from openbb_terminal.stocks.sector_industry_analysis import financedatabase_model
def get_stocks(symbols: List[str], start: datetime) -> pd.DataFrame:
    """Download adjusted close prices and shape them under a ("Close", ticker) header.

    Parameters
    ----------
    symbols : List[str]
        Tickers to get data for
    start : datetime
        First date in stock filtered dataframe (data starts one year earlier)

    Returns
    -------
    pd.DataFrame
        Daily adjusted close prices with a two-level ("Close", ticker) column index
    """
    # Pull one extra year of history so trailing calculations have context.
    prices = yf.download(
        tickers=symbols,
        start=start - timedelta(days=365),
        interval="1d",
        progress=False,
    )["Adj Close"]
    if len(symbols) > 1:
        # Normalize tickers to lowercase, then lift columns into a
        # ("Close", ticker) MultiIndex.
        prices.columns = prices.columns.str.lower()
        pairs = [("Close", column.lower()) for column in prices.columns]
        prices.columns = pd.MultiIndex.from_tuples(pairs, names=["first", "second"])
    if len(symbols) == 1:
        # Single ticker comes back as a Series; wrap it with the same shape.
        prices = prices.to_frame(name=("Close", symbols[0]))
    return prices
def get_dividends(symbols: List[str]) -> pd.DataFrame:
    """Collect historical dividends for each ticker into one DataFrame.

    Parameters
    ----------
    symbols : List[str]
        Tickers to get data for

    Returns
    -------
    pd.DataFrame
        Historic dividends, one ("Dividend", ticker) column per symbol
    """
    frames = [
        yf.Ticker(symbol).dividends.to_frame(name=("Dividend", symbol))
        for symbol in symbols
    ]
    return pd.concat(frames)
def get_market(start: datetime, ticker: str = "SPY") -> pd.DataFrame:
    """Fetch daily close prices for the market benchmark asset.

    Parameters
    ----------
    start : datetime
        First date in stock filtered dataframe (history starts one year earlier)
    ticker : str
        Ticker representing the market, SPY by default

    Returns
    -------
    pd.DataFrame
        Close prices under a single ("Market", "Close") column
    """
    closes = yf.Ticker(ticker).history(
        start=start - timedelta(days=365),
        interval="1d",
    )["Close"]
    return closes.to_frame(name=("Market", "Close"))
def get_country(ticker):
    """Best-effort country lookup for a ticker via Yahoo Finance's quote page.

    Returns "NA" when the summary profile has no country; otherwise fuzzy-matches
    the raw value against the finance-database country list so spellings align.
    """
    country = "NA"
    data = yf.utils.get_json(f"https://finance.yahoo.com/quote/{ticker}")
    if "summaryProfile" in data:
        country = data["summaryProfile"]["country"]
        if country not in financedatabase_model.get_countries():
            # Accept close spellings (cutoff 0.7) so e.g. variants still map
            # onto a known country name.
            similar_cmd = difflib.get_close_matches(
                country,
                financedatabase_model.get_countries(),
                n=1,
                cutoff=0.7,
            )
            if similar_cmd:
                country = similar_cmd[0]
    return country | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_analysis/yfinance_model.py | 0.837885 | 0.433622 | yfinance_model.py | pypi
__docformat__ = "numpy"
from datetime import datetime
from typing import List
from reportlab.lib import colors
from reportlab.pdfgen import canvas
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.platypus import Paragraph, Table, TableStyle
def base_format(report: canvas.Canvas, header: str) -> None:
    """Stamp the shared page chrome (title bar, date, rule, page header).

    Parameters
    ----------
    report : canvas.Canvas
        The report to be formatted
    header : str
        The header for the page
    """
    stamp_date = datetime.now().strftime("%Y/%m/%d")
    report.setLineWidth(0.3)
    report.setFont("Helvetica", 12)
    report.drawString(30, 760, "OpenBB Terminal")
    report.drawString(500, 760, stamp_date)
    report.drawString(275, 750, "Annual Report")
    report.line(50, 730, 580, 730)
    # Page header in a larger face, then restore the body font.
    report.setFont("Helvetica", 20)
    report.drawString(50, 705, header)
    report.setFont("Helvetica", 12)
def draw_paragraph(
    report: canvas.Canvas, msg: str, x: int, y: int, max_width: int, max_height: int
) -> None:
    """Render a wrapped paragraph on the report canvas.

    Parameters
    ----------
    report : canvas.Canvas
        The report to be formatted
    msg : str
        The contents of the paragraph (newlines become line breaks)
    x : int
        The x coordinate for the paragraph
    y : int
        The y coordinate for the paragraph (top edge)
    max_width : int
        The maximum width allowed for the paragraph
    max_height : int
        The maximum height allowed for the paragraph
    """
    body = Paragraph(msg.replace("\n", "<br />"), style=ParagraphStyle("Normal"))
    # wrap() returns the space actually used; drop the anchor by that height
    # so the paragraph hangs down from y.
    _, used_height = body.wrap(max_width, max_height)
    body.drawOn(report, x, y - used_height)
def draw_table(
    report: canvas.Canvas,
    header_txt: str,
    aW: int,
    aH: int,
    x: int,
    data: List[List[str]],
) -> None:
    """Draw a table at given coordinates
    Parameters
    ----------
    report : canvas.Canvas
        The report to be formatted
    header_txt : str
        The header for the table
    aW : int
        The width for the table
    aH : int
        The height for the table
    x : int
        The x coordinate for the table
    data : List[List[str]]
        Data to show
    """
    style = getSampleStyleSheet()["BodyText"]
    header = Paragraph(f"<bold><font size=14>{header_txt}</font></bold>", style)
    t = Table(data)
    # Outline and inner grid for every cell.
    t.setStyle(
        TableStyle(
            [
                ("BOX", (0, 0), (-1, -1), 0.25, colors.black),
                ("INNERGRID", (0, 0), (-1, -1), 0.25, colors.black),
            ]
        )
    )
    # Alternate row shading (zebra striping) for readability.
    for each in range(len(data)):
        bg_color = colors.whitesmoke if each % 2 == 0 else colors.lightgrey
        t.setStyle(TableStyle([("BACKGROUND", (0, each), (-1, each), bg_color)]))
    # Place the header first, then stack the table directly beneath it.
    _, h = header.wrap(aW, aH)
    header.drawOn(report, x, aH)
    aH = aH - h
    _, h = t.wrap(aW, aH)
    t.drawOn(report, x, aH - h) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_analysis/reportlab_helpers.py | 0.94672 | 0.29105 | reportlab_helpers.py | pypi
__docformat__ = "numpy"
import logging
import pandas as pd
import yfinance as yf
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.portfolio.portfolio_analysis import yfinance_model
# pylint: disable=no-member,unsupported-assignment-operation,unsubscriptable-object
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def load_portfolio(
    full_path: str,
    sector: bool = False,
    country: bool = False,
    last_price: bool = False,
    show_nan: bool = True,
) -> pd.DataFrame:
    """Loads a portfolio file into a dataframe and adds sector and last price
    Parameters
    ----------
    full_path : str
        Path to portfolio file.
    sector : bool, optional
        Boolean to indicate getting sector from yfinance, by default False
    country : bool, optional
        Boolean to indicate getting country from yfinance, by default False
    last_price : bool, optional
        Boolean to indicate getting last price from yfinance, by default False
    show_nan : bool, optional
        Boolean to indicate dropping nan values, by default True
    Returns
    -------
    pd.DataFrame
        Dataframe containing portfolio
    """
    # NOTE(review): an unsupported extension leaves `df` unbound and the first
    # use below raises NameError — confirm callers only pass csv/json/xlsx.
    if full_path.endswith(".csv"):
        df = pd.read_csv(full_path)
    elif full_path.endswith(".json"):
        df = pd.read_json(full_path)
    elif full_path.endswith(".xlsx"):
        df = pd.read_excel(full_path, engine="openpyxl")
    if sector:
        # One yfinance lookup per row; falls back to "yf Other" when missing.
        df["sector"] = df.apply(
            lambda row: yf.Ticker(row.Ticker).info["sector"]
            if "sector" in yf.Ticker(row.Ticker).info.keys()
            else "yf Other",
            axis=1,
        )
    if country:
        country_dict = {
            tick: yfinance_model.get_country(tick) for tick in df.Ticker.unique()
        }
        df["Country"] = df["Ticker"].map(country_dict)
    if last_price:
        df["last_price"] = df.apply(
            lambda row: yf.Ticker(row.Ticker)
            .history(period="1d")["Close"][-1]
            .round(2),
            axis=1,
        )
        df["value"] = df["Shares"] * df["last_price"]
    if not show_nan:
        # NOTE(review): axis=1 drops whole *columns* containing NaN, not rows —
        # confirm this matches the "show nan entries" flag's intent.
        df = df.dropna(axis=1)
    print_rich_table(df, title="Portfolio", headers=list(df.columns))
    return df | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_analysis/portfolio_model.py | 0.765067 | 0.30562 | portfolio_model.py | pypi
__docformat__ = "numpy"
import argparse
from typing import Any
import pandas as pd
from openbb_terminal.portfolio.portfolio_optimization.statics import (
RISK_CHOICES,
OPTIMIZATION_PARAMETERS,
TERMINAL_TEMPLATE_MAP,
)
from openbb_terminal.rich_config import console
# These are all the possible yfinance properties
valid_property_infos = [
"previousClose",
"regularMarketOpen",
"twoHundredDayAverage",
"trailingAnnualDividendYield",
"payoutRatio",
"volume24Hr",
"regularMarketDayHigh",
"navPrice",
"averageDailyVolume10Day",
"totalAssets",
"regularMarketPreviousClose",
"fiftyDayAverage",
"trailingAnnualDividendRate",
"open",
"toCurrency",
"averageVolume10days",
"expireDate",
"yield",
"algorithm",
"dividendRate",
"exDividendDate",
"beta",
"circulatingSupply",
"regularMarketDayLow",
"priceHint",
"currency",
"trailingPE",
"regularMarketVolume",
"lastMarket",
"maxSupply",
"openInterest",
"marketCap",
"volumeAllCurrencies",
"strikePrice",
"averageVolume",
"priceToSalesTrailing12Months",
"dayLow",
"ask",
"ytdReturn",
"askSize",
"volume",
"fiftyTwoWeekHigh",
"forwardPE",
"fromCurrency",
"fiveYearAvgDividendYield",
"fiftyTwoWeekLow",
"bid",
"dividendYield",
"bidSize",
"dayHigh",
"annualHoldingsTurnover",
"enterpriseToRevenue",
"beta3Year",
"profitMargins",
"enterpriseToEbitda",
"52WeekChange",
"morningStarRiskRating",
"forwardEps",
"revenueQuarterlyGrowth",
"sharesOutstanding",
"fundInceptionDate",
"annualReportExpenseRatio",
"bookValue",
"sharesShort",
"sharesPercentSharesOut",
"fundFamily",
"lastFiscalYearEnd",
"heldPercentInstitutions",
"netIncomeToCommon",
"trailingEps",
"lastDividendValue",
"SandP52WeekChange",
"priceToBook",
"heldPercentInsiders",
"shortRatio",
"sharesShortPreviousMonthDate",
"floatShares",
"enterpriseValue",
"threeYearAverageReturn",
"lastSplitFactor",
"legalType",
"lastDividendDate",
"morningStarOverallRating",
"earningsQuarterlyGrowth",
"pegRatio",
"lastCapGain",
"shortPercentOfFloat",
"sharesShortPriorMonth",
"impliedSharesOutstanding",
"fiveYearAverageReturn",
"regularMarketPrice",
]
def check_valid_property_type(check_property: str) -> str:
    """Argparse type-checker: accept only recognized yfinance info fields."""
    if check_property not in valid_property_infos:
        raise argparse.ArgumentTypeError(f"{check_property} is not a valid info")
    return check_property
def dict_to_df(d: dict) -> pd.DataFrame:
    """Build a one-column DataFrame ("value") keyed by the dict's keys.

    Parameters
    ----------
    d : dict
        Dictionary to convert

    Returns
    -------
    pd.DataFrame
        One row per key with a single "value" column; empty frame for an
        empty (falsy) mapping.
    """
    return (
        pd.DataFrame.from_dict(data=d, orient="index", columns=["value"])
        if d
        else pd.DataFrame()
    )
def validate_risk_measure(risk_measure: str, warning: bool = True) -> str:
    """Map a user-supplied risk measure onto its canonical code.

    Parameters
    ----------
    risk_measure : str
        Risk measure to check (case-insensitive)
    warning : bool
        Whether to print a warning when falling back to the default

    Returns
    -------
    str
        Canonical risk-measure code; "MV" when the input is unknown.
    """
    key = risk_measure.lower()
    if key in RISK_CHOICES:
        return RISK_CHOICES[key]
    if warning:
        console.print("[yellow]Risk measure not found. Using 'MV'.[/yellow]")
    return "MV"
def get_kwarg(key: str, kwargs: dict, default: Any = None) -> Any:
    """Get a key from kwargs
    If key is in kwargs, returns it.
    Otherwise, if default provided, returns it.
    Otherwise, if key is in OPTIMIZATION_PARAMETERS, returns it.
    Parameters
    ----------
    key : str
        The key to be searched
    kwargs : dict
        The kwargs to be searched
    default : Any
        The default value to be returned if the key is not found
    Returns
    -------
    Any
        The value of the key if it exists, else None
    """
    if key in kwargs:
        return kwargs[key]
    # NOTE(review): truthiness check — a falsy default (0, False, "") is
    # treated as "no default" and falls through to the parameter table below;
    # confirm that is intended before relying on falsy defaults.
    if default:
        return default
    # TODO: Remove this line when mapping between template and terminal is not needed
    template_key = TERMINAL_TEMPLATE_MAP.get(key, key)
    PARAMETER = OPTIMIZATION_PARAMETERS.get(template_key)
    if PARAMETER is None:
        return default
    return PARAMETER.default | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_optimization/optimizer_helper.py | 0.815894 | 0.47244 | optimizer_helper.py | pypi
__docformat__ = "numpy"
# pylint: disable=abstract-class-instantiated
import logging
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def load_configuration(excel_file: str = ""):
    """Read the optimization settings from the template's "Optimization" sheet.

    Parameters
    ----------
    excel_file: str
        The location of the Excel file that needs to be loaded.

    Returns
    -------
    tuple of dict
        (values, descriptions) keyed by parameter name.
    """
    # Columns B:D hold Parameter / Value / Description; the first two rows are
    # template decoration and are skipped.
    raw = pd.read_excel(
        excel_file,
        sheet_name="Optimization",
        skiprows=2,
        usecols="B:D",
        names=["Parameter", "Value", "Description"],
        index_col="Parameter",
    )
    # Keep rows with at least two non-NaN cells, then drop repeated
    # header-like rows whose Description cell is literally "Description".
    trimmed = raw.dropna(axis="rows", thresh=2)
    trimmed = trimmed[trimmed["Description"] != "Description"]
    as_dict = trimmed.to_dict()
    return as_dict["Value"], as_dict["Description"]
@log_start_end(log=logger)
def load_allocation(excel_file: str = ""):
    """
    Load the allocation table that defines tickers and their categories.

    Parameters
    ----------
    excel_file: str
        The location of the Excel (.xlsx) or .csv file that needs to be loaded.

    Returns
    -------
    tickers: list
        Sorted list of ticker symbols (upper-cased).
    categories: dictionary
        Mapping of category column -> {ticker: value}.
    """
    if str(excel_file).endswith(".xlsx"):
        categories = pd.read_excel(excel_file, sheet_name="Allocation", usecols="A:G")
        categories = categories.dropna(axis="rows")
    elif str(excel_file).endswith(".csv"):
        # BUG FIX: this branch previously called pd.read_excel on the .csv
        # file, which fails; CSV input must go through pd.read_csv.
        categories = pd.read_csv(excel_file)
        categories = categories.dropna(axis="rows")
    else:
        console.print("Only Excel (.xlsx and .csv) files are accepted.\n")
        return [], {}
    # Normalize headers (UPPER_SNAKE_CASE) and upper-case all cell values.
    categories.columns = [
        col.upper().strip().replace(" ", "_") for col in categories.columns
    ]
    categories = categories.apply(lambda x: x.astype(str).str.upper())
    # NOTE(review): this de-dup runs on the default RangeIndex (a no-op);
    # de-duplicating tickers would require doing it after set_index below.
    categories = categories[~categories.index.duplicated(keep="first")]
    try:
        categories.set_index("TICKER", inplace=True)
        categories.sort_index(inplace=True)
    except KeyError:
        console.print("Allocation table needs a TICKER column\n")
        return [], {}
    tickers = list(categories.index)
    tickers.sort()
    categories = categories.to_dict()
    return tickers, categories
@log_start_end(log=logger)
def load_bl_views(excel_file: str = ""):
    """
    Load an Excel file with views for the Black Litterman model.

    Parameters
    ----------
    excel_file: str
        The location of the Excel file that needs to be loaded.

    Returns
    -------
    p_views: list
        Nested list (one row per view) with the picking matrix P.
    q_views: list
        Flat list with the expected-return vector Q.
    """
    if str(excel_file).endswith(".xlsx"):
        try:
            p_views = pd.read_excel(excel_file, sheet_name="p_views", index_col=0)
            p_views = p_views.fillna(0)
            p_views = p_views.dropna(axis="rows")
        except KeyError:
            console.print("Excel file needs a p_views sheet\n")
            return {}, {}
        try:
            q_views = pd.read_excel(excel_file, sheet_name="q_views", index_col=0)
            q_views = q_views.dropna(axis="rows")
        except KeyError:
            # BUG FIX: this branch previously printed the p_views message.
            console.print("Excel file needs a q_views sheet\n")
            return {}, {}
    else:
        console.print("Only Excel (.xlsx) files are accepted.\n")
        return {}, {}
    # Flatten the DataFrames into plain Python lists by round-tripping through
    # CSV text: rows joined with ";" and cells with "," for P; a flat "," list
    # for Q. The trailing separator is stripped before splitting.
    p_views = p_views.T.sort_index()
    p_views = p_views.T.to_csv(index=False, header=0).replace("\n", ";")
    p_views = p_views[:-1]
    p_views = [[float(item) for item in row.split(",")] for row in p_views.split(";")]
    q_views = q_views.to_csv(index=False, header=0).replace("\n", ",")
    q_views = q_views[:-1]
    q_views = [float(item) for item in q_views.split(",")]
    return p_views, q_views
@log_start_end(log=logger)
def excel_bl_views(file: str, stocks: str, n: int = 3):
    """
    Create an Excel file with required format to build n views for Black Litterman cmd.
    Parameters
    ----------
    file: str
        Output file name (".xlsx" appended when missing).
    stocks: str
        List of stocks used to build the Black Litterman model views.
    n: int
        The number of views that will be created.
    Returns
    -------
    file: excel
        Returns a list with ticker symbols
    """
    # NOTE(review): there is no `return` after this warning, so the template is
    # still written even with fewer than 2 tickers — confirm intended.
    if len(stocks) < 2:
        console.print("Please have at least 2 loaded tickers to create views.\n")
    # Empty n-by-len(stocks) picking matrix and n-row returns vector for the
    # user to fill in.
    p_views = [[""] * len(stocks) for i in range(n)]
    p_views_df = pd.DataFrame(p_views, columns=stocks)
    q_views = [[""] for i in range(n)]
    q_views_df = pd.DataFrame(q_views, columns=["Returns"])
    if file.endswith(".xlsx"):
        pass
    else:
        file += ".xlsx"
    with pd.ExcelWriter(file) as writer:
        p_views_df.to_excel(writer, sheet_name="p_views")
        q_views_df.to_excel(writer, sheet_name="q_views") | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_optimization/excel_model.py | 0.802826 | 0.342352 | excel_model.py | pypi
__docformat__ = "numpy"
import logging
import os
import tempfile
from calendar import monthrange
from datetime import date
from typing import List
import numpy as np
import pandas as pd
import yfinance as yf
from dateutil.relativedelta import relativedelta, FR
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# pylint: disable=R0912, E1101
yf_info_choices = [
"previousClose",
"regularMarketOpen",
"twoHundredDayAverage",
"trailingAnnualDividendYield",
"payoutRatio",
"volume24Hr",
"regularMarketDayHigh",
"navPrice",
"averageDailyVolume10Day",
"totalAssets",
"regularMarketPreviousClose",
"fiftyDayAverage",
"trailingAnnualDividendRate",
"open",
"toCurrency",
"averageVolume10days",
"expireDate",
"yield",
"algorithm",
"dividendRate",
"exDividendDate",
"beta",
"circulatingSupply",
"regularMarketDayLow",
"priceHint",
"currency",
"trailingPE",
"regularMarketVolume",
"lastMarket",
"maxSupply",
"openInterest",
"marketCap",
"volumeAllCurrencies",
"strikePrice",
"averageVolume",
"priceToSalesTrailing12Months",
"dayLow",
"ask",
"ytdReturn",
"askSize",
"volume",
"fiftyTwoWeekHigh",
"forwardPE",
"fromCurrency",
"fiveYearAvgDividendYield",
"fiftyTwoWeekLow",
"bid",
"dividendYield",
"bidSize",
"dayHigh",
"annualHoldingsTurnover",
"enterpriseToRevenue",
"beta3Year",
"profitMargins",
"enterpriseToEbitda",
"52WeekChange",
"morningStarRiskRating",
"forwardEps",
"revenueQuarterlyGrowth",
"sharesOutstanding",
"fundInceptionDate",
"annualReportExpenseRatio",
"bookValue",
"sharesShort",
"sharesPercentSharesOut",
"heldPercentInstitutions",
"netIncomeToCommon",
"trailingEps",
"lastDividendValue",
"SandP52WeekChange",
"priceToBook",
"heldPercentInsiders",
"shortRatio",
"sharesShortPreviousMonthDate",
"floatShares",
"enterpriseValue",
"fundFamily",
"threeYearAverageReturn",
"lastSplitFactor",
"legalType",
"lastDividendDate",
"morningStarOverallRating",
"earningsQuarterlyGrowth",
"pegRatio",
"lastCapGain",
"shortPercentOfFloat",
"sharesShortPriorMonth",
"impliedSharesOutstanding",
"fiveYearAverageReturn",
"regularMarketPrice",
]
@log_start_end(log=logger)
def process_stocks(
    symbols: List[str], interval: str = "3mo", start_date: str = "", end_date: str = ""
) -> pd.DataFrame:
    """Get adjusted closing price for each stock in the list
    Parameters
    ----------
    symbols: List[str]
        List of tickers to get historical data for
    interval: str
        interval to get data from yfinance, personalized
    start_date: str
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    Returns
    -------
    stock_closes: DataFrame
        DataFrame containing daily (adjusted) close prices for each stock in list
    """
    # yfinance's own named periods; anything else is parsed as "<N>d/w/mo/y".
    period_choices = [
        "1d",
        "5d",
        "1mo",
        "3mo",
        "6mo",
        "1y",
        "2y",
        "5y",
        "10y",
        "ytd",
        "max",
    ]
    # Downloads are cached as pickles in a temp directory so repeated calls
    # only fetch tickers that are missing from the cached frame.
    directory = "gst_temp_files"
    parent_dir = tempfile.gettempdir()
    path = os.path.join(parent_dir, directory)
    if os.path.isdir(path) is False:
        os.mkdir(path)
    stock_closes = None
    if start_date != "":
        if end_date == "":
            end_ = date.today()
        else:
            end_ = date.fromisoformat(end_date)
        # Check if end date is on weekend
        if end_.weekday() >= 5:
            end_ = end_ + relativedelta(weekday=FR(-1))
        end_date = end_.strftime("%Y-%m-%d")
        # Creating temporal file name
        name = os.path.join(path, "Stocks " + start_date + " to " + end_date + ".pkl")
        # Checking if exist
        if os.path.exists(name):
            stock_closes_0 = pd.read_pickle(name)
            list_of_stocks_0 = list(set(symbols) - set(stock_closes_0.columns))
        else:
            stock_closes_0 = None
            list_of_stocks_0 = symbols
        # Download assets that are not in temporal file
        if list_of_stocks_0 == []:
            stock_closes = stock_closes_0.copy()
        else:
            stock_prices = yf.download(
                list_of_stocks_0,
                start=start_date,
                end=end_date,
                progress=False,
                group_by="ticker",
            )
    else:
        if interval in period_choices:
            # Setting temporal file name
            name = os.path.join(
                path,
                "Stocks " + interval + " " + date.today().strftime("%Y-%m-%d") + ".pkl",
            )
            # Creating if exist
            if os.path.exists(name):
                stock_closes_0 = pd.read_pickle(name)
                list_of_stocks_0 = list(set(symbols) - set(stock_closes_0.columns))
            else:
                stock_closes_0 = None
                list_of_stocks_0 = symbols
            # Download assets that are not in temporal file
            if list_of_stocks_0 == []:
                stock_closes = stock_closes_0.copy()
            else:
                stock_prices = yf.download(
                    list_of_stocks_0, period=interval, progress=False, group_by="ticker"
                )
        else:
            # Personalized interval: translate "<N>d/w/mo/y" into an explicit
            # start/end date range ending on the last weekday.
            end_ = date.today()
            if end_.weekday() >= 5:
                end_ = end_ + relativedelta(weekday=FR(-1))
            if interval.find("d") >= 1:
                days = int(interval[:-1])
                start_ = end_ - relativedelta(days=days)
            elif interval.find("w") >= 1:
                weeks = int(interval[:-1])
                start_ = end_ - relativedelta(weeks=weeks)
            elif interval.find("mo") >= 1:
                months = int(interval[:-2])
                start_ = end_ - relativedelta(months=months)
            elif interval.find("y") >= 1:
                years = int(interval[:-1])
                start_ = end_ - relativedelta(years=years)
            else:
                # NOTE(review): returns None here despite the pd.DataFrame
                # annotation — callers must handle a None result.
                # console.print(
                #     "Please use an adequate interval."
                # )
                return None
            start_date = start_.strftime("%Y-%m-%d")
            end_date = end_.strftime("%Y-%m-%d")
            # Creating temporal file name
            name = os.path.join(
                path, "Stocks " + start_date + " to " + end_date + ".pkl"
            )
            # Checking if temporal file exists
            if os.path.exists(name):
                stock_closes_0 = pd.read_pickle(name)
                list_of_stocks_0 = list(set(symbols) - set(stock_closes_0.columns))
            else:
                stock_closes_0 = None
                list_of_stocks_0 = symbols
            # Download assets that are not in temporal file
            if list_of_stocks_0 == []:
                stock_closes = stock_closes_0.copy()
            else:
                stock_prices = yf.download(
                    list_of_stocks_0,
                    start=start_date,
                    end=end_date,
                    progress=False,
                    group_by="ticker",
                )
    # stock_closes is still None iff a download happened above; extract the
    # "Adj Close" level, merge with the cached frame, and refresh the cache.
    if stock_closes is None:
        if len(list_of_stocks_0) == 1:
            stock_closes = stock_prices.loc[:, ["Adj Close"]]
            stock_closes.columns = list_of_stocks_0
        else:
            stock_closes = stock_prices.loc[:, (slice(None), "Adj Close")]
            stock_closes.columns = stock_closes.columns.get_level_values(0)
        if list_of_stocks_0 != []:
            stock_closes = pd.concat([stock_closes, stock_closes_0], axis=1)
            stock_closes.to_pickle(name)
    # Return columns in the caller's requested order.
    stock_closes = stock_closes[symbols]
    return stock_closes
@log_start_end(log=logger)
def process_returns(
    data: pd.DataFrame,
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
) -> pd.DataFrame:
    """Process stock prices to calculate returns and delete outliers
    Parameters
    ----------
    data: pd.DataFrame
        DataFrame of stock prices
    log_returns: bool
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str or int
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: str or float
        Max percentage of nan values accepted per asset to be included in
        returns
    threshold: str or float
        Value used to replace outliers that are higher to threshold in daily returns.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see
        `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`_.
    Returns
    -------
    stock_returns: DataFrame
        DataFrame containing daily (adjusted) close prices for each stock in list
    """
    # Interpolate nan values
    stock_returns = data.copy()
    stock_returns = stock_returns.set_index(pd.DatetimeIndex(stock_returns.index))
    stock_returns.interpolate(method=method, axis=0, inplace=True)
    # Select stocks with low number of nans
    selected_stocks = np.isnan(stock_returns).sum(axis=0)
    selected_stocks = np.where(selected_stocks <= maxnan * stock_returns.shape[0])[0]
    stock_returns = stock_returns.iloc[:, selected_stocks]
    # Replace values above and below threshold
    if threshold > 0:
        stock_returns = stock_returns.pct_change()
        stock_returns.mask(stock_returns > threshold, threshold, inplace=True)
        stock_returns.mask(stock_returns < -threshold, -threshold, inplace=True)
        # Rebuild a price-like cumulative series from the clipped returns so
        # the resampling and return calculation below still apply.
        s = stock_returns.isna().idxmin().tolist()
        j = 0
        for i in s:
            stock_returns.iloc[stock_returns.index.get_loc(i) - 1, j] = 0
            j += 1
        stock_returns = stock_returns + 1
        stock_returns = stock_returns.cumprod()
    # Change the frequency of the data
    if freq.upper() == "D":
        pass
    elif freq.upper() in ["W", "M"]:
        last_day = stock_returns.index[-1]
        stock_returns = stock_returns.resample(freq).last()
        # NOTE(review): `freq.upper() == ["W"]` compares a str with a list and
        # is always False, so this incomplete-week trim never runs — likely
        # meant `== "W"`. Confirm before changing behavior.
        if freq.upper() == ["W"]:
            if last_day.weekday() < 4:
                stock_returns = stock_returns.iloc[:-1, :]
        # NOTE(review): same str-vs-list comparison; likely meant `== "M"`.
        if freq.upper() == ["M"]:
            if monthrange(last_day.year, last_day.month)[1] - last_day.day <= 5:
                stock_returns = stock_returns.iloc[:-1, :]
    # Calculate returns
    if log_returns is True:
        stock_returns = np.log(stock_returns)
        stock_returns = stock_returns.diff().dropna()
    else:
        stock_returns = stock_returns.pct_change().dropna()
    return stock_returns | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/portfolio_optimization/yahoo_finance_model.py | 0.747616 | 0.378804 | yahoo_finance_model.py | pypi
from typing import List, Optional
from openbb_terminal.helper_funcs import get_rf
from openbb_terminal.portfolio.portfolio_optimization.parameters.Parameter import (
Parameter,
)
# Lookback periods accepted by yfinance price downloads.
PERIOD_CHOICES = [
    "1d",
    "5d",
    "1mo",
    "3mo",
    "6mo",
    "1y",
    "2y",
    "3y",
    "5y",
    "10y",
    "ytd",
    "max",
]
# Risk measures supported by the mean-risk optimization models.
MEAN_RISK_CHOICES = [
    "MV",
    "MAD",
    "MSV",
    "FLPM",
    "SLPM",
    "CVaR",
    "EVaR",
    "WR",
    "ADD",
    "UCI",
    "CDaR",
    "EDaR",
    "MDD",
]
# Risk measures supported by the risk parity model.
RISK_PARITY_CHOICES = [
    "MV",
    "MAD",
    "MSV",
    "FLPM",
    "SLPM",
    "CVaR",
    "EVaR",
    "CDaR",
    "EDaR",
    "UCI",
]
# Formulation versions for the relative risk parity model.
REL_RISK_PARITY_CHOICES = [
    "A",
    "B",
    "C",
]
# Risk measures supported by hierarchical clustering portfolios (hrp/herc/nco).
# "_Rel" variants are computed on compounded (relative) returns.
HCP_CHOICES = [
    "MV",
    "MAD",
    "GMD",
    "MSV",
    "VaR",
    "CVaR",
    "TG",
    "EVaR",
    "RG",
    "CVRG",
    "TGRG",
    "WR",
    "FLPM",
    "SLPM",
    "MDD",
    "ADD",
    "DaR",
    "CDaR",
    "EDaR",
    "UCI",
    "MDD_Rel",
    "ADD_Rel",
    "DaR_Rel",
    "CDaR_Rel",
    "EDaR_Rel",
    "UCI_Rel",
]
# Maps lowercase user input to the canonical risk-measure identifiers above.
RISK_CHOICES = {
    "mv": "MV",
    "mad": "MAD",
    "gmd": "GMD",
    "msv": "MSV",
    "var": "VaR",
    "cvar": "CVaR",
    "tg": "TG",
    "evar": "EVaR",
    "rg": "RG",
    "cvrg": "CVRG",
    "tgrg": "TGRG",
    "wr": "WR",
    "flpm": "FLPM",
    "slpm": "SLPM",
    "mdd": "MDD",
    "add": "ADD",
    "dar": "DaR",
    "cdar": "CDaR",
    "edar": "EDaR",
    "uci": "UCI",
    "mdd_rel": "MDD_Rel",
    "add_rel": "ADD_Rel",
    "dar_rel": "DaR_Rel",
    "cdar_rel": "CDaR_Rel",
    "edar_rel": "EDaR_Rel",
    "uci_rel": "UCI_Rel",
}
# Estimators for expected returns (historical or exponentially weighted).
MEAN_CHOICES = [
    "hist",
    "ewma1",
    "ewma2",
]
# Codependence/similarity measures used to build the clustering distance matrix.
CODEPENDENCE_CHOICES = [
    "pearson",
    "spearman",
    "abs_pearson",
    "abs_spearman",
    "distance",
    "mutual_info",
    "tail",
]
# Covariance matrix estimators.
COVARIANCE_CHOICES = [
    "hist",
    "ewma1",
    "ewma2",
    "ledoit",
    "oas",
    "shrunk",
    "gl",
    "jlogo",
    "fixed",
    "spectral",
    "shrink",
]
# Objective functions for mean-risk optimization.
OBJECTIVE_CHOICES = [
    "MinRisk",
    "Utility",
    "Sharpe",
    "MaxRet",
]
# Objective functions for the NCO model ("ERC" instead of "MaxRet").
NCO_OBJECTIVE_CHOICES = [
    "MinRisk",
    "Utility",
    "Sharpe",
    "ERC",
]
# Linkage methods for hierarchical clustering.
LINKAGE_CHOICES = [
    "single",
    "complete",
    "average",
    "weighted",
    "centroid",
    "median",
    "ward",
    "dbht",
]
# Bin-count estimators (used with the mutual information codependence).
BINS_CHOICES = [
    "KN",
    "FD",
    "SC",
    "HGR",
]
# Return frequencies: daily, weekly, monthly.
FREQ_CHOICES = [
    "d",
    "w",
    "m",
]
# Interpolation methods for filling nan values; forwarded to
# pandas.DataFrame.interpolate(method=...).
METHOD_CHOICES = [
    "linear",
    "time",
    "nearest",
    "zero",
    "slinear",
    "quadratic",
    "cubic",
    "barycentric",
]
# Number of periods per year used to annualize each return frequency.
TIME_FACTOR = {
    "D": 252.0,
    "W": 52.0,
    "M": 12.0,
}
# Human-readable descriptions for each lowercase risk-measure key.
RISK_NAMES = {
    "mv": "volatility",
    "mad": "mean absolute deviation",
    "gmd": "gini mean difference",
    "msv": "semi standard deviation",
    "var": "value at risk (VaR)",
    "cvar": "conditional value at risk (CVaR)",
    "tg": "tail gini",
    "evar": "entropic value at risk (EVaR)",
    "rg": "range",
    "cvrg": "CVaR range",
    "tgrg": "tail gini range",
    "wr": "worst realization",
    "flpm": "first lower partial moment",
    "slpm": "second lower partial moment",
    "mdd": "maximum drawdown uncompounded",
    "add": "average drawdown uncompounded",
    "dar": "drawdown at risk (DaR) uncompounded",
    "cdar": "conditional drawdown at risk (CDaR) uncompounded",
    "edar": "entropic drawdown at risk (EDaR) uncompounded",
    "uci": "ulcer index uncompounded",
    "mdd_rel": "maximum drawdown compounded",
    "add_rel": "average drawdown compounded",
    "dar_rel": "drawdown at risk (DaR) compounded",
    "cdar_rel": "conditional drawdown at risk (CDaR) compounded",
    "edar_rel": "entropic drawdown at risk (EDaR) compounded",
    "uci_rel": "ulcer index compounded",
}
# Subset of risk measures that are drawdown-based.
DRAWDOWNS = [
    "MDD",
    "ADD",
    "DaR",
    "CDaR",
    "EDaR",
    "UCI",
    "MDD_Rel",
    "ADD_Rel",
    "DaR_Rel",
    "CDaR_Rel",
    "EDaR_Rel",
    "UCI_Rel",
]
# Same options as METHOD_CHOICES; kept as a separate name for the
# parameter-template machinery below.
NAN_FILL_METHOD_CHOICES = [
    "linear",
    "time",
    "nearest",
    "zero",
    "slinear",
    "quadratic",
    "cubic",
    "barycentric",
]
# Optimization techniques exposed as commands by the po controller.
TECHNIQUE_CHOICES = [
    "maxsharpe",
    "minrisk",
    "maxutil",
    "maxret",
    "maxdiv",
    "maxdecorr",
    "ef",
    "equal",
    "mktcap",
    "dividend",
    "blacklitterman",
    "riskparity",
    "relriskparity",
    "hrp",
    "herc",
    "nco",
]
# Parameter template files: declared type, default and (optional) valid choices
# for every parameter that can appear in an optimization parameter file.
OPTIMIZATION_PARAMETERS = {
    "historic_period": Parameter(
        name="historic_period",
        type_=str,
        default="3y",
        choices=PERIOD_CHOICES,
    ),
    "start_period": Parameter(
        name="start_period",
        type_=str,
        default="",
    ),
    "end_period": Parameter(
        name="end_period",
        type_=str,
        default="",
    ),
    "log_returns": Parameter(
        name="log_returns",
        type_=bool,
        default=False,
    ),
    "return_frequency": Parameter(
        name="return_frequency",
        type_=str,
        default="d",
        choices=FREQ_CHOICES,
    ),
    "max_nan": Parameter(
        name="max_nan",
        type_=float,
        default=0.05,
    ),
    "threshold_value": Parameter(
        name="threshold_value",
        type_=float,
        default=0.3,
    ),
    "nan_fill_method": Parameter(
        name="nan_fill_method",
        type_=str,
        default="time",
        choices=NAN_FILL_METHOD_CHOICES,
    ),
    "risk_free": Parameter(
        name="risk_free",
        type_=float,
        default=get_rf(),
    ),
    "significance_level": Parameter(
        name="significance_level",
        type_=float,
        default=0.05,
    ),
    "technique": Parameter(
        name="technique",
        type_=str,
        default="maxsharpe",
        choices=TECHNIQUE_CHOICES,
    ),
    "risk_measure": Parameter(
        name="risk_measure",
        type_=str,
        default="MV",
        choices=MEAN_RISK_CHOICES + RISK_PARITY_CHOICES + HCP_CHOICES,
    ),
    "target_return": Parameter(
        name="target_return",
        type_=float,
        default=-1.0,
    ),
    "target_risk": Parameter(
        name="target_risk",
        type_=float,
        default=-1.0,
    ),
    "expected_return": Parameter(
        name="expected_return",
        type_=str,
        default="hist",
        choices=MEAN_CHOICES,
    ),
    "covariance": Parameter(
        name="covariance",
        type_=str,
        default="hist",
        choices=COVARIANCE_CHOICES,
    ),
    "smoothing_factor_ewma": Parameter(
        name="smoothing_factor_ewma",
        type_=float,
        default=0.94,
    ),
    "long_allocation": Parameter(
        name="long_allocation",
        type_=float,
        default=1.0,
    ),
    "short_allocation": Parameter(
        name="short_allocation",
        type_=float,
        default=0.0,
    ),
    "risk_aversion": Parameter(
        name="risk_aversion",
        type_=float,
        default=1.0,
    ),
    "amount_portfolios": Parameter(
        name="amount_portfolios",
        type_=int,
        default=100,
    ),
    "random_seed": Parameter(
        name="random_seed",
        type_=int,
        default=123,
    ),
    "tangency": Parameter(
        name="tangency",
        type_=bool,
        default=False,
    ),
    "risk_contribution": Parameter(
        name="risk_contribution",
        type_=Optional[List[str]],
        default=None,
    ),
    "risk_parity_model": Parameter(
        name="risk_parity_model",
        type_=str,
        default="A",
        choices=REL_RISK_PARITY_CHOICES,
    ),
    "penal_factor": Parameter(
        name="penal_factor",
        type_=float,
        default=1.0,
    ),
    "p_views": Parameter(
        name="p_views",
        type_=Optional[List[List[float]]],
        default=None,
    ),
    "q_views": Parameter(
        name="q_views",
        type_=Optional[List[List[float]]],
        default=None,
    ),
    "delta": Parameter(
        name="delta",
        type_=Optional[float],
        default=None,
    ),
    "equilibrium": Parameter(
        name="equilibrium",
        type_=bool,
        default=True,
    ),
    "optimize": Parameter(
        name="optimize",
        type_=bool,
        default=True,
    ),
    "co_dependence": Parameter(
        name="co_dependence",
        type_=str,
        default="pearson",
        choices=CODEPENDENCE_CHOICES,
    ),
    "cvar_simulations_losses": Parameter(
        name="cvar_simulations_losses",
        type_=int,
        default=100,
    ),
    "cvar_simulations_gains": Parameter(
        name="cvar_simulations_gains",
        type_=Optional[int],
        default=None,
    ),
    "cvar_significance": Parameter(
        name="cvar_significance",
        type_=Optional[float],
        default=None,
    ),
    "linkage": Parameter(
        name="linkage",
        type_=str,
        default="single",
        choices=LINKAGE_CHOICES,
    ),
    "amount_clusters": Parameter(
        name="amount_clusters",
        type_=Optional[int],
        default=None,
    ),
    "max_clusters": Parameter(
        name="max_clusters",
        type_=int,
        default=10,
    ),
    "amount_bins": Parameter(
        name="amount_bins",
        type_=str,
        default="KN",
        choices=BINS_CHOICES,
    ),
    "alpha_tail": Parameter(
        name="alpha_tail",
        type_=float,
        default=0.05,
    ),
    "leaf_order": Parameter(
        name="leaf_order",
        type_=bool,
        default=True,
    ),
    "objective": Parameter(
        name="objective",
        type_=str,
        default="MinRisk",
        choices=OBJECTIVE_CHOICES + NCO_OBJECTIVE_CHOICES,
    ),
}
# Model functions
# TODO: The dictionary below should not be needed, we can input defaults in the Parameter directly
# But since template variable names are different from functions argument variable names
# we need to map them before removing this dictionary. When we standardize the names, we can remove this.
# Keys are function-argument names; values are the template parameter names above.
TERMINAL_TEMPLATE_MAP = {
    "a_sim": "cvar_simulations_losses",
    "alpha": "significance_level",
    "alpha_tail": "alpha_tail",
    "n_portfolios": "amount_portfolios",
    "b_sim": "cvar_simulations_gains",
    "beta": "cvar_significance",
    "bins_info": "amount_bins",
    "codependence": "co_dependence",
    "covariance": "covariance",
    "d_ewma": "smoothing_factor_ewma",
    "delta": "delta",
    "end_date": "end_period",
    "equilibrium": "equilibrium",
    "freq": "return_frequency",
    "interval": "historic_period",
    "k": "amount_clusters",
    "leaf_order": "leaf_order",
    "linkage": "linkage",
    "log_returns": "log_returns",
    "max_k": "max_clusters",
    "maxnan": "max_nan",
    "mean": "expected_return",
    "method": "nan_fill_method",
    "objective": "objective",
    "optimize": "optimize",
    "p_views": "p_views",
    "penal_factor": "penal_factor",
    "q_views": "q_views",
    "seed": "random_seed",
    "risk_aversion": "risk_aversion",
    "risk_cont": "risk_contribution",
    "risk_free_rate": "risk_free",
    "risk_measure": "risk_measure",
    "version": "risk_parity_model",
    "start_date": "start_period",
    "tangency": "tangency",
    "target_return": "target_return",
    "target_risk": "target_risk",
    "technique": "technique",
    "threshold": "threshold_value",
    "value": "long_allocation",
    "value_short": "short_allocation",
}
__docformat__ = "numpy"
# pylint: disable=too-many-lines,too-many-instance-attributes
import argparse
import logging
from typing import List, Dict, Tuple
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal import parent_classes
from openbb_terminal.core.config.paths import (
MISCELLANEOUS_DIRECTORY,
USER_EXPORTS_DIRECTORY,
USER_PORTFOLIO_DATA_DIRECTORY,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
check_non_negative,
get_rf,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.portfolio.portfolio_optimization import excel_model
from openbb_terminal.portfolio.portfolio_optimization import (
optimizer_helper,
optimizer_model,
optimizer_view,
statics,
)
from openbb_terminal.portfolio.portfolio_optimization import yahoo_finance_model
from openbb_terminal.portfolio.portfolio_optimization.parameters import (
params_controller,
params_view,
)
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
def add_arguments(parser_update, parser, not_in_list):
    """Clone the optional arguments of `parser` onto `parser_update` with an "-sa" suffix.

    Every eligible action of `parser` is re-registered on `parser_update` with
    each of its flags suffixed by "-sa" and its dest suffixed by "_sa", while
    keeping the same type, default, choices and help text. Actions whose dest
    appears in `not_in_list` are skipped.
    """
    for action in vars(parser)["_actions"]:
        attrs = vars(action)
        # Same structural guard as the original: only consider actions whose
        # first stored attribute is "option_strings" and that are not excluded.
        if next(iter(attrs)) != "option_strings" or attrs["dest"] in not_in_list:
            continue
        flags = attrs["option_strings"]
        if len(flags) not in (1, 2):
            continue
        parser_update.add_argument(
            *[flag + "-sa" for flag in flags],
            type=attrs["type"],
            default=attrs["default"],
            dest=attrs["dest"] + "_sa",
            choices=attrs["choices"],
            help=attrs["help"],
        )
def check_input(
    input_type: str, input_list: List[str], available_list: List[str]
) -> List[str]:
    """Check if input is valid

    Keeps the entries of ``input_list`` that exist in ``available_list`` and
    prints a warning for every entry that does not.

    Parameters
    ----------
    input_type : str
        Type of input (label used in the warning message)
    input_list : List[str]
        List of input
    available_list : List[str]
        List of available input

    Returns
    -------
    List[str]
        Valid categories
    """
    valid: List[str] = [item for item in input_list if item in available_list]
    # Warn about every rejected entry, in input order.
    for item in input_list:
        if item not in available_list:
            console.print(f"[red]{input_type} '{item}' not available.[/red]\n")
    return valid
def get_valid_portfolio_categories(
    input_portfolios: List[str],
    available_portfolios: Dict,
    input_categories: List[str],
    available_categories: Dict,
) -> Tuple[List[str], List[str]]:
    """Get valid portfolios and categories

    Prints the currently available portfolios and categories, then filters the
    user-provided lists down to valid entries via ``check_input``.

    Parameters
    ----------
    input_portfolios : List[str]
        List of input portfolios
    available_portfolios : Dict
        Dict of available portfolios
    input_categories : List[str]
        List of input categories
    available_categories : Dict
        Dict of available categories

    Returns
    -------
    Tuple[List[str], List[str]]
        Valid portfolios and categories
    """
    portfolio_names = list(set(available_portfolios.keys()))
    category_names = list(set(available_categories.keys()))
    if portfolio_names:
        if not input_portfolios:
            console.print("[yellow]Please select at least one portfolio.[/yellow]\n")
        portfolio_msg = ", ".join(portfolio_names)
    else:
        portfolio_msg = "None. Perform some optimization to build a portfolio."
    categories_msg = (
        ", ".join(category_names)
        if category_names
        else "None. Attribute some categories in the loaded file."
    )
    console.print(f"[yellow]Current Portfolios: [/yellow]{portfolio_msg}\n")
    console.print(f"[yellow]Current Categories: [/yellow]{categories_msg}\n")
    valid_portfolios = check_input(
        input_type="Portfolio",
        input_list=input_portfolios,
        available_list=portfolio_names,
    )
    valid_categories = check_input(
        input_type="Category",
        input_list=input_categories,
        available_list=category_names,
    )
    return valid_portfolios, valid_categories
class PortfolioOptimizationController(BaseController):
"""Portfolio Optimization Controller class"""
DEFAULT_PORTFOLIO_DIRECTORY = MISCELLANEOUS_DIRECTORY / "portfolio_examples"
DEFAULT_ALLOCATION_DIRECTORY = DEFAULT_PORTFOLIO_DIRECTORY / "allocation"
DEFAULT_OPTIMIZATION_DIRECTORY = DEFAULT_PORTFOLIO_DIRECTORY / "optimization"
FILE_TYPE_LIST = ["xlsx", "ini"]
CHOICES_COMMANDS = [
"show",
"rpf",
"load",
"plot",
"equal",
"mktcap",
"dividend",
"property",
"maxsharpe",
"minrisk",
"maxutil",
"maxret",
"maxdiv",
"maxdecorr",
"blacklitterman",
"riskparity",
"relriskparity",
"hrp",
"herc",
"nco",
"ef",
"file",
]
CHOICES_MENUS = ["params"]
PATH = "/portfolio/po/"
CHOICES_GENERATION = True
files_available: List = list()
@classmethod
def build_allocation_file_map(cls) -> dict:
allocation_file_map = {
filepath.name: filepath
for file_type in cls.FILE_TYPE_LIST
for filepath in cls.DEFAULT_ALLOCATION_DIRECTORY.rglob(f"*.{file_type}")
}
allocation_file_map.update(
{
filepath.name: filepath
for file_type in cls.FILE_TYPE_LIST
for filepath in (USER_PORTFOLIO_DATA_DIRECTORY / "allocation").rglob(
f"*.{file_type}"
)
}
)
return allocation_file_map
@classmethod
def build_optimization_file_map(cls) -> dict:
optimization_file_map = {
filepath.name: filepath
for file_type in cls.FILE_TYPE_LIST
for filepath in cls.DEFAULT_OPTIMIZATION_DIRECTORY.rglob(f"*.{file_type}")
}
optimization_file_map.update(
{
filepath.name: filepath
for file_type in cls.FILE_TYPE_LIST
for filepath in (USER_PORTFOLIO_DATA_DIRECTORY / "optimization").rglob(
f"*.{file_type}"
)
}
)
return optimization_file_map
def __init__(
self,
tickers: List[str] = None,
portfolios: Dict = None,
categories: Dict = None,
queue: List[str] = None,
):
"""Constructor"""
super().__init__(queue)
self.current_model = None
if tickers:
self.tickers = list(set(tickers))
self.tickers.sort()
else:
self.tickers = list()
if portfolios:
self.portfolios = dict(portfolios)
else:
self.portfolios = dict()
if categories:
self.categories = dict(categories)
self.available_categories = list(self.categories.keys())
else:
self.categories = dict()
self.available_categories = list()
self.count = 0
self.current_portfolio = ""
self.allocation_file_map = self.build_allocation_file_map()
self.optimization_file_map = self.build_optimization_file_map()
self.current_file = ""
self.params: Dict = {}
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = self.choices_default
self.choices = choices
self.completer = NestedCompleter.from_nested_dict(choices)
def update_runtime_choices(self):
if session and obbff.USE_PROMPT_TOOLKIT:
if self.portfolios:
self.choices["show"]["--portfolios"] = {
c: {} for c in list(self.portfolios.keys())
}
self.choices["rpf"]["--portfolios"] = {
c: {} for c in list(self.portfolios.keys())
}
self.choices["plot"]["--portfolios"] = {
c: {} for c in list(self.portfolios.keys())
}
self.completer = NestedCompleter.from_nested_dict(self.choices)
    def print_help(self):
        """Print help

        Builds the menu text for the portfolio optimization menu. Commands
        that take a second truthy argument are greyed out when that argument
        is falsy (e.g. no tickers loaded, no optimized portfolios yet).
        """
        mt = MenuText("portfolio/po/")
        mt.add_cmd("load")
        mt.add_raw("\n")
        mt.add_param("_loaded", self.current_portfolio or "")
        mt.add_raw("\n")
        mt.add_param("_tickers", ", ".join(self.tickers))
        mt.add_param("_categories", ", ".join(self.available_categories))
        mt.add_raw("\n")
        mt.add_cmd("file")
        mt.add_menu("params")
        mt.add_raw("\n")
        mt.add_param("_parameter", self.current_file)
        mt.add_raw("\n")
        mt.add_info("_mean_risk_optimization_")
        # Enabled only when tickers are loaded.
        mt.add_cmd("maxsharpe", self.tickers)
        mt.add_cmd("minrisk", self.tickers)
        mt.add_cmd("maxutil", self.tickers)
        mt.add_cmd("maxret", self.tickers)
        mt.add_cmd("maxdiv", self.tickers)
        mt.add_cmd("maxdecorr", self.tickers)
        mt.add_cmd("blacklitterman", self.tickers)
        mt.add_cmd("ef", self.tickers)
        mt.add_raw("\n")
        mt.add_info("_risk_parity_optimization_")
        mt.add_cmd("riskparity", self.tickers)
        mt.add_cmd("relriskparity", self.tickers)
        mt.add_raw("\n")
        mt.add_info("_hierarchical_clustering_models_")
        mt.add_cmd("hrp", self.tickers)
        mt.add_cmd("herc", self.tickers)
        mt.add_cmd("nco", self.tickers)
        mt.add_raw("\n")
        mt.add_info("_other_optimization_techniques_")
        mt.add_cmd("equal", self.tickers)
        mt.add_cmd("mktcap", self.tickers)
        mt.add_cmd("dividend", self.tickers)
        mt.add_cmd("property", self.tickers)
        mt.add_raw("\n")
        mt.add_param("_optimized_portfolio", ", ".join(self.portfolios.keys()))
        mt.add_raw("\n")
        # Enabled only once at least one optimized portfolio exists.
        mt.add_cmd("rpf", bool(self.portfolios.keys()))
        mt.add_cmd("show", bool(self.portfolios.keys()))
        mt.add_cmd("plot", bool(self.portfolios.keys()))
        console.print(text=mt.menu_text, menu="Portfolio - Portfolio Optimization")
    # pylint: disable=too-many-arguments
    def po_parser(
        self,
        parser,
        rm: bool = False,
        mt: bool = False,
        ct: bool = False,
        p: bool = False,
        s: bool = False,
        e: bool = False,
        lr: bool = False,
        freq: bool = False,
        mn: bool = False,
        th: bool = False,
        r: bool = False,
        a: bool = False,
        v: bool = True,
        name: str = "",
    ):
        """Holds common parser arguments to eliminate repetition

        Each boolean flag turns on one shared argparse argument: rm (risk
        measure), mt (nan fill method), ct (categories), p (period), s/e
        (start/end date), lr (log returns), freq (return frequency), mn (max
        nan share), th (outlier threshold), r (risk-free rate), a
        (significance level), v (allocation value); a non-empty ``name`` adds
        a --name argument defaulting to ``name`` + current counter. Defaults
        fall back to the loaded parameter file (``self.params``) when present.
        """
        if rm:
            parser.add_argument(
                "-rm",
                "--risk-measure",
                default=self.params["risk_measure"]
                if "risk_measure" in self.params
                else "MV",
                dest="risk_measure",
                help="""Risk measure used to optimize the portfolio. Possible values are:
                'MV' : Variance
                'MAD' : Mean Absolute Deviation
                'MSV' : Semi Variance (Variance of negative returns)
                'FLPM' : First Lower Partial Moment
                'SLPM' : Second Lower Partial Moment
                'CVaR' : Conditional Value at Risk
                'EVaR' : Entropic Value at Risk
                'WR' : Worst Realization
                'ADD' : Average Drawdown of uncompounded returns
                'UCI' : Ulcer Index of uncompounded returns
                'CDaR' : Conditional Drawdown at Risk of uncompounded returns
                'EDaR' : Entropic Drawdown at Risk of uncompounded returns
                'MDD' : Maximum Drawdown of uncompounded returns
                """,
                choices=statics.MEAN_RISK_CHOICES,
            )
        if mt:
            parser.add_argument(
                "-mt",
                "--method",
                default=self.params["nan_fill_method"]
                if "nan_fill_method" in self.params
                else "time",
                dest="nan_fill_method",
                help="""Method used to fill nan values in time series, by default time.
                Possible values are:
                'linear': linear interpolation
                'time': linear interpolation based on time index
                'nearest': use nearest value to replace nan values
                'zero': spline of zeroth order
                'slinear': spline of first order
                'quadratic': spline of second order
                'cubic': spline of third order
                'barycentric': builds a polynomial that pass for all points""",
                choices=statics.METHOD_CHOICES,
                metavar="METHOD",
            )
        if ct:
            parser.add_argument(
                "-ct",
                "--categories",
                dest="categories",
                # Comma-separated input, upper-cased to match category keys.
                type=lambda s: [str(item).upper() for item in s.split(",")],
                default=self.available_categories,
                help="Show selected categories",
            )
        if p:
            parser.add_argument(
                "-p",
                "--period",
                default=self.params["historic_period"]
                if "historic_period" in self.params
                else "3y",
                dest="historic_period",
                help="""Period to get yfinance data from.
                Possible frequency strings are:
                'd': means days, for example '252d' means 252 days
                'w': means weeks, for example '52w' means 52 weeks
                'mo': means months, for example '12mo' means 12 months
                'y': means years, for example '1y' means 1 year
                'ytd': downloads data from beginning of year to today
                'max': downloads all data available for each asset""",
                choices=statics.PERIOD_CHOICES,
                metavar="PERIOD",
            )
        if s:
            parser.add_argument(
                "-s",
                "--start",
                default=self.params["start_period"]
                if "start_period" in self.params
                else "",
                dest="start_period",
                help="""Start date to get yfinance data from. Must be in
                'YYYY-MM-DD' format""",
            )
        if e:
            parser.add_argument(
                "-e",
                "--end",
                default=self.params["end_period"]
                if "end_period" in self.params
                else "",
                dest="end_period",
                help="""End date to get yfinance data from. Must be in
                'YYYY-MM-DD' format""",
            )
        if lr:
            parser.add_argument(
                "-lr",
                "--log-returns",
                action="store_true",
                default=self.params["log_returns"]
                if "log_returns" in self.params
                else False,
                dest="log_returns",
                help="If use logarithmic or arithmetic returns to calculate returns",
            )
        if freq:
            parser.add_argument(
                "--freq",
                default=self.params["return_frequency"]
                if "return_frequency" in self.params
                else "d",
                dest="return_frequency",
                help="""Frequency used to calculate returns. Possible values are:
                'd': for daily returns
                'w': for weekly returns
                'm': for monthly returns
                """,
                choices=statics.FREQ_CHOICES,
            )
        if mn:
            parser.add_argument(
                "-mn",
                "--maxnan",
                type=float,
                default=self.params["max_nan"] if "max_nan" in self.params else 0.05,
                dest="max_nan",
                help="""Max percentage of nan values accepted per asset to be
                considered in the optimization process""",
            )
        if th:
            parser.add_argument(
                "-th",
                "--threshold",
                type=float,
                default=self.params["threshold_value"]
                if "threshold_value" in self.params
                else 0.30,
                dest="threshold_value",
                help="""Value used to replace outliers that are higher to threshold
                in absolute value""",
            )
        if r:
            parser.add_argument(
                "-r",
                "--risk-free-rate",
                type=float,
                dest="risk_free",
                default=self.params["risk_free"]
                if "risk_free" in self.params
                else get_rf(),
                help="""Risk-free rate of borrowing/lending. The period of the
                risk-free rate must be annual""",
            )
        if a:
            parser.add_argument(
                "-a",
                "--alpha",
                type=float,
                default=self.params["significance_level"]
                if "significance_level" in self.params
                else 0.05,
                dest="significance_level",
                help="Significance level of CVaR, EVaR, CDaR and EDaR",
            )
        if v:
            parser.add_argument(
                "-v",
                "--value",
                default=self.params["long_allocation"]
                if "long_allocation" in self.params
                else 1,
                type=float,
                dest="long_allocation",
                help="Amount to allocate to portfolio",
            )
        if name:
            parser.add_argument(
                "--name",
                type=str,
                dest="name",
                # self.count is incremented after each successful optimization,
                # giving unique default names like NAME_0, NAME_1, ...
                default=name + str(self.count),
                help="Save portfolio with personalized or default name",
            )
        return parser
def custom_reset(self):
"""Class specific component of reset command"""
objects_to_reload = ["portfolio", "po"]
if self.current_portfolio:
objects_to_reload.append(f"load {self.current_portfolio}")
if self.current_file:
objects_to_reload.append(f"file {self.current_file}")
return objects_to_reload
@log_start_end(log=logger)
def call_file(self, other_args: List[str]):
"""Process file command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="file",
description="Select parameter file to use",
)
parser.add_argument(
"-f",
"--file",
required="-h" not in other_args,
nargs="+",
dest="file",
help="Parameter file to be used",
choices=self.optimization_file_map.keys(),
metavar="FILE",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "--file")
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.current_file = " ".join(ns_parser.file)
if self.current_file in self.optimization_file_map:
file_location = self.optimization_file_map[self.current_file]
else:
file_location = self.current_file # type: ignore
self.params, self.current_model = params_view.load_file(file_location)
@log_start_end(log=logger)
def call_params(self, _):
"""Process params command"""
self.queue = self.load_class(
params_controller.ParametersController,
self.current_file,
self.queue,
self.params,
self.current_model,
)
self.current_file = parent_classes.controllers[
"/portfolio/po/params/"
].current_file
self.current_model = parent_classes.controllers[
"/portfolio/po/params/"
].current_model
self.params = parent_classes.controllers["/portfolio/po/params/"].params
@log_start_end(log=logger)
def call_show(self, other_args: List[str]):
"""Show saved portfolios"""
parser = argparse.ArgumentParser(
add_help=False,
prog="show",
description="""Show selected saved portfolios""",
)
parser.add_argument(
"-pf",
"--portfolios",
dest="portfolios",
type=lambda s: [str(item).upper() for item in s.split(",")],
default=[],
help="Show selected saved portfolios",
)
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-pf")
parser = self.po_parser(parser, ct=True)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
portfolios, categories = get_valid_portfolio_categories(
input_portfolios=ns_parser.portfolios,
available_portfolios=self.portfolios,
input_categories=ns_parser.categories,
available_categories=self.categories,
)
for p in portfolios:
console.print("[yellow]Portfolio[/yellow]: " + p + "\n")
optimizer_view.display_show(
weights=self.portfolios[p],
tables=categories,
categories_dict=self.categories,
)
@log_start_end(log=logger)
def call_rpf(self, other_args: List[str]):
"""Remove one portfolio"""
parser = argparse.ArgumentParser(
add_help=False,
prog="rpf",
description="""Remove one of the portfolios""",
)
parser.add_argument(
"-pf",
"--portfolios",
dest="portfolios",
type=lambda s: [str(item).upper() for item in s.split(",")],
default=[],
help="portfolios to be removed from the saved portfolios",
)
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-pf")
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
portfolios = set(self.portfolios.keys())
for portfolio in ns_parser.portfolios:
if portfolio in portfolios:
self.portfolios.pop(portfolio)
portfolios.remove(portfolio)
console.print(f"[yellow]Removed '{portfolio}'.[/yellow]")
else:
console.print(f"[red]Portfolio '{portfolio}' does not exist.[/red]")
if self.portfolios:
console.print(
f"\n[yellow]Current Portfolios: [/yellow]{('None', ', '.join(portfolios))[bool(portfolios)]}"
)
self.update_runtime_choices()
@log_start_end(log=logger)
def call_load(self, other_args: List[str]):
"""Load file with stocks tickers and categories"""
parser = argparse.ArgumentParser(
add_help=False,
prog="load",
description="""Load file of stocks tickers with optional categories""",
)
parser.add_argument(
"-f",
"--file",
required="-h" not in other_args,
nargs="+",
dest="file",
help="Allocation file to be used",
choices=self.allocation_file_map.keys(),
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "--file")
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
filename = " ".join(ns_parser.file)
if filename in self.allocation_file_map:
file_location = self.allocation_file_map[filename]
else:
file_location = filename # type: ignore
self.tickers, self.categories = excel_model.load_allocation(file_location)
self.available_categories = list(self.categories.keys())
if "CURRENT_INVESTED_AMOUNT" in self.available_categories:
self.available_categories.remove("CURRENT_INVESTED_AMOUNT")
self.portfolios = dict()
self.update_runtime_choices()
self.current_portfolio = filename
    @log_start_end(log=logger)
    def call_plot(self, other_args: List[str]):
        """Process plot command

        Plots one or more chart types (pie, histogram, drawdown, risk
        contribution, heatmap) for the selected saved portfolios. hist/dd are
        portfolio-wide; pie/rc/heat are rendered once per selected category.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="plot",
            description="Plot selected charts for portfolios",
        )
        parser.add_argument(
            "-pf",
            "--portfolios",
            type=lambda s: [str(item).upper() for item in s.split(",")],
            default=[],
            dest="portfolios",
            help="Selected portfolios that will be plotted",
        )
        parser.add_argument(
            "-pi",
            "--pie",
            action="store_true",
            dest="pie",
            default=False,
            help="Display a pie chart for weights",
        )
        parser.add_argument(
            "-hi",
            "--hist",
            action="store_true",
            dest="hist",
            default=False,
            help="Display a histogram with risk measures",
        )
        parser.add_argument(
            "-dd",
            "--drawdown",
            action="store_true",
            dest="dd",
            default=False,
            help="Display a drawdown chart with risk measures",
        )
        parser.add_argument(
            "-rc",
            "--rc-chart",
            action="store_true",
            dest="rc_chart",
            default=False,
            help="Display a risk contribution chart for assets",
        )
        parser.add_argument(
            "-he",
            "--heat",
            action="store_true",
            dest="heat",
            default=False,
            help="Display a heatmap of correlation matrix with dendrogram",
        )
        # Attach the shared data/risk arguments.
        parser = self.po_parser(
            parser,
            rm=True,
            mt=True,
            ct=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            r=True,
            a=True,
        )
        if other_args:
            # Treat a bare first token as the portfolio list.
            if "-" not in other_args[0]:
                other_args.insert(0, "-pf")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            portfolios, categories = get_valid_portfolio_categories(
                input_portfolios=ns_parser.portfolios,
                available_portfolios=self.portfolios,
                input_categories=ns_parser.categories,
                available_categories=self.categories,
            )
            if not portfolios:
                return
            # At least one chart flag is required.
            if not (
                ns_parser.pie
                or ns_parser.hist
                or ns_parser.dd
                or ns_parser.rc_chart
                or ns_parser.heat
            ):
                console.print(
                    "[yellow]Please select at least one chart to plot[/yellow]",
                    "[yellow]from the following: -pi, -hi, -dd, -rc, -he.[/yellow]",
                )
                return
            # Union of all tickers across the selected portfolios, so returns
            # are downloaded once for every asset involved.
            stocks = []
            for i in portfolios:
                stocks += list(self.portfolios[i].keys())
            stocks = list(set(stocks))
            stocks.sort()
            _, stock_returns = optimizer_model.get_equal_weights(
                symbols=stocks,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                value=1,
            )
            if ns_parser.hist or ns_parser.dd:
                for i in portfolios:
                    weights = self.portfolios[i]
                    weights = dict(
                        sorted(weights.items(), key=lambda x: x[1], reverse=True)
                    )
                    stocks = list(weights.keys())
                    # hist and dd are transversal to all categories
                    optimizer_view.additional_plots(
                        weights=weights,
                        data=stock_returns[stocks],
                        portfolio_name=i,
                        freq=ns_parser.return_frequency,
                        risk_measure=ns_parser.risk_measure.lower(),
                        risk_free_rate=ns_parser.risk_free,
                        alpha=ns_parser.significance_level,
                        a_sim=100,
                        beta=ns_parser.significance_level,
                        b_sim=100,
                        hist=ns_parser.hist,
                        dd=ns_parser.dd,
                    )
            if ns_parser.pie or ns_parser.rc_chart or ns_parser.heat:
                if not categories:
                    console.print(
                        "[yellow]Categories must be provided to use -pi, -rc or -he.[/yellow]"
                    )
                    return
                for i in portfolios:
                    weights = self.portfolios[i]
                    weights = dict(
                        sorted(weights.items(), key=lambda x: x[1], reverse=True)
                    )
                    stocks = list(weights.keys())
                    # pie, rc_chart and heat apply to each category
                    for category in categories:
                        # Keep only category entries for assets in this portfolio.
                        filtered_categories = dict(
                            filter(
                                lambda elem: elem[0] in stocks,
                                self.categories[category].items(),
                            )
                        )
                        optimizer_view.additional_plots(
                            weights=weights,
                            data=stock_returns[stocks],
                            category_dict=filtered_categories,
                            category=category,
                            portfolio_name=i,
                            freq=ns_parser.return_frequency,
                            risk_measure=ns_parser.risk_measure.lower(),
                            risk_free_rate=ns_parser.risk_free,
                            alpha=ns_parser.significance_level,
                            a_sim=100,
                            beta=ns_parser.significance_level,
                            b_sim=100,
                            pie=ns_parser.pie,
                            rc_chart=ns_parser.rc_chart,
                            heat=ns_parser.heat,
                        )
@log_start_end(log=logger)
def call_equal(self, other_args: List[str]):
"""Process equal command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="equal",
description="Returns an equally weighted portfolio",
)
parser = self.po_parser(
parser,
rm=True,
mt=True,
p=True,
s=True,
e=True,
lr=True,
freq=True,
mn=True,
th=True,
r=True,
a=True,
v=True,
name="NAME_",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
if len(self.tickers) < 2:
console.print(
"Please have at least 2 loaded tickers to calculate weights.\n"
)
return
weights = optimizer_view.display_equal_weight(
symbols=self.tickers,
interval=ns_parser.historic_period,
start_date=ns_parser.start_period,
end_date=ns_parser.end_period,
log_returns=ns_parser.log_returns,
freq=ns_parser.return_frequency,
maxnan=ns_parser.max_nan,
threshold=ns_parser.threshold_value,
method=ns_parser.nan_fill_method,
risk_measure=ns_parser.risk_measure.lower(),
risk_free_rate=ns_parser.risk_free,
alpha=ns_parser.significance_level,
value=ns_parser.long_allocation,
table=True,
)
self.portfolios[ns_parser.name.upper()] = weights
self.count += 1
self.update_runtime_choices()
    @log_start_end(log=logger)
    def call_mktcap(self, other_args: List[str]) -> None:
        """Process mktcap command.

        Builds a portfolio whose weights are proportional to each ticker's
        market capitalization (the yfinance ``marketCap`` info field) and
        stores the result in ``self.portfolios`` under the parsed name.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="mktcap",
            description="Returns a portfolio that is weighted based on Market Cap.",
        )
        # Attach the shared portfolio-optimization option set.
        parser = self.po_parser(
            parser,
            rm=True,
            mt=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            r=True,
            a=True,
            v=True,
            name="MKTCAP_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Weighting needs at least two assets to be meaningful.
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 stocks selected to perform calculations."
                )
                return
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
            # Delegate to the generic property-weighting view with marketCap.
            weights = optimizer_view.display_property_weighting(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                s_property="marketCap",
                risk_measure=ns_parser.risk_measure.lower(),
                risk_free_rate=ns_parser.risk_free,
                alpha=ns_parser.significance_level,
                value=ns_parser.long_allocation,
                table=True,
            )
            # Register the portfolio and refresh tab-completion choices.
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
@log_start_end(log=logger)
def call_dividend(self, other_args: List[str]):
"""Process dividend command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="dividend",
description="Returns a portfolio that is weighted based dividend yield.",
)
parser = self.po_parser(
parser,
rm=True,
mt=True,
p=True,
s=True,
e=True,
lr=True,
freq=True,
mn=True,
th=True,
r=True,
a=True,
v=True,
name="DIVIDEND_",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
if len(self.tickers) < 2:
console.print(
"Please have at least 2 stocks selected to perform calculations."
)
return
console.print(
"[yellow]Optimization can take time. Please be patient...\n[/yellow]"
)
weights = optimizer_view.display_property_weighting(
symbols=self.tickers,
interval=ns_parser.historic_period,
start_date=ns_parser.start_period,
end_date=ns_parser.end_period,
log_returns=ns_parser.log_returns,
freq=ns_parser.return_frequency,
maxnan=ns_parser.max_nan,
threshold=ns_parser.threshold_value,
method=ns_parser.nan_fill_method,
s_property="dividendYield",
risk_measure=ns_parser.risk_measure.lower(),
risk_free_rate=ns_parser.risk_free,
alpha=ns_parser.significance_level,
value=ns_parser.long_allocation,
table=True,
)
self.portfolios[ns_parser.name.upper()] = weights
self.count += 1
self.update_runtime_choices()
    @log_start_end(log=logger)
    def call_property(self, other_args: List[str]) -> None:
        """Process property command.

        Builds a portfolio weighted by an arbitrary yfinance info field
        selected with ``--property`` and stores the result in
        ``self.portfolios`` under the parsed name.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="property",
            description="Returns a portfolio that is weighted based on selected property.",
        )
        parser.add_argument(
            "-pr",
            "--property",
            # The property is mandatory unless the user only asked for help.
            required=bool("-h" not in other_args),
            type=optimizer_helper.check_valid_property_type,
            dest="s_property",
            choices=yahoo_finance_model.yf_info_choices,
            help="""Property info to weight. Use one of yfinance info options.""",
            metavar="PROPERTY",
        )
        # Attach the shared portfolio-optimization option set.
        parser = self.po_parser(
            parser,
            rm=True,
            mt=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            r=True,
            a=True,
            v=True,
            name="PROPERTY_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Weighting needs at least two assets to be meaningful.
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 stocks selected to perform calculations."
                )
                return
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
            weights = optimizer_view.display_property_weighting(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                s_property=ns_parser.s_property,
                risk_measure=ns_parser.risk_measure.lower(),
                risk_free_rate=ns_parser.risk_free,
                alpha=ns_parser.significance_level,
                value=ns_parser.long_allocation,
                table=True,
            )
            # Register the portfolio and refresh tab-completion choices.
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
    @log_start_end(log=logger)
    def call_maxsharpe(self, other_args: List[str]) -> None:
        """Process maxsharpe command.

        Optimizes the loaded tickers for maximum return/risk ratio,
        registers the weights in ``self.portfolios``, and — when
        sensitivity-analysis (``_sa``) arguments are present on the parsed
        namespace — runs the optimization a second time with those
        parameters and displays side-by-side weight/category comparisons.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="maxsharpe",
            description="Maximizes the portfolio's return/risk ratio",
        )
        parser.add_argument(
            "-tr",
            "--target-return",
            dest="target_return",
            # Defaults fall back to the loaded params file when available.
            default=self.params["target_return"]
            if "target_return" in self.params
            else -1,
            type=float,
            help="Constraint on minimum level of portfolio's return",
        )
        parser.add_argument(
            "-tk",
            "--target-risk",
            dest="target_risk",
            default=self.params["target_risk"] if "target_risk" in self.params else -1,
            type=float,
            help="Constraint on maximum level of portfolio's risk",
        )
        parser.add_argument(
            "-m",
            "--mean",
            default=self.params["expected_return"]
            if "expected_return" in self.params
            else "hist",
            dest="expected_return",
            help="Method used to estimate the expected return vector",
            choices=statics.MEAN_CHOICES,
        )
        parser.add_argument(
            "-cv",
            "--covariance",
            default=self.params["covariance"]
            if "covariance" in self.params
            else "hist",
            dest="covariance",
            help="""Method used to estimate covariance matrix. Possible values are
            'hist': historical method
            'ewma1': exponential weighted moving average with adjust=True
            'ewma2': exponential weighted moving average with adjust=False
            'ledoit': Ledoit and Wolf shrinkage method
            'oas': oracle shrinkage method
            'shrunk': scikit-learn shrunk method
            'gl': graphical lasso method
            'jlogo': j-logo covariance
            'fixed': takes average of eigenvalues above max Marchenko Pastour limit
            'spectral': makes zero eigenvalues above max Marchenko Pastour limit
            'shrink': Lopez de Prado's book shrinkage method
            """,
            choices=statics.COVARIANCE_CHOICES,
        )
        parser.add_argument(
            "-de",
            "--d-ewma",
            type=float,
            default=self.params["smoothing_factor_ewma"]
            if "smoothing_factor_ewma" in self.params
            else 0.94,
            dest="smoothing_factor_ewma",
            help="Smoothing factor for ewma estimators",
        )
        parser.add_argument(
            "-vs",
            "--value-short",
            dest="short_allocation",
            help="Amount to allocate to portfolio in short positions",
            type=float,
            default=self.params["short_allocation"]
            if "short_allocation" in self.params
            else 0.0,
        )
        # Attach the shared portfolio-optimization option set.
        parser = self.po_parser(
            parser,
            rm=True,
            mt=True,
            ct=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            r=True,
            a=True,
            v=True,
            name="MAXSHARPE_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 loaded tickers to calculate weights.\n"
                )
                return
            # Presence of any "_sa" attribute means sensitivity-analysis mode:
            # suppress the single-run table and compare two runs instead.
            table = True
            if "historic_period_sa" in vars(ns_parser):
                table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
            weights = optimizer_view.display_max_sharpe(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                risk_measure=ns_parser.risk_measure.lower(),
                risk_free_rate=ns_parser.risk_free,
                alpha=ns_parser.significance_level,
                target_return=ns_parser.target_return,
                target_risk=ns_parser.target_risk,
                mean=ns_parser.expected_return.lower(),
                covariance=ns_parser.covariance.lower(),
                d_ewma=ns_parser.smoothing_factor_ewma,
                value=ns_parser.long_allocation,
                value_short=ns_parser.short_allocation,
                table=table,
            )
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
            if table is False:
                # Second optimization run with the sensitivity-analysis inputs.
                weights_sa = optimizer_view.display_max_sharpe(
                    symbols=self.tickers,
                    interval=ns_parser.historic_period_sa,
                    start_date=ns_parser.start_period_sa,
                    end_date=ns_parser.end_period_sa,
                    log_returns=ns_parser.log_returns_sa,
                    freq=ns_parser.return_frequency_sa,
                    maxnan=ns_parser.max_nan_sa,
                    threshold=ns_parser.threshold_value_sa,
                    method=ns_parser.nan_fill_method_sa,
                    risk_measure=ns_parser.risk_measure_sa.lower(),
                    risk_free_rate=ns_parser.risk_free_sa,
                    alpha=ns_parser.significance_level_sa,
                    target_return=ns_parser.target_return_sa,
                    target_risk=ns_parser.target_risk_sa,
                    mean=ns_parser.expected_return_sa.lower(),
                    covariance=ns_parser.covariance_sa.lower(),
                    d_ewma=ns_parser.smoothing_factor_ewma_sa,
                    value=ns_parser.long_allocation_sa,
                    value_short=ns_parser.short_allocation_sa,
                    table=table,
                )
                console.print("")
                optimizer_view.display_weights_sa(
                    weights=weights, weights_sa=weights_sa
                )
                # Break the comparison down by category groupings.
                if not ns_parser.categories:
                    categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
                else:
                    categories = ns_parser.categories
                for category in categories:
                    optimizer_view.display_categories_sa(
                        weights=weights,
                        weights_sa=weights_sa,
                        categories=self.categories,
                        column=category,
                        title="Category - " + category.title(),
                    )
@log_start_end(log=logger)
def call_minrisk(self, other_args: List[str]):
"""Process minrisk command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="minrisk",
description="Minimizes portfolio's risk",
)
parser.add_argument(
"-tr",
"--target-return",
dest="target_return",
default=self.params["target_return"]
if "target_return" in self.params
else -1,
type=float,
help="Constraint on minimum level of portfolio's return",
)
parser.add_argument(
"-tk",
"--target-risk",
dest="target_risk",
default=self.params["target_risk"] if "target_risk" in self.params else -1,
type=float,
help="Constraint on maximum level of portfolio's risk",
)
parser.add_argument(
"-m",
"--mean",
default=self.params["expected_return"]
if "expected_return" in self.params
else "hist",
dest="expected_return",
help="Method used to estimate expected returns vector",
choices=statics.MEAN_CHOICES,
)
parser.add_argument(
"-cv",
"--covariance",
default=self.params["covariance"] if "mean_ewma" in self.params else "hist",
dest="covariance",
help="""Method used to estimate covariance matrix. Possible values are
'hist': historical method
'ewma1': exponential weighted moving average with adjust=True
'ewma2': exponential weighted moving average with adjust=False
'ledoit': Ledoit and Wolf shrinkage method
'oas': oracle shrinkage method
'shrunk': scikit-learn shrunk method
'gl': graphical lasso method
'jlogo': j-logo covariance
'fixed': takes average of eigenvalues above max Marchenko Pastour limit
'spectral': makes zero eigenvalues above max Marchenko Pastour limit
'shrink': Lopez de Prado's book shrinkage method
""",
choices=statics.COVARIANCE_CHOICES,
)
parser.add_argument(
"-de",
"--d-ewma",
type=float,
default=self.params["smoothing_factor_ewma"]
if "smoothing_factor_ewma" in self.params
else 0.94,
dest="smoothing_factor_ewma",
help="Smoothing factor for ewma estimators",
)
parser.add_argument(
"-vs",
"--value-short",
type=float,
default=self.params["short_allocation"]
if "short_allocation" in self.params
else 0.0,
dest="short_allocation",
help="Amount to allocate to portfolio in short positions",
)
parser = self.po_parser(
parser,
rm=True,
mt=True,
ct=True,
p=True,
s=True,
e=True,
lr=True,
freq=True,
mn=True,
th=True,
r=True,
a=True,
v=True,
name="MINRISK_",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
if len(self.tickers) < 2:
console.print(
"Please have at least 2 loaded tickers to calculate weights.\n"
)
return
table = True
if "historic_period_sa" in vars(ns_parser):
table = False
console.print(
"[yellow]Optimization can take time. Please be patient...\n[/yellow]"
)
weights = optimizer_view.display_min_risk(
symbols=self.tickers,
interval=ns_parser.historic_period,
start_date=ns_parser.start_period,
end_date=ns_parser.end_period,
log_returns=ns_parser.log_returns,
freq=ns_parser.return_frequency,
maxnan=ns_parser.max_nan,
threshold=ns_parser.threshold_value,
method=ns_parser.nan_fill_method,
risk_measure=ns_parser.risk_measure.lower(),
risk_free_rate=ns_parser.risk_free,
alpha=ns_parser.significance_level,
target_return=ns_parser.target_return,
target_risk=ns_parser.target_risk,
mean=ns_parser.expected_return.lower(),
covariance=ns_parser.covariance.lower(),
d_ewma=ns_parser.smoothing_factor_ewma,
value=ns_parser.long_allocation,
value_short=ns_parser.short_allocation,
table=table,
)
self.portfolios[ns_parser.name.upper()] = weights
self.count += 1
self.update_runtime_choices()
if table is False:
weights_sa = optimizer_view.display_min_risk(
symbols=self.tickers,
interval=ns_parser.historic_period_sa,
start_date=ns_parser.start_period_sa,
end_date=ns_parser.end_period_sa,
log_returns=ns_parser.log_returns_sa,
freq=ns_parser.return_frequency_sa,
maxnan=ns_parser.max_nan_sa,
threshold=ns_parser.threshold_value_sa,
method=ns_parser.nan_fill_method_sa,
risk_measure=ns_parser.risk_measure_sa.lower(),
risk_free_rate=ns_parser.risk_free_sa,
alpha=ns_parser.significance_level_sa,
target_return=ns_parser.target_return_sa,
target_risk=ns_parser.target_risk_sa,
mean=ns_parser.expected_return_sa.lower(),
covariance=ns_parser.covariance_sa.lower(),
d_ewma=ns_parser.smoothing_factor_ewma_sa,
value=ns_parser.long_allocation_sa,
value_short=ns_parser.short_allocation_sa,
table=table,
)
console.print("")
optimizer_view.display_weights_sa(
weights=weights, weights_sa=weights_sa
)
if not ns_parser.categories:
categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
else:
categories = ns_parser.categories
for category in categories:
optimizer_view.display_categories_sa(
weights=weights,
weights_sa=weights_sa,
categories=self.categories,
column=category,
title="Category - " + category.title(),
)
    @log_start_end(log=logger)
    def call_maxutil(self, other_args: List[str]) -> None:
        """Process maxutil command.

        Optimizes the loaded tickers by maximizing a risk-averse utility
        function (controlled by ``--risk-aversion``), registers the weights
        in ``self.portfolios``, and — when sensitivity-analysis (``_sa``)
        arguments are present on the parsed namespace — runs the
        optimization a second time and displays side-by-side comparisons.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="maxutil",
            description="Maximizes a risk averse utility function",
        )
        parser.add_argument(
            "-ra",
            "--risk-aversion",
            type=float,
            dest="risk_aversion",
            # Defaults fall back to the loaded params file when available.
            default=self.params["risk_aversion"]
            if "risk_aversion" in self.params
            else 1,
            help="Risk aversion parameter",
        )
        parser.add_argument(
            "-tr",
            "--target-return",
            dest="target_return",
            default=self.params["target_return"]
            if "target_return" in self.params
            else -1,
            type=float,
            help="Constraint on minimum level of portfolio's return",
        )
        parser.add_argument(
            "-tk",
            "--target-risk",
            dest="target_risk",
            default=self.params["target_risk"] if "target_risk" in self.params else -1,
            type=float,
            help="Constraint on maximum level of portfolio's risk",
        )
        parser.add_argument(
            "-m",
            "--mean",
            default=self.params["expected_return"]
            if "expected_return" in self.params
            else "hist",
            dest="expected_return",
            help="Method used to estimate the expected return vector",
            choices=statics.MEAN_CHOICES,
        )
        parser.add_argument(
            "-cv",
            "--covariance",
            default=self.params["covariance"]
            if "covariance" in self.params
            else "hist",
            dest="covariance",
            help="""Method used to estimate covariance matrix. Possible values are
            'hist': historical method
            'ewma1': exponential weighted moving average with adjust=True
            'ewma2': exponential weighted moving average with adjust=False
            'ledoit': Ledoit and Wolf shrinkage method
            'oas': oracle shrinkage method
            'shrunk': scikit-learn shrunk method
            'gl': graphical lasso method
            'jlogo': j-logo covariance
            'fixed': takes average of eigenvalues above max Marchenko Pastour limit
            'spectral': makes zero eigenvalues above max Marchenko Pastour limit
            'shrink': Lopez de Prado's book shrinkage method
            """,
            choices=statics.COVARIANCE_CHOICES,
        )
        parser.add_argument(
            "-de",
            "--d-ewma",
            type=float,
            default=self.params["smoothing_factor_ewma"]
            if "smoothing_factor_ewma" in self.params
            else 0.94,
            dest="smoothing_factor_ewma",
            help="Smoothing factor for ewma estimators",
        )
        parser.add_argument(
            "-vs",
            "--value-short",
            dest="short_allocation",
            help="Amount to allocate to portfolio in short positions",
            type=float,
            default=self.params["short_allocation"]
            if "short_allocation" in self.params
            else 0.0,
        )
        # Attach the shared portfolio-optimization option set.
        parser = self.po_parser(
            parser,
            rm=True,
            mt=True,
            ct=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            r=True,
            a=True,
            v=True,
            name="MAXUTIL_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 loaded tickers to calculate weights.\n"
                )
                return
            # Presence of any "_sa" attribute means sensitivity-analysis mode:
            # suppress the single-run table and compare two runs instead.
            table = True
            if "historic_period_sa" in vars(ns_parser):
                table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
            weights = optimizer_view.display_max_util(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                risk_measure=ns_parser.risk_measure.lower(),
                risk_free_rate=ns_parser.risk_free,
                risk_aversion=ns_parser.risk_aversion,
                alpha=ns_parser.significance_level,
                target_return=ns_parser.target_return,
                target_risk=ns_parser.target_risk,
                mean=ns_parser.expected_return.lower(),
                covariance=ns_parser.covariance.lower(),
                d_ewma=ns_parser.smoothing_factor_ewma,
                value=ns_parser.long_allocation,
                value_short=ns_parser.short_allocation,
                table=table,
            )
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
            if table is False:
                # Second optimization run with the sensitivity-analysis inputs.
                weights_sa = optimizer_view.display_max_util(
                    symbols=self.tickers,
                    interval=ns_parser.historic_period_sa,
                    start_date=ns_parser.start_period_sa,
                    end_date=ns_parser.end_period_sa,
                    log_returns=ns_parser.log_returns_sa,
                    freq=ns_parser.return_frequency_sa,
                    maxnan=ns_parser.max_nan_sa,
                    threshold=ns_parser.threshold_value_sa,
                    method=ns_parser.nan_fill_method_sa,
                    risk_measure=ns_parser.risk_measure_sa.lower(),
                    risk_free_rate=ns_parser.risk_free_sa,
                    risk_aversion=ns_parser.risk_aversion_sa,
                    alpha=ns_parser.significance_level_sa,
                    target_return=ns_parser.target_return_sa,
                    target_risk=ns_parser.target_risk_sa,
                    mean=ns_parser.expected_return_sa.lower(),
                    covariance=ns_parser.covariance_sa.lower(),
                    d_ewma=ns_parser.smoothing_factor_ewma_sa,
                    value=ns_parser.long_allocation_sa,
                    value_short=ns_parser.short_allocation_sa,
                    table=table,
                )
                console.print("")
                optimizer_view.display_weights_sa(
                    weights=weights, weights_sa=weights_sa
                )
                # Break the comparison down by category groupings.
                if not ns_parser.categories:
                    categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
                else:
                    categories = ns_parser.categories
                for category in categories:
                    optimizer_view.display_categories_sa(
                        weights=weights,
                        weights_sa=weights_sa,
                        categories=self.categories,
                        column=category,
                        title="Category - " + category.title(),
                    )
    @log_start_end(log=logger)
    def call_maxret(self, other_args: List[str]) -> None:
        """Process maxret command.

        Optimizes the loaded tickers for maximum return, registers the
        weights in ``self.portfolios``, and — when sensitivity-analysis
        (``_sa``) arguments are present on the parsed namespace — runs the
        optimization a second time and displays side-by-side comparisons.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="maxret",
            description="Maximizes the portfolio's return ",
        )
        parser.add_argument(
            "-tr",
            "--target-return",
            dest="target_return",
            # Defaults fall back to the loaded params file when available.
            default=self.params["target_return"]
            if "target_return" in self.params
            else -1,
            type=float,
            help="Constraint on minimum level of portfolio's return",
        )
        parser.add_argument(
            "-tk",
            "--target-risk",
            dest="target_risk",
            default=self.params["target_risk"] if "target_risk" in self.params else -1,
            type=float,
            help="Constraint on maximum level of portfolio's risk",
        )
        parser.add_argument(
            "-m",
            "--mean",
            default=self.params["expected_return"]
            if "expected_return" in self.params
            else "hist",
            dest="expected_return",
            help="Method used to estimate the expected return vector",
            choices=statics.MEAN_CHOICES,
        )
        parser.add_argument(
            "-cv",
            "--covariance",
            default=self.params["covariance"]
            if "covariance" in self.params
            else "hist",
            dest="covariance",
            help="""Method used to estimate covariance matrix. Possible values are
            'hist': historical method
            'ewma1': exponential weighted moving average with adjust=True
            'ewma2': exponential weighted moving average with adjust=False
            'ledoit': Ledoit and Wolf shrinkage method
            'oas': oracle shrinkage method
            'shrunk': scikit-learn shrunk method
            'gl': graphical lasso method
            'jlogo': j-logo covariance
            'fixed': takes average of eigenvalues above max Marchenko Pastour limit
            'spectral': makes zero eigenvalues above max Marchenko Pastour limit
            'shrink': Lopez de Prado's book shrinkage method
            """,
            choices=statics.COVARIANCE_CHOICES,
        )
        parser.add_argument(
            "-de",
            "--d-ewma",
            type=float,
            default=self.params["smoothing_factor_ewma"]
            if "smoothing_factor_ewma" in self.params
            else 0.94,
            dest="smoothing_factor_ewma",
            help="Smoothing factor for ewma estimators",
        )
        parser.add_argument(
            "-vs",
            "--value-short",
            dest="short_allocation",
            help="Amount to allocate to portfolio in short positions",
            type=float,
            default=self.params["short_allocation"]
            if "short_allocation" in self.params
            else 0.0,
        )
        # Attach the shared portfolio-optimization option set.
        parser = self.po_parser(
            parser,
            rm=True,
            mt=True,
            ct=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            r=True,
            a=True,
            v=True,
            name="MAXRET_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 loaded tickers to calculate weights.\n"
                )
                return
            # Presence of any "_sa" attribute means sensitivity-analysis mode:
            # suppress the single-run table and compare two runs instead.
            table = True
            if "historic_period_sa" in vars(ns_parser):
                table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
            weights = optimizer_view.display_max_ret(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                risk_measure=ns_parser.risk_measure.lower(),
                risk_free_rate=ns_parser.risk_free,
                alpha=ns_parser.significance_level,
                target_return=ns_parser.target_return,
                target_risk=ns_parser.target_risk,
                mean=ns_parser.expected_return.lower(),
                covariance=ns_parser.covariance.lower(),
                d_ewma=ns_parser.smoothing_factor_ewma,
                value=ns_parser.long_allocation,
                value_short=ns_parser.short_allocation,
                table=table,
            )
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
            if table is False:
                # Second optimization run with the sensitivity-analysis inputs.
                weights_sa = optimizer_view.display_max_ret(
                    symbols=self.tickers,
                    interval=ns_parser.historic_period_sa,
                    start_date=ns_parser.start_period_sa,
                    end_date=ns_parser.end_period_sa,
                    log_returns=ns_parser.log_returns_sa,
                    freq=ns_parser.return_frequency_sa,
                    maxnan=ns_parser.max_nan_sa,
                    threshold=ns_parser.threshold_value_sa,
                    method=ns_parser.nan_fill_method_sa,
                    risk_measure=ns_parser.risk_measure_sa.lower(),
                    risk_free_rate=ns_parser.risk_free_sa,
                    alpha=ns_parser.significance_level_sa,
                    target_return=ns_parser.target_return_sa,
                    target_risk=ns_parser.target_risk_sa,
                    mean=ns_parser.expected_return_sa.lower(),
                    covariance=ns_parser.covariance_sa.lower(),
                    d_ewma=ns_parser.smoothing_factor_ewma_sa,
                    value=ns_parser.long_allocation_sa,
                    value_short=ns_parser.short_allocation_sa,
                    table=table,
                )
                console.print("")
                optimizer_view.display_weights_sa(
                    weights=weights, weights_sa=weights_sa
                )
                # Break the comparison down by category groupings.
                if not ns_parser.categories:
                    categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
                else:
                    categories = ns_parser.categories
                for category in categories:
                    optimizer_view.display_categories_sa(
                        weights=weights,
                        weights_sa=weights_sa,
                        categories=self.categories,
                        column=category,
                        title="Category - " + category.title(),
                    )
    @log_start_end(log=logger)
    def call_maxdiv(self, other_args: List[str]) -> None:
        """Process maxdiv command.

        Optimizes the loaded tickers by maximizing the diversification
        ratio (covariance-only objective — no risk-measure/return-target
        options), registers the weights in ``self.portfolios``, and — when
        sensitivity-analysis (``_sa``) arguments are present — runs the
        optimization a second time and displays side-by-side comparisons.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="maxdiv",
            description="Maximizes the portfolio's diversification ratio",
        )
        parser.add_argument(
            "-cv",
            "--covariance",
            # Defaults fall back to the loaded params file when available.
            default=self.params["covariance"]
            if "covariance" in self.params
            else "hist",
            dest="covariance",
            help="""Method used to estimate covariance matrix. Possible values are
            'hist': historical method
            'ewma1': exponential weighted moving average with adjust=True
            'ewma2': exponential weighted moving average with adjust=False
            'ledoit': Ledoit and Wolf shrinkage method
            'oas': oracle shrinkage method
            'shrunk': scikit-learn shrunk method
            'gl': graphical lasso method
            'jlogo': j-logo covariance
            'fixed': takes average of eigenvalues above max Marchenko Pastour limit
            'spectral': makes zero eigenvalues above max Marchenko Pastour limit
            'shrink': Lopez de Prado's book shrinkage method
            """,
            choices=statics.COVARIANCE_CHOICES,
        )
        parser.add_argument(
            "-de",
            "--d-ewma",
            type=float,
            default=self.params["smoothing_factor_ewma"]
            if "smoothing_factor_ewma" in self.params
            else 0.94,
            dest="smoothing_factor_ewma",
            help="Smoothing factor for ewma estimators",
        )
        parser.add_argument(
            "-vs",
            "--value-short",
            dest="short_allocation",
            help="Amount to allocate to portfolio in short positions",
            type=float,
            default=self.params["short_allocation"]
            if "short_allocation" in self.params
            else 0.0,
        )
        # Attach the shared option set (note: no rm/r/a flags — this
        # objective does not take a risk measure or significance level).
        parser = self.po_parser(
            parser,
            mt=True,
            ct=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            v=True,
            name="MAXDIV_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 loaded tickers to calculate weights.\n"
                )
                return
            # Presence of any "_sa" attribute means sensitivity-analysis mode:
            # suppress the single-run table and compare two runs instead.
            table = True
            if "historic_period_sa" in vars(ns_parser):
                table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
            weights = optimizer_view.display_max_div(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                covariance=ns_parser.covariance.lower(),
                d_ewma=ns_parser.smoothing_factor_ewma,
                value=ns_parser.long_allocation,
                value_short=ns_parser.short_allocation,
                table=table,
            )
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
            if table is False:
                # Second optimization run with the sensitivity-analysis inputs.
                weights_sa = optimizer_view.display_max_div(
                    symbols=self.tickers,
                    interval=ns_parser.historic_period_sa,
                    start_date=ns_parser.start_period_sa,
                    end_date=ns_parser.end_period_sa,
                    log_returns=ns_parser.log_returns_sa,
                    freq=ns_parser.return_frequency_sa,
                    maxnan=ns_parser.max_nan_sa,
                    threshold=ns_parser.threshold_value_sa,
                    method=ns_parser.nan_fill_method_sa,
                    covariance=ns_parser.covariance_sa.lower(),
                    d_ewma=ns_parser.smoothing_factor_ewma_sa,
                    value=ns_parser.long_allocation_sa,
                    value_short=ns_parser.short_allocation_sa,
                    table=table,
                )
                console.print("")
                optimizer_view.display_weights_sa(
                    weights=weights, weights_sa=weights_sa
                )
                # Break the comparison down by category groupings.
                if not ns_parser.categories:
                    categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
                else:
                    categories = ns_parser.categories
                for category in categories:
                    optimizer_view.display_categories_sa(
                        weights=weights,
                        weights_sa=weights_sa,
                        categories=self.categories,
                        column=category,
                        title="Category - " + category.title(),
                    )
    @log_start_end(log=logger)
    def call_maxdecorr(self, other_args: List[str]) -> None:
        """Process maxdecorr command.

        Optimizes the loaded tickers by maximizing decorrelation
        (covariance-only objective — no risk-measure/return-target
        options), registers the weights in ``self.portfolios``, and — when
        sensitivity-analysis (``_sa``) arguments are present — runs the
        optimization a second time and displays side-by-side comparisons.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="maxdecorr",
            description="Maximizes the portfolio's decorrelation",
        )
        parser.add_argument(
            "-cv",
            "--covariance",
            # Defaults fall back to the loaded params file when available.
            default=self.params["covariance"]
            if "covariance" in self.params
            else "hist",
            dest="covariance",
            help="""Method used to estimate covariance matrix. Possible values are
            'hist': historical method
            'ewma1': exponential weighted moving average with adjust=True
            'ewma2': exponential weighted moving average with adjust=False
            'ledoit': Ledoit and Wolf shrinkage method
            'oas': oracle shrinkage method
            'shrunk': scikit-learn shrunk method
            'gl': graphical lasso method
            'jlogo': j-logo covariance
            'fixed': takes average of eigenvalues above max Marchenko Pastour limit
            'spectral': makes zero eigenvalues above max Marchenko Pastour limit
            'shrink': Lopez de Prado's book shrinkage method
            """,
            choices=statics.COVARIANCE_CHOICES,
        )
        parser.add_argument(
            "-de",
            "--d-ewma",
            type=float,
            default=self.params["smoothing_factor_ewma"]
            if "smoothing_factor_ewma" in self.params
            else 0.94,
            dest="smoothing_factor_ewma",
            help="Smoothing factor for ewma estimators",
        )
        parser.add_argument(
            "-vs",
            "--value-short",
            dest="short_allocation",
            help="Amount to allocate to portfolio in short positions",
            type=float,
            default=self.params["short_allocation"]
            if "short_allocation" in self.params
            else 0.0,
        )
        # Attach the shared option set (note: no rm/r/a flags — this
        # objective does not take a risk measure or significance level).
        parser = self.po_parser(
            parser,
            mt=True,
            ct=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            v=True,
            name="MAXDECORR_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 loaded tickers to calculate weights.\n"
                )
                return
            # Presence of any "_sa" attribute means sensitivity-analysis mode:
            # suppress the single-run table and compare two runs instead.
            table = True
            if "historic_period_sa" in vars(ns_parser):
                table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
            weights = optimizer_view.display_max_decorr(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                covariance=ns_parser.covariance.lower(),
                d_ewma=ns_parser.smoothing_factor_ewma,
                value=ns_parser.long_allocation,
                value_short=ns_parser.short_allocation,
                table=table,
            )
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
            if table is False:
                # Second optimization run with the sensitivity-analysis inputs.
                weights_sa = optimizer_view.display_max_decorr(
                    symbols=self.tickers,
                    interval=ns_parser.historic_period_sa,
                    start_date=ns_parser.start_period_sa,
                    end_date=ns_parser.end_period_sa,
                    log_returns=ns_parser.log_returns_sa,
                    freq=ns_parser.return_frequency_sa,
                    maxnan=ns_parser.max_nan_sa,
                    threshold=ns_parser.threshold_value_sa,
                    method=ns_parser.nan_fill_method_sa,
                    covariance=ns_parser.covariance_sa.lower(),
                    d_ewma=ns_parser.smoothing_factor_ewma_sa,
                    value=ns_parser.long_allocation_sa,
                    value_short=ns_parser.short_allocation_sa,
                    table=table,
                )
                console.print("")
                optimizer_view.display_weights_sa(
                    weights=weights, weights_sa=weights_sa
                )
                # Break the comparison down by category groupings.
                if not ns_parser.categories:
                    categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
                else:
                    categories = ns_parser.categories
                for category in categories:
                    optimizer_view.display_categories_sa(
                        weights=weights,
                        weights_sa=weights_sa,
                        categories=self.categories,
                        column=category,
                        title="Category - " + category.title(),
                    )
@log_start_end(log=logger)
def call_blacklitterman(self, other_args: List[str]):
    """Process blacklitterman command.

    Parses the `blacklitterman` arguments, optimizes the loaded tickers
    with Black Litterman estimates via
    `optimizer_view.display_black_litterman`, stores the resulting
    weights in `self.portfolios`, and — when sensitivity-analysis
    (`*_sa`) arguments were supplied — repeats the optimization with the
    `_sa` values and displays weight/category comparison tables.

    Analyst views come either from the `-pv`/`-qv` flags or from an
    Excel file (`--file`); `--download` only writes a views template and
    returns without optimizing.

    Parameters
    ----------
    other_args : List[str]
        Raw command line arguments to parse.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="blacklitterman",
        description="Optimize portfolio using Black Litterman estimates",
    )
    parser.add_argument(
        "-bm",
        "--benchmark",
        type=str,
        default=None,
        dest="benchmark",
        help="portfolio name from current portfolio list",
    )
    parser.add_argument(
        "-o",
        "--objective",
        default=self.params["objective"]
        if "objective" in self.params
        else "Sharpe",
        dest="objective",
        help="Objective function used to optimize the portfolio",
        choices=statics.OBJECTIVE_CHOICES,
    )
    # Matrix P of views: rows separated by ';', entries within a row by ','.
    parser.add_argument(
        "-pv",
        "--p-views",
        type=lambda s: [
            [float(item) for item in row.split(",")] for row in s.split(";")
        ],
        default=self.params["p_views"] if "p_views" in self.params else None,
        dest="p_views",
        help="matrix P of analyst views",
    )
    # Vector Q of views: comma-separated floats, one entry per row of P.
    parser.add_argument(
        "-qv",
        "--q-views",
        type=lambda s: [float(item) for item in s.split(",")],
        default=self.params["q_views"] if "q_views" in self.params else None,
        dest="q_views",
        help="matrix Q of analyst views",
    )
    parser.add_argument(
        "-ra",
        "--risk-aversion",
        type=float,
        dest="risk_aversion",
        default=self.params["risk_aversion"]
        if "risk_aversion" in self.params
        else 1,
        help="Risk aversion parameter",
    )
    parser.add_argument(
        "-d",
        "--delta",
        default=self.params["delta"] if "delta" in self.params else None,
        dest="delta",
        type=float,
        help="Risk aversion factor of Black Litterman model",
    )
    # NOTE(review): `store_true` with a True default means passing -eq can
    # never switch equilibrium OFF (only a params-file False can) — confirm
    # this is intended; `-op` below uses `store_false` for the opposite effect.
    parser.add_argument(
        "-eq",
        "--equilibrium",
        action="store_true",
        default=self.params["equilibrium"]
        if "equilibrium" in self.params
        else True,
        dest="equilibrium",
        help="""If True excess returns are based on equilibrium market portfolio, if False
        excess returns are calculated as historical returns minus risk free rate.
        """,
    )
    parser.add_argument(
        "-op",
        "--optimize",
        action="store_false",
        default=self.params["optimize"] if "optimize" in self.params else True,
        dest="optimize",
        help="""If True Black Litterman estimates are used as inputs of mean variance model,
        if False returns equilibrium weights from Black Litterman model
        """,
    )
    parser.add_argument(
        "-vs",
        "--value-short",
        type=float,
        default=self.params["short_allocation"]
        if "short_allocation" in self.params
        else 0.0,
        dest="short_allocation",
        help="Amount to allocate to portfolio in short positions",
    )
    # Both file arguments append ".xlsx" unless the value is empty or
    # already carries the extension.
    parser.add_argument(
        "--file",
        type=lambda s: s if s.endswith(".xlsx") or len(s) == 0 else s + ".xlsx",
        dest="file",
        default="",
        help="Upload an Excel file with views for Black Litterman model",
    )
    parser.add_argument(
        "--download",
        type=lambda s: s if s.endswith(".xlsx") or len(s) == 0 else s + ".xlsx",
        dest="download",
        default="",
        help="Create a template to design Black Litterman model views",
    )
    # Attach the shared portfolio-optimization options (period, returns,
    # NaN handling, risk-free rate, allocation, name prefix "BL_", ...).
    parser = self.po_parser(
        parser,
        mt=True,
        ct=True,
        p=True,
        s=True,
        e=True,
        lr=True,
        freq=True,
        mn=True,
        th=True,
        r=True,
        v=True,
        name="BL_",
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        if len(self.tickers) < 2:
            console.print(
                "Please have at least 2 loaded tickers to calculate weights.\n"
            )
            return
        # --download: only write the views template, do not optimize.
        if len(ns_parser.download) > 0:
            file = (
                USER_EXPORTS_DIRECTORY / "portfolio" / "views" / ns_parser.download
            )
            excel_model.excel_bl_views(file=file, stocks=self.tickers, n=1)
            return
        # Views from Excel file take precedence over -pv/-qv flags.
        if ns_parser.file:
            excel_file = USER_PORTFOLIO_DATA_DIRECTORY / "views" / ns_parser.file
            p_views, q_views = excel_model.load_bl_views(excel_file=excel_file)
        else:
            p_views = ns_parser.p_views
            q_views = ns_parser.q_views
        if ns_parser.benchmark is None:
            benchmark = None
        else:
            # Benchmark is looked up among previously stored portfolios.
            benchmark = self.portfolios[ns_parser.benchmark.upper()]
        # Presence of any `_sa` argument switches into sensitivity-analysis
        # mode: suppress the single-run table and show comparisons instead.
        table = True
        if "historic_period_sa" in vars(ns_parser):
            table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
        weights = optimizer_view.display_black_litterman(
            symbols=self.tickers,
            p_views=p_views,
            q_views=q_views,
            interval=ns_parser.historic_period,
            start_date=ns_parser.start_period,
            end_date=ns_parser.end_period,
            log_returns=ns_parser.log_returns,
            freq=ns_parser.return_frequency,
            maxnan=ns_parser.max_nan,
            threshold=ns_parser.threshold_value,
            method=ns_parser.nan_fill_method,
            benchmark=benchmark,
            objective=ns_parser.objective.lower(),
            risk_free_rate=ns_parser.risk_free,
            risk_aversion=ns_parser.risk_aversion,
            delta=ns_parser.delta,
            equilibrium=ns_parser.equilibrium,
            optimize=ns_parser.optimize,
            value=ns_parser.long_allocation,
            value_short=ns_parser.short_allocation,
            table=table,
        )
        self.portfolios[ns_parser.name.upper()] = weights
        self.count += 1
        self.update_runtime_choices()
        if table is False:
            # Sensitivity run: same optimization with the `_sa` variants.
            if ns_parser.file_sa:
                excel_file = (
                    USER_PORTFOLIO_DATA_DIRECTORY / "views" / ns_parser.file_sa
                )
                p_views_sa, q_views_sa = excel_model.load_bl_views(
                    excel_file=excel_file
                )
            else:
                p_views_sa = ns_parser.p_views_sa
                q_views_sa = ns_parser.q_views_sa
            weights_sa = optimizer_view.display_black_litterman(
                symbols=self.tickers,
                p_views=p_views_sa,
                q_views=q_views_sa,
                interval=ns_parser.historic_period_sa,
                start_date=ns_parser.start_period_sa,
                end_date=ns_parser.end_period_sa,
                log_returns=ns_parser.log_returns_sa,
                freq=ns_parser.return_frequency_sa,
                maxnan=ns_parser.max_nan_sa,
                threshold=ns_parser.threshold_value_sa,
                method=ns_parser.nan_fill_method_sa,
                benchmark=benchmark,
                objective=ns_parser.objective_sa.lower(),
                risk_free_rate=ns_parser.risk_free_sa,
                risk_aversion=ns_parser.risk_aversion_sa,
                delta=ns_parser.delta_sa,
                equilibrium=ns_parser.equilibrium_sa,
                optimize=ns_parser.optimize_sa,
                value=ns_parser.long_allocation_sa,
                value_short=ns_parser.short_allocation_sa,
                table=table,
            )
            console.print("")
            optimizer_view.display_weights_sa(
                weights=weights, weights_sa=weights_sa
            )
            if not ns_parser.categories:
                categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
            else:
                categories = ns_parser.categories
            for category in categories:
                optimizer_view.display_categories_sa(
                    weights=weights,
                    weights_sa=weights_sa,
                    categories=self.categories,
                    column=category,
                    title="Category - " + category.title(),
                )
@log_start_end(log=logger)
def call_ef(self, other_args):
    """Process ef command.

    Plots randomly generated portfolios in risk/return space together
    with the efficient frontier, optionally adding the tangency line
    with the risk-free asset. Purely a display command: nothing is
    stored in `self.portfolios`.
    """
    ef_parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ef",
        description="""This function plots random portfolios based on their
        risk and returns and shows the efficient frontier.""",
    )
    ef_parser.add_argument(
        "-vs",
        "--value-short",
        dest="short_allocation",
        help="Amount to allocate to portfolio in short positions",
        type=float,
        default=self.params.get("short_allocation", 0.0),
    )
    # A bare leading argument (no dash) is treated as the portfolio count.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-n")
    ef_parser.add_argument(
        "-n",
        "--number-portfolios",
        default=self.params.get("amount_portfolios", 100),
        type=check_non_negative,
        dest="amount_portfolios",
        help="Number of portfolios to simulate",
    )
    ef_parser.add_argument(
        "-se",
        "--seed",
        default=self.params.get("random_seed", 123),
        type=check_non_negative,
        dest="random_seed",
        help="Seed used to generate random portfolios",
    )
    ef_parser.add_argument(
        "-t",
        "--tangency",
        action="store_true",
        dest="tangency",
        default=self.params.get("tangency", False),
        help="Adds the optimal line with the risk-free asset",
    )
    ef_parser.add_argument(
        "--no_plot",
        action="store_false",
        dest="plot_tickers",
        default=True,
        help="Whether or not to plot the tickers for the assets provided",
    )
    # Attach the shared portfolio-optimization options.
    ef_parser = self.po_parser(
        ef_parser,
        rm=True,
        mt=True,
        p=True,
        s=True,
        e=True,
        lr=True,
        freq=True,
        mn=True,
        th=True,
        r=True,
        a=True,
        v=True,
    )
    ns = self.parse_known_args_and_warn(ef_parser, other_args)
    if not ns:
        return
    if len(self.tickers) < 2:
        console.print(
            "Please have at least 2 loaded tickers to calculate weights.\n"
        )
        return
    optimizer_view.display_ef(
        symbols=self.tickers,
        interval=ns.historic_period,
        start_date=ns.start_period,
        end_date=ns.end_period,
        log_returns=ns.log_returns,
        freq=ns.return_frequency,
        maxnan=ns.max_nan,
        threshold=ns.threshold_value,
        method=ns.nan_fill_method,
        risk_measure=ns.risk_measure.lower(),
        risk_free_rate=ns.risk_free,
        alpha=ns.significance_level,
        value=ns.long_allocation,
        value_short=ns.short_allocation,
        n_portfolios=ns.amount_portfolios,
        seed=ns.random_seed,
        tangency=ns.tangency,
        plot_tickers=ns.plot_tickers,
    )
@log_start_end(log=logger)
def call_riskparity(self, other_args: List[str]):
    """Process riskparity command.

    Parses the `riskparity` arguments, builds a risk parity portfolio
    (risk budgeting approach) via `optimizer_view.display_risk_parity`,
    stores the weights in `self.portfolios`, and — when
    sensitivity-analysis (`*_sa`) arguments were supplied — repeats the
    optimization with the `_sa` values and displays comparison tables.

    Parameters
    ----------
    other_args : List[str]
        Raw command line arguments to parse.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="riskparity",
        description="""Build a risk parity portfolio based on risk
        budgeting approach""",
    )
    parser.add_argument(
        "-rm",
        "--risk-measure",
        default=self.params["risk_measure"]
        if "risk_measure" in self.params
        else "MV",
        dest="risk_measure",
        help="""Risk measure used to optimize the portfolio. Possible values are:
        'MV' : Variance
        'MAD' : Mean Absolute Deviation
        'MSV' : Semi Variance (Variance of negative returns)
        'FLPM' : First Lower Partial Moment
        'SLPM' : Second Lower Partial Moment
        'CVaR' : Conditional Value at Risk
        'EVaR' : Entropic Value at Risk
        'UCI' : Ulcer Index of uncompounded returns
        'CDaR' : Conditional Drawdown at Risk of uncompounded returns
        'EDaR' : Entropic Drawdown at Risk of uncompounded returns
        """,
        choices=statics.RISK_PARITY_CHOICES,
        metavar="RISK-MEASURE",
    )
    # Risk contribution constraint: comma-separated floats, one per asset.
    parser.add_argument(
        "-rc",
        "--risk-cont",
        type=lambda s: [float(item) for item in s.split(",")],
        default=self.params["risk_contribution"]
        if "risk_contribution" in self.params
        else None,
        dest="risk_contribution",
        help="vector of risk contribution constraint",
    )
    parser.add_argument(
        "-tr",
        "--target-return",
        dest="target_return",
        default=self.params["target_return"]
        if "target_return" in self.params
        else -1,
        type=float,
        help="Constraint on minimum level of portfolio's return",
    )
    parser.add_argument(
        "-de",
        "--d-ewma",
        type=float,
        default=self.params["smoothing_factor_ewma"]
        if "smoothing_factor_ewma" in self.params
        else 0.94,
        dest="smoothing_factor_ewma",
        help="Smoothing factor for ewma estimators",
    )
    # Attach the shared portfolio-optimization options (name prefix "RP_").
    parser = self.po_parser(
        parser,
        mt=True,
        ct=True,
        p=True,
        s=True,
        e=True,
        lr=True,
        freq=True,
        mn=True,
        th=True,
        r=True,
        a=True,
        v=True,
        name="RP_",
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        if len(self.tickers) < 2:
            console.print(
                "Please have at least 2 loaded tickers to calculate weights.\n"
            )
            return
        # Presence of any `_sa` argument switches into sensitivity-analysis
        # mode: suppress the single-run table and show comparisons instead.
        table = True
        if "historic_period_sa" in vars(ns_parser):
            table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
        weights = optimizer_view.display_risk_parity(
            symbols=self.tickers,
            interval=ns_parser.historic_period,
            start_date=ns_parser.start_period,
            end_date=ns_parser.end_period,
            log_returns=ns_parser.log_returns,
            freq=ns_parser.return_frequency,
            maxnan=ns_parser.max_nan,
            threshold=ns_parser.threshold_value,
            method=ns_parser.nan_fill_method,
            risk_measure=ns_parser.risk_measure.lower(),
            risk_cont=ns_parser.risk_contribution,
            risk_free_rate=ns_parser.risk_free,
            alpha=ns_parser.significance_level,
            target_return=ns_parser.target_return,
            # Fix: forward -de/--d-ewma — it was parsed above but never
            # passed on, so the flag was silently ignored (siblings such as
            # `relriskparity` do forward it; display_risk_parity is expected
            # to accept d_ewma like display_rel_risk_parity does).
            d_ewma=ns_parser.smoothing_factor_ewma,
            value=ns_parser.long_allocation,
            table=table,
        )
        self.portfolios[ns_parser.name.upper()] = weights
        self.count += 1
        self.update_runtime_choices()
        if table is False:
            # Sensitivity run with the `_sa` argument variants.
            weights_sa = optimizer_view.display_risk_parity(
                symbols=self.tickers,
                interval=ns_parser.historic_period_sa,
                start_date=ns_parser.start_period_sa,
                end_date=ns_parser.end_period_sa,
                log_returns=ns_parser.log_returns_sa,
                freq=ns_parser.return_frequency_sa,
                maxnan=ns_parser.max_nan_sa,
                threshold=ns_parser.threshold_value_sa,
                method=ns_parser.nan_fill_method_sa,
                risk_measure=ns_parser.risk_measure_sa.lower(),
                risk_cont=ns_parser.risk_contribution_sa,
                risk_free_rate=ns_parser.risk_free_sa,
                alpha=ns_parser.significance_level_sa,
                target_return=ns_parser.target_return_sa,
                # Fix (see above): forward the sensitivity-analysis variant.
                d_ewma=ns_parser.smoothing_factor_ewma_sa,
                value=ns_parser.long_allocation_sa,
                table=table,
            )
            console.print("")
            optimizer_view.display_weights_sa(
                weights=weights, weights_sa=weights_sa
            )
            if not ns_parser.categories:
                categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
            else:
                categories = ns_parser.categories
            for category in categories:
                optimizer_view.display_categories_sa(
                    weights=weights,
                    weights_sa=weights_sa,
                    categories=self.categories,
                    column=category,
                    title="Category - " + category.title(),
                )
@log_start_end(log=logger)
def call_relriskparity(self, other_args: List[str]):
    """Process relriskparity command.

    Builds a relaxed risk parity portfolio (least squares approach) via
    `optimizer_view.display_rel_risk_parity`, stores the weights in
    `self.portfolios`, and — when sensitivity-analysis (`*_sa`)
    arguments are present — reruns the optimization with the `_sa`
    values and displays weight/category comparison tables.
    """
    rrp_parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="relriskparity",
        description="""Build a relaxed risk parity portfolio based on
        least squares approach""",
    )
    rrp_parser.add_argument(
        "-ve",
        "--version",
        default=self.params.get("risk_parity_model", "A"),
        dest="risk_parity_model",
        help="""version of relaxed risk parity model: Possible values are:
        'A': risk parity without regularization and penalization constraints
        'B': with regularization constraint but without penalization constraint
        'C': with regularization and penalization constraints""",
        choices=statics.REL_RISK_PARITY_CHOICES,
        metavar="VERSION",
    )
    # Risk contribution constraint: comma-separated floats, one per asset.
    rrp_parser.add_argument(
        "-rc",
        "--risk-cont",
        type=lambda s: [float(item) for item in s.split(",")],
        default=self.params.get("risk_contribution"),
        dest="risk_contribution",
        help="Vector of risk contribution constraints",
    )
    rrp_parser.add_argument(
        "-pf",
        "--penal-factor",
        type=float,
        dest="penal_factor",
        default=self.params.get("penal_factor", 1),
        help="""The penalization factor of penalization constraints. Only
        used with version 'C'.""",
    )
    rrp_parser.add_argument(
        "-tr",
        "--target-return",
        dest="target_return",
        default=self.params.get("target_return", -1),
        type=float,
        help="Constraint on minimum level of portfolio's return",
    )
    rrp_parser.add_argument(
        "-de",
        "--d-ewma",
        type=float,
        default=self.params.get("smoothing_factor_ewma", 0.94),
        dest="smoothing_factor_ewma",
        help="Smoothing factor for ewma estimators",
    )
    # Attach the shared portfolio-optimization options (name prefix "RRP_").
    rrp_parser = self.po_parser(
        rrp_parser,
        mt=True,
        ct=True,
        p=True,
        s=True,
        e=True,
        lr=True,
        freq=True,
        mn=True,
        th=True,
        v=True,
        name="RRP_",
    )
    ns = self.parse_known_args_and_warn(rrp_parser, other_args)
    if not ns:
        return
    if len(self.tickers) < 2:
        console.print(
            "Please have at least 2 loaded tickers to calculate weights.\n"
        )
        return
    # Any `_sa` argument switches into sensitivity-analysis mode.
    run_sensitivity = "historic_period_sa" in vars(ns)
    if run_sensitivity:
        console.print(
            "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
        )
    weights = optimizer_view.display_rel_risk_parity(
        symbols=self.tickers,
        interval=ns.historic_period,
        start_date=ns.start_period,
        end_date=ns.end_period,
        log_returns=ns.log_returns,
        freq=ns.return_frequency,
        maxnan=ns.max_nan,
        threshold=ns.threshold_value,
        method=ns.nan_fill_method,
        version=ns.risk_parity_model,
        risk_cont=ns.risk_contribution,
        penal_factor=ns.penal_factor,
        target_return=ns.target_return,
        d_ewma=ns.smoothing_factor_ewma,
        value=ns.long_allocation,
        table=not run_sensitivity,
    )
    self.portfolios[ns.name.upper()] = weights
    self.count += 1
    self.update_runtime_choices()
    if not run_sensitivity:
        return
    # Sensitivity run with the `_sa` argument variants, then comparisons.
    weights_sa = optimizer_view.display_rel_risk_parity(
        symbols=self.tickers,
        interval=ns.historic_period_sa,
        start_date=ns.start_period_sa,
        end_date=ns.end_period_sa,
        log_returns=ns.log_returns_sa,
        freq=ns.return_frequency_sa,
        maxnan=ns.max_nan_sa,
        threshold=ns.threshold_value_sa,
        method=ns.nan_fill_method_sa,
        version=ns.risk_parity_model_sa,
        risk_cont=ns.risk_contribution_sa,
        penal_factor=ns.penal_factor_sa,
        target_return=ns.target_return_sa,
        d_ewma=ns.smoothing_factor_ewma_sa,
        value=ns.long_allocation_sa,
        table=not run_sensitivity,
    )
    console.print("")
    optimizer_view.display_weights_sa(weights=weights, weights_sa=weights_sa)
    categories = ns.categories or ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
    for category in categories:
        optimizer_view.display_categories_sa(
            weights=weights,
            weights_sa=weights_sa,
            categories=self.categories,
            column=category,
            title="Category - " + category.title(),
        )
@log_start_end(log=logger)
def call_hrp(self, other_args: List[str]):
    """Process hierarchical risk parity command.

    Parses the `hrp` arguments, builds a hierarchical risk parity
    portfolio via `optimizer_view.display_hrp`, stores the weights in
    `self.portfolios`, and — when sensitivity-analysis (`*_sa`)
    arguments were supplied — repeats the optimization with the `_sa`
    values and displays weight/category comparison tables.

    Parameters
    ----------
    other_args : List[str]
        Raw command line arguments to parse.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="hrp",
        description="Builds a hierarchical risk parity portfolio",
    )
    parser.add_argument(
        "-cd",
        "--codependence",
        default=self.params["co_dependence"]
        if "co_dependence" in self.params
        else "pearson",
        dest="co_dependence",
        help="""The codependence or similarity matrix used to build the
        distance metric and clusters. Possible values are:
        'pearson': pearson correlation matrix
        'spearman': spearman correlation matrix
        'abs_pearson': absolute value of pearson correlation matrix
        'abs_spearman': absolute value of spearman correlation matrix
        'distance': distance correlation matrix
        'mutual_info': mutual information codependence matrix
        'tail': tail index codependence matrix""",
        choices=statics.CODEPENDENCE_CHOICES,
    )
    parser.add_argument(
        "-cv",
        "--covariance",
        default=self.params["covariance"]
        if "covariance" in self.params
        else "hist",
        dest="covariance",
        help="""Method used to estimate covariance matrix. Possible values are
        'hist': historical method
        'ewma1': exponential weighted moving average with adjust=True
        'ewma2': exponential weighted moving average with adjust=False
        'ledoit': Ledoit and Wolf shrinkage method
        'oas': oracle shrinkage method
        'shrunk': scikit-learn shrunk method
        'gl': graphical lasso method
        'jlogo': j-logo covariance
        'fixed': takes average of eigenvalues above max Marchenko Pastour limit
        'spectral': makes zero eigenvalues above max Marchenko Pastour limit
        'shrink': Lopez de Prado's book shrinkage method
        """,
        choices=statics.COVARIANCE_CHOICES,
    )
    parser.add_argument(
        "-rm",
        "--risk-measure",
        default=self.params["risk_measure"]
        if "risk_measure" in self.params
        else "MV",
        dest="risk_measure",
        help="""Risk measure used to optimize the portfolio. Possible values are:
        'MV' : Variance
        'MAD' : Mean Absolute Deviation
        'GMD' : Gini Mean Difference
        'MSV' : Semi Variance (Variance of negative returns)
        'FLPM' : First Lower Partial Moment
        'SLPM' : Second Lower Partial Moment
        'VaR' : Value at Risk
        'CVaR' : Conditional Value at Risk
        'TG' : Tail Gini
        'EVaR' : Entropic Value at Risk
        'WR' : Worst Realization
        'RG' : Range
        'CVRG' : CVaR Range
        'TGRG' : Tail Gini Range
        'ADD' : Average Drawdown of uncompounded returns
        'UCI' : Ulcer Index of uncompounded returns
        'DaR' : Drawdown at Risk of uncompounded returns
        'CDaR' : Conditional Drawdown at Risk of uncompounded returns
        'EDaR' : Entropic Drawdown at Risk of uncompounded returns
        'MDD' : Maximum Drawdown of uncompounded returns
        'ADD_Rel' : Average Drawdown of compounded returns
        'UCI_Rel' : Ulcer Index of compounded returns
        'DaR_Rel' : Drawdown at Risk of compounded returns
        'CDaR_Rel' : Conditional Drawdown at Risk of compounded returns
        'EDaR_Rel' : Entropic Drawdown at Risk of compounded returns
        'MDD_Rel' : Maximum Drawdown of compounded returns
        """,
        choices=statics.HCP_CHOICES,
        metavar="RISK-MEASURE",
    )
    parser.add_argument(
        "-as",
        "--a-sim",
        type=int,
        default=self.params["cvar_simulations_losses"]
        if "cvar_simulations_losses" in self.params
        else 100,
        dest="cvar_simulations_losses",
        help="""Number of CVaRs used to approximate Tail Gini of losses.
        The default is 100""",
    )
    parser.add_argument(
        "-b",
        "--beta",
        type=float,
        default=self.params["cvar_significance"]
        if "cvar_significance" in self.params
        else None,
        dest="cvar_significance",
        help="""Significance level of CVaR and Tail Gini of gains. If
        empty it duplicates alpha""",
    )
    parser.add_argument(
        "-bs",
        "--b-sim",
        type=int,
        default=self.params["cvar_simulations_gains"]
        if "cvar_simulations_gains" in self.params
        else None,
        dest="cvar_simulations_gains",
        help="""Number of CVaRs used to approximate Tail Gini of gains.
        If empty it duplicates a_sim value""",
    )
    parser.add_argument(
        "-lk",
        "--linkage",
        default=self.params["linkage"] if "linkage" in self.params else "single",
        dest="linkage",
        help="Linkage method of hierarchical clustering",
        choices=statics.LINKAGE_CHOICES,
        metavar="LINKAGE",
    )
    parser.add_argument(
        "-k",
        type=int,
        default=self.params["amount_clusters"]
        if "amount_clusters" in self.params
        else None,
        dest="amount_clusters",
        help="Number of clusters specified in advance",
    )
    parser.add_argument(
        "-mk",
        "--max-k",
        type=int,
        default=self.params["max_clusters"]
        if "max_clusters" in self.params
        else 10,
        dest="max_clusters",
        help="""Max number of clusters used by the two difference gap
        statistic to find the optimal number of clusters. If k is
        empty this value is used""",
    )
    parser.add_argument(
        "-bi",
        "--bins-info",
        default=self.params["amount_bins"]
        if "amount_bins" in self.params
        else "KN",
        dest="amount_bins",
        help="Number of bins used to calculate the variation of information",
        choices=statics.BINS_CHOICES,
    )
    parser.add_argument(
        "-at",
        "--alpha-tail",
        type=float,
        default=self.params["alpha_tail"] if "alpha_tail" in self.params else 0.05,
        dest="alpha_tail",
        help="""Significance level for lower tail dependence index, only
        used when when codependence value is 'tail' """,
    )
    parser.add_argument(
        "-lo",
        "--leaf-order",
        default=self.params["leaf_order"] if "leaf_order" in self.params else True,
        dest="leaf_order",
        help="""Indicates if the cluster are ordered so that the distance
        between successive leaves is minimal""",
    )
    parser.add_argument(
        "-de",
        "--d-ewma",
        type=float,
        default=self.params["smoothing_factor_ewma"]
        if "smoothing_factor_ewma" in self.params
        else 0.94,
        dest="smoothing_factor_ewma",
        help="Smoothing factor for ewma estimators",
    )
    parser = self.po_parser(
        parser,
        mt=True,
        ct=True,
        p=True,
        s=True,
        e=True,
        lr=True,
        freq=True,
        mn=True,
        th=True,
        r=True,
        a=True,
        v=True,
        # Consistency fix: was "_HRP", which broke the PREFIX_ naming
        # convention used by every sibling command ("RP_", "RRP_",
        # "HERC_", "BL_", "MAXDECORR_") and produced names like "_HRP1".
        name="HRP_",
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        if len(self.tickers) < 2:
            console.print(
                "Please have at least 2 loaded tickers to calculate weights.\n"
            )
            return
        # Presence of any `_sa` argument switches into sensitivity-analysis
        # mode: suppress the single-run table and show comparisons instead.
        table = True
        if "historic_period_sa" in vars(ns_parser):
            table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
        weights = optimizer_view.display_hrp(
            symbols=self.tickers,
            interval=ns_parser.historic_period,
            start_date=ns_parser.start_period,
            end_date=ns_parser.end_period,
            log_returns=ns_parser.log_returns,
            freq=ns_parser.return_frequency,
            maxnan=ns_parser.max_nan,
            threshold=ns_parser.threshold_value,
            method=ns_parser.nan_fill_method,
            codependence=ns_parser.co_dependence.lower(),
            covariance=ns_parser.covariance.lower(),
            risk_measure=ns_parser.risk_measure.lower(),
            risk_free_rate=ns_parser.risk_free,
            alpha=ns_parser.significance_level,
            a_sim=ns_parser.cvar_simulations_losses,
            beta=ns_parser.cvar_significance,
            b_sim=ns_parser.cvar_simulations_gains,
            linkage=ns_parser.linkage.lower(),
            k=ns_parser.amount_clusters,
            max_k=ns_parser.max_clusters,
            bins_info=ns_parser.amount_bins.upper(),
            alpha_tail=ns_parser.alpha_tail,
            leaf_order=ns_parser.leaf_order,
            d_ewma=ns_parser.smoothing_factor_ewma,
            value=ns_parser.long_allocation,
            table=table,
        )
        self.portfolios[ns_parser.name.upper()] = weights
        self.count += 1
        self.update_runtime_choices()
        if table is False:
            # Sensitivity run with the `_sa` argument variants.
            weights_sa = optimizer_view.display_hrp(
                symbols=self.tickers,
                interval=ns_parser.historic_period_sa,
                start_date=ns_parser.start_period_sa,
                end_date=ns_parser.end_period_sa,
                log_returns=ns_parser.log_returns_sa,
                freq=ns_parser.return_frequency_sa,
                maxnan=ns_parser.max_nan_sa,
                threshold=ns_parser.threshold_value_sa,
                method=ns_parser.nan_fill_method_sa,
                codependence=ns_parser.co_dependence_sa.lower(),
                covariance=ns_parser.covariance_sa.lower(),
                risk_measure=ns_parser.risk_measure_sa.lower(),
                risk_free_rate=ns_parser.risk_free_sa,
                alpha=ns_parser.significance_level_sa,
                a_sim=ns_parser.cvar_simulations_losses_sa,
                beta=ns_parser.cvar_significance_sa,
                b_sim=ns_parser.cvar_simulations_gains_sa,
                linkage=ns_parser.linkage_sa.lower(),
                k=ns_parser.amount_clusters_sa,
                max_k=ns_parser.max_clusters_sa,
                bins_info=ns_parser.amount_bins_sa.upper(),
                alpha_tail=ns_parser.alpha_tail_sa,
                leaf_order=ns_parser.leaf_order_sa,
                d_ewma=ns_parser.smoothing_factor_ewma_sa,
                value=ns_parser.long_allocation_sa,
                table=table,
            )
            console.print("")
            optimizer_view.display_weights_sa(
                weights=weights, weights_sa=weights_sa
            )
            if not ns_parser.categories:
                categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
            else:
                categories = ns_parser.categories
            for category in categories:
                optimizer_view.display_categories_sa(
                    weights=weights,
                    weights_sa=weights_sa,
                    categories=self.categories,
                    column=category,
                    title="Category - " + category.title(),
                )
@log_start_end(log=logger)
def call_herc(self, other_args: List[str]):
    """Process hierarchical equal risk contribution command.

    Parses the `herc` arguments, builds a hierarchical equal risk
    contribution portfolio via `optimizer_view.display_herc`, stores
    the weights in `self.portfolios`, and — when sensitivity-analysis
    (`*_sa`) arguments were supplied — repeats the optimization with
    the `_sa` values and displays weight/category comparison tables.

    Parameters
    ----------
    other_args : List[str]
        Raw command line arguments to parse.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="herc",
        description="Builds a hierarchical equal risk contribution portfolio",
    )
    parser.add_argument(
        "-cd",
        "--codependence",
        # Consistency fix: honor the loaded parameter file like the
        # identical option in `hrp` does, instead of hard-coding "pearson".
        default=self.params["co_dependence"]
        if "co_dependence" in self.params
        else "pearson",
        dest="co_dependence",
        help="""The codependence or similarity matrix used to build the
        distance metric and clusters. Possible values are:
        'pearson': pearson correlation matrix
        'spearman': spearman correlation matrix
        'abs_pearson': absolute value of pearson correlation matrix
        'abs_spearman': absolute value of spearman correlation matrix
        'distance': distance correlation matrix
        'mutual_info': mutual information codependence matrix
        'tail': tail index codependence matrix""",
        choices=statics.CODEPENDENCE_CHOICES,
    )
    parser.add_argument(
        "-cv",
        "--covariance",
        default=self.params["covariance"]
        if "covariance" in self.params
        else "hist",
        dest="covariance",
        help="""Method used to estimate covariance matrix. Possible values are
        'hist': historical method
        'ewma1': exponential weighted moving average with adjust=True
        'ewma2': exponential weighted moving average with adjust=False
        'ledoit': Ledoit and Wolf shrinkage method
        'oas': oracle shrinkage method
        'shrunk': scikit-learn shrunk method
        'gl': graphical lasso method
        'jlogo': j-logo covariance
        'fixed': takes average of eigenvalues above max Marchenko Pastour limit
        'spectral': makes zero eigenvalues above max Marchenko Pastour limit
        'shrink': Lopez de Prado's book shrinkage method
        """,
        choices=statics.COVARIANCE_CHOICES,
    )
    parser.add_argument(
        "-rm",
        "--risk-measure",
        default=self.params["risk_measure"]
        if "risk_measure" in self.params
        else "MV",
        dest="risk_measure",
        help="""Risk measure used to optimize the portfolio. Possible values are:
        'MV' : Variance
        'MAD' : Mean Absolute Deviation
        'GMD' : Gini Mean Difference
        'MSV' : Semi Variance (Variance of negative returns)
        'FLPM' : First Lower Partial Moment
        'SLPM' : Second Lower Partial Moment
        'VaR' : Value at Risk
        'CVaR' : Conditional Value at Risk
        'TG' : Tail Gini
        'EVaR' : Entropic Value at Risk
        'WR' : Worst Realization
        'RG' : Range
        'CVRG' : CVaR Range
        'TGRG' : Tail Gini Range
        'ADD' : Average Drawdown of uncompounded returns
        'UCI' : Ulcer Index of uncompounded returns
        'DaR' : Drawdown at Risk of uncompounded returns
        'CDaR' : Conditional Drawdown at Risk of uncompounded returns
        'EDaR' : Entropic Drawdown at Risk of uncompounded returns
        'MDD' : Maximum Drawdown of uncompounded returns
        'ADD_Rel' : Average Drawdown of compounded returns
        'UCI_Rel' : Ulcer Index of compounded returns
        'DaR_Rel' : Drawdown at Risk of compounded returns
        'CDaR_Rel' : Conditional Drawdown at Risk of compounded returns
        'EDaR_Rel' : Entropic Drawdown at Risk of compounded returns
        'MDD_Rel' : Maximum Drawdown of compounded returns
        """,
        choices=statics.HCP_CHOICES,
        metavar="RISK-MEASURE",
    )
    parser.add_argument(
        "-as",
        "--a-sim",
        type=int,
        default=self.params["cvar_simulations_losses"]
        if "cvar_simulations_losses" in self.params
        else 100,
        dest="cvar_simulations_losses",
        help="""Number of CVaRs used to approximate Tail Gini of losses.
        The default is 100""",
    )
    parser.add_argument(
        "-b",
        "--beta",
        type=float,
        default=self.params["cvar_significance"]
        if "cvar_significance" in self.params
        else None,
        dest="cvar_significance",
        help="""Significance level of CVaR and Tail Gini of gains. If
        empty it duplicates alpha""",
    )
    parser.add_argument(
        "-bs",
        "--b-sim",
        type=int,
        default=self.params["cvar_simulations_gains"]
        if "cvar_simulations_gains" in self.params
        else None,
        dest="cvar_simulations_gains",
        help="""Number of CVaRs used to approximate Tail Gini of gains.
        If empty it duplicates a_sim value""",
    )
    parser.add_argument(
        "-lk",
        "--linkage",
        default=self.params["linkage"] if "linkage" in self.params else "single",
        dest="linkage",
        help="Linkage method of hierarchical clustering",
        choices=statics.LINKAGE_CHOICES,
        metavar="LINKAGE",
    )
    parser.add_argument(
        "-k",
        type=int,
        default=self.params["amount_clusters"]
        if "amount_clusters" in self.params
        else None,
        dest="amount_clusters",
        help="Number of clusters specified in advance",
    )
    parser.add_argument(
        "-mk",
        "--max-k",
        type=int,
        default=self.params["max_clusters"]
        if "max_clusters" in self.params
        else 10,
        dest="max_clusters",
        help="""Max number of clusters used by the two difference gap
        statistic to find the optimal number of clusters. If k is
        empty this value is used""",
    )
    parser.add_argument(
        "-bi",
        "--bins-info",
        default=self.params["amount_bins"]
        if "amount_bins" in self.params
        else "KN",
        dest="amount_bins",
        help="Number of bins used to calculate the variation of information",
        choices=statics.BINS_CHOICES,
    )
    parser.add_argument(
        "-at",
        "--alpha-tail",
        type=float,
        default=self.params["alpha_tail"] if "alpha_tail" in self.params else 0.05,
        dest="alpha_tail",
        help="""Significance level for lower tail dependence index, only
        used when when codependence value is 'tail' """,
    )
    parser.add_argument(
        "-lo",
        "--leaf-order",
        default=self.params["leaf_order"] if "leaf_order" in self.params else True,
        dest="leaf_order",
        help="""Indicates if the cluster are ordered so that the distance
        between successive leaves is minimal""",
    )
    parser.add_argument(
        "-de",
        "--d-ewma",
        type=float,
        default=self.params["smoothing_factor_ewma"]
        if "smoothing_factor_ewma" in self.params
        else 0.94,
        dest="smoothing_factor_ewma",
        help="Smoothing factor for ewma estimators",
    )
    # Attach the shared portfolio-optimization options (name prefix "HERC_").
    parser = self.po_parser(
        parser,
        mt=True,
        ct=True,
        p=True,
        s=True,
        e=True,
        lr=True,
        freq=True,
        mn=True,
        th=True,
        r=True,
        a=True,
        v=True,
        name="HERC_",
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        if len(self.tickers) < 2:
            console.print(
                "Please have at least 2 loaded tickers to calculate weights.\n"
            )
            return
        # Presence of any `_sa` argument switches into sensitivity-analysis
        # mode: suppress the single-run table and show comparisons instead.
        table = True
        if "historic_period_sa" in vars(ns_parser):
            table = False
            console.print(
                "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
            )
        weights = optimizer_view.display_herc(
            symbols=self.tickers,
            interval=ns_parser.historic_period,
            start_date=ns_parser.start_period,
            end_date=ns_parser.end_period,
            log_returns=ns_parser.log_returns,
            freq=ns_parser.return_frequency,
            maxnan=ns_parser.max_nan,
            threshold=ns_parser.threshold_value,
            method=ns_parser.nan_fill_method,
            codependence=ns_parser.co_dependence.lower(),
            covariance=ns_parser.covariance.lower(),
            risk_measure=ns_parser.risk_measure.lower(),
            risk_free_rate=ns_parser.risk_free,
            alpha=ns_parser.significance_level,
            a_sim=ns_parser.cvar_simulations_losses,
            beta=ns_parser.cvar_significance,
            b_sim=ns_parser.cvar_simulations_gains,
            linkage=ns_parser.linkage.lower(),
            k=ns_parser.amount_clusters,
            max_k=ns_parser.max_clusters,
            bins_info=ns_parser.amount_bins.upper(),
            alpha_tail=ns_parser.alpha_tail,
            leaf_order=ns_parser.leaf_order,
            d_ewma=ns_parser.smoothing_factor_ewma,
            value=ns_parser.long_allocation,
            table=table,
        )
        self.portfolios[ns_parser.name.upper()] = weights
        self.count += 1
        self.update_runtime_choices()
        if table is False:
            # Sensitivity run with the `_sa` argument variants.
            weights_sa = optimizer_view.display_herc(
                symbols=self.tickers,
                interval=ns_parser.historic_period_sa,
                start_date=ns_parser.start_period_sa,
                end_date=ns_parser.end_period_sa,
                log_returns=ns_parser.log_returns_sa,
                freq=ns_parser.return_frequency_sa,
                maxnan=ns_parser.max_nan_sa,
                threshold=ns_parser.threshold_value_sa,
                method=ns_parser.nan_fill_method_sa,
                codependence=ns_parser.co_dependence_sa.lower(),
                covariance=ns_parser.covariance_sa.lower(),
                risk_measure=ns_parser.risk_measure_sa.lower(),
                risk_free_rate=ns_parser.risk_free_sa,
                alpha=ns_parser.significance_level_sa,
                a_sim=ns_parser.cvar_simulations_losses_sa,
                beta=ns_parser.cvar_significance_sa,
                b_sim=ns_parser.cvar_simulations_gains_sa,
                linkage=ns_parser.linkage_sa.lower(),
                k=ns_parser.amount_clusters_sa,
                max_k=ns_parser.max_clusters_sa,
                bins_info=ns_parser.amount_bins_sa.upper(),
                alpha_tail=ns_parser.alpha_tail_sa,
                leaf_order=ns_parser.leaf_order_sa,
                d_ewma=ns_parser.smoothing_factor_ewma_sa,
                value=ns_parser.long_allocation_sa,
                table=table,
            )
            console.print("")
            optimizer_view.display_weights_sa(
                weights=weights, weights_sa=weights_sa
            )
            if not ns_parser.categories:
                categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
            else:
                categories = ns_parser.categories
            for category in categories:
                optimizer_view.display_categories_sa(
                    weights=weights,
                    weights_sa=weights_sa,
                    categories=self.categories,
                    column=category,
                    title="Category - " + category.title(),
                )
    @log_start_end(log=logger)
    def call_nco(self, other_args: List[str]):
        """Process nested clustered optimization command.

        Builds the argparse parser for the `nco` command, runs the nested
        clustered optimization through ``optimizer_view.display_nco``, stores
        the resulting weights in ``self.portfolios`` and, when sensitivity
        analysis parameters (``*_sa``-suffixed) are present on the parsed
        namespace, runs a second optimization and displays a comparison of
        both weight sets overall and per category.

        Parameters
        ----------
        other_args : List[str]
            Raw command line arguments to be parsed.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="nco",
            description="Builds a nested clustered optimization portfolio",
        )
        # Each default falls back to a previously configured value in
        # self.params (if any) before using the hard-coded default.
        parser.add_argument(
            "-cd",
            "--codependence",
            default=self.params["co_dependence"]
            if "co_dependence" in self.params
            else "pearson",
            dest="co_dependence",
            help="""The codependence or similarity matrix used to build the
                distance metric and clusters. Possible values are:
                'pearson': pearson correlation matrix
                'spearman': spearman correlation matrix
                'abs_pearson': absolute value of pearson correlation matrix
                'abs_spearman': absolute value of spearman correlation matrix
                'distance': distance correlation matrix
                'mutual_info': mutual information codependence matrix
                'tail': tail index codependence matrix""",
            choices=statics.CODEPENDENCE_CHOICES,
        )
        parser.add_argument(
            "-cv",
            "--covariance",
            default=self.params["covariance"]
            if "covariance" in self.params
            else "hist",
            dest="covariance",
            help="""Method used to estimate covariance matrix. Possible values are
                'hist': historical method
                'ewma1': exponential weighted moving average with adjust=True
                'ewma2': exponential weighted moving average with adjust=False
                'ledoit': Ledoit and Wolf shrinkage method
                'oas': oracle shrinkage method
                'shrunk': scikit-learn shrunk method
                'gl': graphical lasso method
                'jlogo': j-logo covariance
                'fixed': takes average of eigenvalues above max Marchenko Pastour limit
                'spectral': makes zero eigenvalues above max Marchenko Pastour limit
                'shrink': Lopez de Prado's book shrinkage method
            """,
            choices=statics.COVARIANCE_CHOICES,
        )
        parser.add_argument(
            "-o",
            "--objective",
            default=self.params["objective"]
            if "objective" in self.params
            else "MinRisk",
            dest="objective",
            help="Objective function used to optimize the portfolio",
            choices=statics.NCO_OBJECTIVE_CHOICES,
        )
        # NOTE(review): the risk-aversion default is read from the
        # "long_allocation" params key — looks like a copy-paste from another
        # argument; confirm whether "risk_aversion" was intended.
        parser.add_argument(
            "-ra",
            "--risk-aversion",
            type=float,
            dest="risk_aversion",
            default=self.params["long_allocation"]
            if "long_allocation" in self.params
            else 1,
            help="Risk aversion parameter",
        )
        parser.add_argument(
            "-lk",
            "--linkage",
            default=self.params["linkage"] if "linkage" in self.params else "single",
            dest="linkage",
            help="Linkage method of hierarchical clustering",
            choices=statics.LINKAGE_CHOICES,
            metavar="LINKAGE",
        )
        parser.add_argument(
            "-k",
            type=int,
            default=self.params["amount_clusters"]
            if "amount_clusters" in self.params
            else None,
            dest="amount_clusters",
            help="Number of clusters specified in advance",
        )
        parser.add_argument(
            "-mk",
            "--max-k",
            type=int,
            default=self.params["max_clusters"]
            if "max_clusters" in self.params
            else 10,
            dest="max_clusters",
            help="""Max number of clusters used by the two difference gap
            statistic to find the optimal number of clusters. If k is
            empty this value is used""",
        )
        parser.add_argument(
            "-bi",
            "--bins-info",
            default=self.params["amount_bins"]
            if "amount_bins" in self.params
            else "KN",
            dest="amount_bins",
            help="Number of bins used to calculate the variation of information",
            choices=statics.BINS_CHOICES,
        )
        parser.add_argument(
            "-at",
            "--alpha-tail",
            type=float,
            default=self.params["alpha_tail"] if "alpha_tail" in self.params else 0.05,
            dest="alpha_tail",
            help="""Significance level for lower tail dependence index, only
            used when when codependence value is 'tail' """,
        )
        parser.add_argument(
            "-lo",
            "--leaf-order",
            action="store_true",
            default=self.params["leaf_order"] if "leaf_order" in self.params else True,
            dest="leaf_order",
            help="""indicates if the cluster are ordered so that the distance
            between successive leaves is minimal""",
        )
        parser.add_argument(
            "-de",
            "--d-ewma",
            type=float,
            default=self.params["smoothing_factor_ewma"]
            if "smoothing_factor_ewma" in self.params
            else 0.94,
            dest="smoothing_factor_ewma",
            help="Smoothing factor for ewma estimators",
        )
        # Attach the shared portfolio-optimization options (risk measure,
        # date range, return frequency, NaN handling, allocation, etc.).
        parser = self.po_parser(
            parser,
            rm=True,
            mt=True,
            ct=True,
            p=True,
            s=True,
            e=True,
            lr=True,
            freq=True,
            mn=True,
            th=True,
            r=True,
            a=True,
            v=True,
            name="NCO_",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Optimization needs at least two assets to allocate between.
            if len(self.tickers) < 2:
                console.print(
                    "Please have at least 2 loaded tickers to calculate weights.\n"
                )
                return
            table = True
            # Presence of any sensitivity-analysis ("_sa") attribute switches
            # to comparison mode: suppress the single-run table and display
            # both runs side by side instead.
            if "historic_period_sa" in vars(ns_parser):
                table = False
                console.print(
                    "[yellow]Optimization can take time. Please be patient...\n[/yellow]"
                )
            # First (primary) optimization run using the base parameters.
            weights = optimizer_view.display_nco(
                symbols=self.tickers,
                interval=ns_parser.historic_period,
                start_date=ns_parser.start_period,
                end_date=ns_parser.end_period,
                log_returns=ns_parser.log_returns,
                freq=ns_parser.return_frequency,
                maxnan=ns_parser.max_nan,
                threshold=ns_parser.threshold_value,
                method=ns_parser.nan_fill_method,
                codependence=ns_parser.co_dependence.lower(),
                covariance=ns_parser.covariance.lower(),
                objective=ns_parser.objective.lower(),
                risk_measure=ns_parser.risk_measure.lower(),
                risk_free_rate=ns_parser.risk_free,
                risk_aversion=ns_parser.risk_aversion,
                alpha=ns_parser.significance_level,
                linkage=ns_parser.linkage.lower(),
                k=ns_parser.amount_clusters,
                max_k=ns_parser.max_clusters,
                bins_info=ns_parser.amount_bins.upper(),
                alpha_tail=ns_parser.alpha_tail,
                leaf_order=ns_parser.leaf_order,
                d_ewma=ns_parser.smoothing_factor_ewma,
                value=ns_parser.long_allocation,
                table=table,
            )
            self.portfolios[ns_parser.name.upper()] = weights
            self.count += 1
            self.update_runtime_choices()
            if table is False:
                # Second run with the sensitivity-analysis ("_sa") parameters.
                weights_sa = optimizer_view.display_nco(
                    symbols=self.tickers,
                    interval=ns_parser.historic_period_sa,
                    start_date=ns_parser.start_period_sa,
                    end_date=ns_parser.end_period_sa,
                    log_returns=ns_parser.log_returns_sa,
                    freq=ns_parser.return_frequency_sa,
                    maxnan=ns_parser.max_nan_sa,
                    threshold=ns_parser.threshold_value_sa,
                    method=ns_parser.nan_fill_method_sa,
                    codependence=ns_parser.co_dependence_sa.lower(),
                    covariance=ns_parser.covariance_sa.lower(),
                    objective=ns_parser.objective_sa.lower(),
                    risk_measure=ns_parser.risk_measure_sa.lower(),
                    risk_free_rate=ns_parser.risk_free_sa,
                    risk_aversion=ns_parser.risk_aversion_sa,
                    alpha=ns_parser.significance_level_sa,
                    linkage=ns_parser.linkage_sa.lower(),
                    k=ns_parser.amount_clusters_sa,
                    max_k=ns_parser.max_clusters_sa,
                    bins_info=ns_parser.amount_bins_sa.upper(),
                    alpha_tail=ns_parser.alpha_tail_sa,
                    leaf_order=ns_parser.leaf_order_sa,
                    d_ewma=ns_parser.smoothing_factor_ewma_sa,
                    value=ns_parser.long_allocation_sa,
                    table=table,
                )
                console.print("")
                # Compare the two weight sets overall and per category.
                optimizer_view.display_weights_sa(
                    weights=weights, weights_sa=weights_sa
                )
                if not ns_parser.categories:
                    categories = ["ASSET_CLASS", "COUNTRY", "SECTOR", "INDUSTRY"]
                else:
                    categories = ns_parser.categories
                for category in categories:
                    optimizer_view.display_categories_sa(
                        weights=weights,
                        weights_sa=weights_sa,
                        categories=self.categories,
                        column=category,
                        title="Category - " + category.title(),
                    )
__docformat__ = "numpy"
# pylint: disable=R0913, C0302, E1101, line-too-long
# flake8: noqa: E501
import logging
from typing import Any, Dict, List, Optional, Tuple, Union
from datetime import date
import numpy as np
from numpy.typing import NDArray
from numpy import floating
import pandas as pd
import riskfolio as rp
from dateutil.relativedelta import relativedelta, FR
import yfinance as yf
from scipy.interpolate import interp1d
from openbb_terminal.decorators import log_start_end
from openbb_terminal.portfolio.portfolio_optimization import (
yahoo_finance_model,
)
from openbb_terminal.portfolio.portfolio_optimization.optimizer_helper import (
get_kwarg,
validate_risk_measure,
valid_property_infos,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Maps each risk-measure code to the riskfolio-lib Portfolio attribute that
# sets an upper bound on that risk measure (used for target-risk constraints).
upper_risk = {
    "MV": "upperdev",
    "MAD": "uppermad",
    "MSV": "uppersdev",
    "FLPM": "upperflpm",
    "SLPM": "upperslpm",
    "CVaR": "upperCVaR",
    "EVaR": "upperEVaR",
    "WR": "upperwr",
    "MDD": "uppermdd",
    "ADD": "upperadd",
    "CDaR": "upperCDaR",
    "EDaR": "upperEDaR",
    "UCI": "upperuci",
}
# Maps lowercase CLI objective names to the riskfolio-lib objective labels.
objectives_choices = {
    "minrisk": "MinRisk",
    "sharpe": "Sharpe",
    "utility": "Utility",
    "maxret": "MaxRet",
    "erc": "ERC",
}
# Human-readable description for each lowercase risk-measure code.
risk_names = {
    "mv": "volatility",
    "mad": "mean absolute deviation",
    "gmd": "gini mean difference",
    "msv": "semi standard deviation",
    "var": "value at risk (VaR)",
    "cvar": "conditional value at risk (CVaR)",
    "tg": "tail gini",
    "evar": "entropic value at risk (EVaR)",
    "rg": "range",
    "cvrg": "CVaR range",
    "tgrg": "tail gini range",
    "wr": "worst realization",
    "flpm": "first lower partial moment",
    "slpm": "second lower partial moment",
    "mdd": "maximum drawdown uncompounded",
    "add": "average drawdown uncompounded",
    "dar": "drawdown at risk (DaR) uncompounded",
    "cdar": "conditional drawdown at risk (CDaR) uncompounded",
    "edar": "entropic drawdown at risk (EDaR) uncompounded",
    "uci": "ulcer index uncompounded",
    "mdd_rel": "maximum drawdown compounded",
    "add_rel": "average drawdown compounded",
    "dar_rel": "drawdown at risk (DaR) compounded",
    "cdar_rel": "conditional drawdown at risk (CDaR) compounded",
    "edar_rel": "entropic drawdown at risk (EDaR) compounded",
    "uci_rel": "ulcer index compounded",
}
# Maps lowercase risk-measure codes to the riskfolio-lib risk-measure labels.
risk_choices = {
    "mv": "MV",
    "mad": "MAD",
    "gmd": "GMD",
    "msv": "MSV",
    "var": "VaR",
    "cvar": "CVaR",
    "tg": "TG",
    "evar": "EVaR",
    "rg": "RG",
    "cvrg": "CVRG",
    "tgrg": "TGRG",
    "wr": "WR",
    "flpm": "FLPM",
    "slpm": "SLPM",
    "mdd": "MDD",
    "add": "ADD",
    "dar": "DaR",
    "cdar": "CDaR",
    "edar": "EDaR",
    "uci": "UCI",
    "mdd_rel": "MDD_Rel",
    "add_rel": "ADD_Rel",
    "dar_rel": "DaR_Rel",
    "cdar_rel": "CDaR_Rel",
    "edar_rel": "EDaR_Rel",
    "uci_rel": "UCI_Rel",
}
# Periods per year for each return frequency (D = trading days, W = weeks,
# M = months); used below to convert annual rates to the return frequency.
time_factor = {
    "D": 252.0,
    "W": 52.0,
    "M": 12.0,
}
# Maps alternative kwarg aliases to canonical parameter names.
# NOTE(review): not referenced in this module's visible code — presumably
# consumed by callers; confirm before removing.
dict_conversion = {"period": "historic_period", "start": "start_period"}
@log_start_end(log=logger)
def d_period(interval: str = "1y", start_date: str = "", end_date: str = ""):
    """
    Builds a date range string

    Parameters
    ----------
    interval : str
        interval starting today, e.g. "1y", "6mo", "5d", "ytd", "max".
    start_date: str
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.

    Returns
    -------
    str
        Human readable period label, e.g. "[1 Year]", "[3 Months]" or
        "[From 2020-01-01 to 2021-01-01]".
    """
    extra_choices = {
        "ytd": "[Year-to-Date]",
        "max": "[All-time]",
    }
    if start_date == "":
        if interval in extra_choices:
            p = extra_choices[interval]
        else:
            if interval[-1] == "d":
                p = "[" + interval[:-1] + " Days]"
            elif interval[-1] == "w":
                p = "[" + interval[:-1] + " Weeks]"
            elif interval[-1] == "o":
                # yfinance-style months suffix, e.g. "3mo" -> "[3 Months]"
                p = "[" + interval[:-2] + " Months]"
            elif interval[-1] == "y":
                p = "[" + interval[:-1] + " Years]"
            else:
                # Unrecognized suffix previously left `p` unbound and raised
                # UnboundLocalError; fall back to showing the raw interval.
                p = "[" + interval + "]"
            # Singularize the unit when the amount is exactly 1.
            if p[1:3] == "1 ":
                p = p.replace("s", "")
    else:
        if end_date == "":
            # Default the end date to the last weekday (Friday if weekend).
            end_ = date.today()
            if end_.weekday() >= 5:
                end_ = end_ + relativedelta(weekday=FR(-1))
            end_date = end_.strftime("%Y-%m-%d")
        p = "[From " + start_date + " to " + end_date + "]"
    return p
@log_start_end(log=logger)
def get_equal_weights(
    symbols: List[str],
    **kwargs,
) -> Union[Tuple[Dict[str, float], pd.DataFrame], None]:
    """Build an equally weighted portfolio, where weight = 1/# of symbols.

    Parameters
    ----------
    symbols : List[str]
        List of symbols to be included in the portfolio
    interval : str, optional
        Interval to get data, by default "3y"
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday, by default ""
    log_returns: bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq: str, optional
        Frequency of returns, by default "D". Options: "D" for daily, "W" for
        weekly, "M" for monthly
    maxnan: float
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more
        information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate. Returns percentages if set to 1.

    Returns
    -------
    Union[Tuple[Dict[str, float], pd.DataFrame], None]
        Dictionary of weights where keys are the tickers, dataframe of stock returns
    """
    # Gather the optional parameters in one pass; get_kwarg supplies the
    # module defaults for anything the caller did not provide.
    opts = {
        name: get_kwarg(name, kwargs)
        for name in (
            "interval",
            "start_date",
            "end_date",
            "log_returns",
            "freq",
            "maxnan",
            "threshold",
            "method",
            "value",
        )
    }
    prices = yahoo_finance_model.process_stocks(
        symbols, opts["interval"], opts["start_date"], opts["end_date"]
    )
    returns = yahoo_finance_model.process_returns(
        prices,
        log_returns=opts["log_returns"],
        freq=opts["freq"],
        maxnan=opts["maxnan"],
        threshold=opts["threshold"],
        method=opts["method"],
    )
    # Every symbol receives the same fraction of the allocated value.
    per_symbol = opts["value"] * round(1 / len(symbols), 5)
    return dict.fromkeys(symbols, per_symbol), returns
@log_start_end(log=logger)
def get_property_weights(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[Dict[str, Any]], Optional[pd.DataFrame]]:
    """Calculate portfolio weights based on selected property

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    s_property : str
        Property to weight portfolio by, by default "marketCap"
    value : float, optional
        Amount of money to allocate

    Returns
    -------
    Tuple[Optional[Dict[str, Any]], Optional[pd.DataFrame]]
        Dictionary of portfolio weights or allocations and dataframe of stock
        returns, or (None, None) if no ticker exposes the property.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    s_property = get_kwarg("s_property", kwargs, default="marketCap")
    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )
    prop = {}
    prop_sum = 0
    for stock in symbols:
        # .get avoids a KeyError for tickers whose info dict lacks the
        # property; missing or None values contribute zero weight, matching
        # the existing None -> 0 handling below.
        stock_prop = yf.Ticker(stock).info.get(s_property)
        if stock_prop is None:
            stock_prop = 0
        prop[stock] = stock_prop
        prop_sum += stock_prop
    if prop_sum == 0:
        console.print(f"No {s_property} was found on list of tickers provided", "\n")
        return None, None
    # Weight each ticker by its share of the property total.
    weights = {k: value * v / prop_sum for k, v in prop.items()}
    return weights, stock_returns
@log_start_end(log=logger)
def get_mean_risk_portfolio(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Builds a mean risk optimal portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    objective: str
        Objective function of the optimization model.
        The default is 'Sharpe'. Possible values are:
        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount of money to allocate. The default is 1.
    value_short : float, optional
        Amount to allocate to portfolio in short positions. The default is 0.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights (None if optimization failed),
        DataFrame of stock returns.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    value_short = get_kwarg("value_short", kwargs)
    risk_measure = get_kwarg("risk_measure", kwargs)
    objective = get_kwarg("objective", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    risk_aversion = get_kwarg("risk_aversion", kwargs)
    alpha = get_kwarg("alpha", kwargs)
    target_return = get_kwarg("target_return", kwargs)
    target_risk = get_kwarg("target_risk", kwargs)
    mean = get_kwarg("mean", kwargs)
    covariance = get_kwarg("covariance", kwargs)
    d_ewma = get_kwarg("d_ewma", kwargs)
    risk_measure = validate_risk_measure(risk_measure)
    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )
    if stock_returns.empty:
        console.print(
            "[red]Not enough data points in range to run calculations.[/red]\n"
        )
        return {}, pd.DataFrame()
    if stock_returns.shape[1] < 2:
        console.print(
            f"[red]Given the parameters could only get data for '{stock_returns.columns[0]}'.[/red]\n"
            "[red]Optimization needs at least two assets.[/red]\n",
        )
        return {}, pd.DataFrame()
    first_day = stock_returns.index[0].strftime("%Y-%m-%d")
    console.print(
        f"[yellow]First day of data respecting parameters: {first_day}[/yellow]\n"
    )
    # Convert the annual risk free rate to the return frequency.
    risk_free_rate = risk_free_rate / time_factor[freq.upper()]
    try:
        # Building the portfolio object
        port = rp.Portfolio(returns=stock_returns, alpha=alpha)
        # Estimate input parameters:
        port.assets_stats(method_mu=mean, method_cov=covariance, d=d_ewma)
        # Budget constraints
        port.upperlng = value
        if value_short > 0:
            port.sht = True
            port.uppersht = value_short
            port.budget = value - value_short
        else:
            port.budget = value
        # Estimate optimal portfolio:
        model = "Classic"
        hist = True
        if target_return > -1:
            # Minimum-return constraint, de-annualized to the frequency.
            port.lowerret = float(target_return) / time_factor[freq.upper()]
        if target_risk > -1:
            # Maximum-risk constraint. Volatility-like measures scale with the
            # square root of time; drawdown measures are used as given.
            if risk_measure not in ["ADD", "MDD", "CDaR", "EDaR", "UCI"]:
                setattr(
                    port,
                    upper_risk[risk_measure],
                    float(target_risk) / time_factor[freq.upper()] ** 0.5,
                )
            else:
                setattr(port, upper_risk[risk_measure], float(target_risk))
        weights = port.optimization(
            model=model,
            rm=risk_measure,
            obj=objective,
            rf=risk_free_rate,
            l=risk_aversion,
            hist=hist,
        )
    except Exception as e:
        # Report the failure instead of swallowing it silently; callers treat
        # a None weights result as "optimization failed".
        logger.exception("Mean risk optimization failed")
        console.print(f"[red]Optimization failed: {e}[/red]\n")
        weights = None
    if weights is not None:
        weights = weights.round(5)
        weights = weights.squeeze().to_dict()
    return weights, stock_returns
@log_start_end(log=logger)
def get_max_sharpe(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """
    Builds a maximal return/risk ratio portfolio

    Thin wrapper around `get_mean_risk_portfolio` with the objective fixed to
    'Sharpe'. All keyword arguments (interval, start_date, end_date,
    log_returns, freq, maxnan, threshold, method, risk_measure,
    risk_free_rate, alpha, target_return, target_risk, mean, covariance,
    d_ewma, value, value_short) are forwarded unchanged; see
    `get_mean_risk_portfolio` for their full documentation.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    return get_mean_risk_portfolio(
        symbols=symbols,
        objective=objectives_choices["sharpe"],
        **kwargs,
    )
@log_start_end(log=logger)
def get_min_risk(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """
    Builds a minimum risk portfolio

    Delegates to `get_mean_risk_portfolio` with the objective fixed to
    'MinRisk' (the previous docstring wrongly described this as a maximal
    return/risk ratio portfolio).

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    return get_mean_risk_portfolio(
        symbols=symbols,
        objective=objectives_choices["minrisk"],
        **kwargs,
    )
@log_start_end(log=logger)
def get_max_util(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Build the portfolio that maximizes the risk averse utility function.

    Thin convenience wrapper around ``get_mean_risk_portfolio`` with the
    objective fixed to ``'utility'``; every other keyword argument is
    forwarded unchanged.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    **kwargs
        Forwarded to ``get_mean_risk_portfolio``. Notable keys: interval,
        start_date, end_date, log_returns, freq ('D'/'W'/'M'), maxnan,
        threshold, method, risk_measure ('MV', 'MAD', 'MSV', 'FLPM', 'SLPM',
        'CVaR', 'EVaR', 'WR', 'ADD', 'UCI', 'CDaR', 'EDaR', 'MDD'),
        risk_free_rate, alpha, target_return, target_risk,
        mean ('hist'/'ewma1'/'ewma2'),
        covariance ('hist', 'ewma1', 'ewma2', 'ledoit', 'oas', 'shrunk',
        'gl', 'jlogo', 'fixed', 'spectral', 'shrink'), d_ewma,
        value (long allocation, default 1.0) and
        value_short (short allocation, default 0.0).
        See ``get_mean_risk_portfolio`` for the full reference.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    # Pure delegation: the shared mean-risk optimizer does all of the work,
    # this wrapper only pins the objective function.
    return get_mean_risk_portfolio(
        symbols=symbols,
        objective=objectives_choices["utility"],
        **kwargs,
    )
@log_start_end(log=logger)
def get_max_ret(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Build the portfolio that maximizes expected return.

    Thin convenience wrapper around ``get_mean_risk_portfolio`` with the
    objective fixed to ``'maxret'``; every other keyword argument is
    forwarded unchanged.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    **kwargs
        Forwarded to ``get_mean_risk_portfolio``. Notable keys: interval,
        start_date, end_date, log_returns, freq ('D'/'W'/'M'), maxnan,
        threshold, method, risk_measure ('MV', 'MAD', 'MSV', 'FLPM', 'SLPM',
        'CVaR', 'EVaR', 'WR', 'ADD', 'UCI', 'CDaR', 'EDaR', 'MDD'),
        risk_free_rate, alpha, target_return, target_risk,
        mean ('hist'/'ewma1'/'ewma2'),
        covariance ('hist', 'ewma1', 'ewma2', 'ledoit', 'oas', 'shrunk',
        'gl', 'jlogo', 'fixed', 'spectral', 'shrink'), d_ewma,
        value (long allocation, default 1.0) and
        value_short (short allocation, default 0.0).
        See ``get_mean_risk_portfolio`` for the full reference.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    # Pure delegation: the shared mean-risk optimizer does all of the work,
    # this wrapper only pins the objective function.
    return get_mean_risk_portfolio(
        symbols=symbols,
        objective=objectives_choices["maxret"],
        **kwargs,
    )
@log_start_end(log=logger)
def get_max_diversification_portfolio(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Builds a maximal diversification portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount of money to allocate. The default is 1.
    value_short : float, optional
        Amount to allocate to portfolio in short positions. The default is 0.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights (None if the optimization failed),
        DataFrame of stock returns.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    value_short = get_kwarg("value_short", kwargs)

    covariance = get_kwarg("covariance", kwargs)
    d_ewma = get_kwarg("d_ewma", kwargs)

    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )

    try:
        # Building the portfolio object
        port = rp.Portfolio(returns=stock_returns)

        # Estimate input parameters:
        port.assets_stats(method_mu="hist", method_cov=covariance, d=d_ewma)
        # Replace expected returns with asset volatilities: with mu = std,
        # maximizing the 'Sharpe' objective below maximizes the
        # diversification ratio (weighted average vol over portfolio vol).
        port.mu = stock_returns.std().to_frame().T

        # Budget constraints
        port.upperlng = value
        if value_short > 0:
            port.sht = True
            port.uppersht = value_short
            port.budget = value - value_short
        else:
            port.budget = value

        # Estimate optimal portfolio:
        weights = port.optimization(
            model="Classic", rm="MV", obj="Sharpe", rf=0, hist=True
        )
    except Exception:
        # Best-effort contract: callers expect (None, stock_returns) when the
        # solver fails, but the failure should be logged, not silently lost.
        logger.exception("Maximal diversification optimization failed")
        weights = None

    if weights is not None:
        weights = weights.round(5)
        weights = weights.squeeze().to_dict()

    return weights, stock_returns
@log_start_end(log=logger)
def get_max_decorrelation_portfolio(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Builds a maximal decorrelation portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount of money to allocate. The default is 1.
    value_short : float, optional
        Amount to allocate to portfolio in short positions. The default is 0.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights (None if the optimization failed),
        DataFrame of stock returns.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    value_short = get_kwarg("value_short", kwargs)

    covariance = get_kwarg("covariance", kwargs)
    d_ewma = get_kwarg("d_ewma", kwargs)

    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )

    try:
        # Building the portfolio object
        port = rp.Portfolio(returns=stock_returns)

        # Estimate input parameters:
        port.assets_stats(method_mu="hist", method_cov=covariance, d=d_ewma)
        # Replace the covariance matrix with the correlation matrix so that
        # the 'MinRisk' objective below minimizes portfolio correlation,
        # i.e. produces the maximum decorrelation portfolio.
        port.cov = rp.cov2corr(port.cov)

        # Budget constraints
        port.upperlng = value
        if value_short > 0:
            port.sht = True
            port.uppersht = value_short
            port.budget = value - value_short
        else:
            port.budget = value

        # Estimate optimal portfolio:
        weights = port.optimization(
            model="Classic", rm="MV", obj="MinRisk", rf=0, hist=True
        )
    except Exception:
        # Best-effort contract: callers expect (None, stock_returns) when the
        # solver fails, but the failure should be logged, not silently lost.
        logger.exception("Maximal decorrelation optimization failed")
        weights = None

    if weights is not None:
        weights = weights.round(5)
        weights = weights.squeeze().to_dict()

    return weights, stock_returns
@log_start_end(log=logger)
def get_black_litterman_portfolio(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Builds a Black Litterman portfolio

    Combines a benchmark (by default the market capitalization portfolio)
    with investor views (P/Q matrices) to produce posterior estimates, and
    optionally runs a mean variance optimization on those estimates.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    benchmark : Dict
        Dict of portfolio weights
    p_views: List
        Matrix P of views that shows relationships among assets and returns.
        Default value to None.
    q_views: List
        Matrix Q of expected returns of views. Default value is None.
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    objective: str
        Objective function of the optimization model.
        The default is 'Sharpe'. Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    delta: float, optional
        Risk aversion factor of Black Litterman model. Default value is None.
    equilibrium: bool, optional
        If True excess returns are based on equilibrium market portfolio, if False
        excess returns are calculated as historical returns minus risk free rate.
        Default value is True.
    optimize: bool, optional
        If True Black Litterman estimates are used as inputs of mean variance model,
        if False returns equilibrium weights from Black Litterman model
        Default value is True.
    value : float, optional
        Amount of money to allocate. The default is 1.
    value_short : float, optional
        Amount to allocate to portfolio in short positions. The default is 0.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights (None if the optimization failed),
        DataFrame of stock returns.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    value_short = get_kwarg("value_short", kwargs)

    benchmark = get_kwarg("benchmark", kwargs)
    p_views = get_kwarg("p_views", kwargs)
    q_views = get_kwarg("q_views", kwargs)
    objective = get_kwarg("objective", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    risk_aversion = get_kwarg("risk_aversion", kwargs)
    delta = get_kwarg("delta", kwargs)
    equilibrium = get_kwarg("equilibrium", kwargs)
    optimize = get_kwarg("optimize", kwargs)

    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )

    # By theory default benchmark is market capitalization portfolio
    if benchmark is None:
        benchmark, _ = get_property_weights(
            symbols=symbols,
            interval=interval,
            start_date=start_date,
            end_date=end_date,
            log_returns=log_returns,
            freq=freq,
            maxnan=maxnan,
            threshold=threshold,
            method=method,
            s_property="marketCap",
            value=value,
        )

    # Scale the annual risk free rate down to the return frequency of the data
    factor = time_factor[freq.upper()]
    risk_free_rate = risk_free_rate / factor

    mu, cov, weights = black_litterman(
        stock_returns=stock_returns,
        benchmark=benchmark,
        p_views=p_views,
        q_views=q_views,
        delta=delta,
        risk_free_rate=risk_free_rate,
        equilibrium=equilibrium,
        factor=factor,
    )
    weights = pd.DataFrame(weights)

    if optimize:
        try:
            # Building the portfolio object
            port = rp.Portfolio(returns=stock_returns)

            # Estimate input parameters:
            port.assets_stats(method_mu="hist", method_cov="hist")
            # Feed the Black Litterman posterior estimates into the model
            port.mu_bl = pd.DataFrame(mu).T
            port.cov_bl = pd.DataFrame(cov)

            # Budget constraints
            port.upperlng = value
            if value_short > 0:
                port.sht = True
                port.uppersht = value_short
                port.budget = value - value_short
            else:
                port.budget = value

            # Estimate optimal portfolio:
            weights = port.optimization(
                model="BL",
                rm="MV",
                obj=objective,
                rf=risk_free_rate,
                l=risk_aversion,
                hist=True,
            )
        except Exception:
            # Best-effort contract: callers expect (None, stock_returns) when
            # the solver fails, but the failure should be logged, not lost.
            logger.exception("Black Litterman optimization failed")
            weights = None

    if weights is not None:
        weights = weights.round(5)
        weights = weights.squeeze().to_dict()

    return weights, stock_returns
@log_start_end(log=logger)
def get_ef(
    symbols: List[str],
    **kwargs,
) -> Tuple[
    pd.DataFrame,
    pd.DataFrame,
    pd.DataFrame,
    pd.DataFrame,
    Optional[pd.DataFrame],
    NDArray[floating],
    NDArray[floating],
    rp.Portfolio,
]:
    """
    Get efficient frontier

    Computes a 20-point efficient frontier for the given tickers, the
    tangency (max Sharpe) portfolio, a set of random portfolios, and a
    smoothed (interpolated) frontier curve for plotting.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
        The default is 0.05.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    n_portfolios: int, optional
        Number of portfolios to simulate. The default value is 100.
    seed: int, optional
        Seed used to generate random portfolios. The default value is 123.

    Returns
    -------
    Tuple[
        pd.DataFrame,
        pd.DataFrame,
        pd.DataFrame,
        pd.DataFrame,
        Optional[pd.DataFrame],
        NDArray[floating],
        NDArray[floating],
        rp.Portfolio,
    ]
        Parameters to create efficient frontier:
        frontier, mu, cov, stock_returns, weights, X1, Y1, port
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    value_short = get_kwarg("value_short", kwargs)
    risk_measure = get_kwarg("risk_measure", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    alpha = get_kwarg("alpha", kwargs)
    n_portfolios = get_kwarg("n_portfolios", kwargs)
    seed = get_kwarg("seed", kwargs)

    # Scale the risk free rate down to the return frequency of the data
    risk_free_rate = risk_free_rate / time_factor[freq.upper()]
    risk_measure = validate_risk_measure(risk_measure)

    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )

    try:
        # Building the portfolio object
        port = rp.Portfolio(returns=stock_returns, alpha=alpha)

        # Estimate input parameters:
        port.assets_stats(method_mu="hist", method_cov="hist")

        # Budget constraints
        port.upperlng = value
        if value_short > 0:
            port.sht = True
            port.uppersht = value_short
            port.budget = value - value_short
        else:
            port.budget = value

        # Estimate tangency portfolio:
        weights: Optional[pd.DataFrame] = port.optimization(
            model="Classic",
            rm=risk_choices[risk_measure.lower()],
            obj="Sharpe",
            rf=risk_free_rate,
            hist=True,
        )
    except Exception as _:
        # NOTE(review): if rp.Portfolio(...) itself raises, `port` is never
        # bound and the efficient_frontier call below raises NameError —
        # confirm whether that failure mode is intended.
        weights = None

    points = 20  # Number of points of the frontier

    frontier = port.efficient_frontier(
        model="Classic",
        rm=risk_choices[risk_measure.lower()],
        points=points,
        rf=risk_free_rate,
        hist=True,
    )

    random_weights = generate_random_portfolios(
        symbols=symbols,
        n_portfolios=n_portfolios,
        seed=seed,
    )

    mu = stock_returns.mean().to_frame().T
    cov = stock_returns.cov()
    # Expected return per frontier portfolio, annualized by the time factor
    Y = (mu @ frontier).to_numpy() * time_factor[freq.upper()]
    Y = np.ravel(Y)
    X = np.zeros_like(Y)

    # Risk of each frontier portfolio under the selected risk measure
    for i in range(frontier.shape[1]):
        w = np.array(frontier.iloc[:, i], ndmin=2).T
        risk = rp.Sharpe_Risk(
            w,
            cov=cov,
            returns=stock_returns,
            rm=risk_choices[risk_measure.lower()],
            rf=risk_free_rate,
            alpha=alpha,
            # a_sim=a_sim,
            # beta=beta,
            # b_sim=b_sim,
        )
        X[i] = risk

    # Dispersion-type risks scale with sqrt(time); drawdown-based measures
    # (ADD, MDD, CDaR, EDaR, UCI) are left at the data frequency.
    if risk_choices[risk_measure.lower()] not in ["ADD", "MDD", "CDaR", "EDaR", "UCI"]:
        X = X * time_factor[freq.upper()] ** 0.5

    # Smooth the frontier with quadratic interpolation for plotting
    # (requires at least 3 frontier points)
    f = interp1d(X, Y, kind="quadratic")
    X1 = np.linspace(X[0], X[-1], num=100)
    Y1 = f(X1)

    frontier = pd.concat([frontier, random_weights], axis=1)
    # to delete stocks with corrupted data: drop the trailing random-weight
    # rows for assets that were removed from stock_returns
    frontier.drop(
        frontier.tail(len(random_weights.index) - len(stock_returns.columns)).index,
        inplace=True,
    )

    return frontier, mu, cov, stock_returns, weights, X1, Y1, port
@log_start_end(log=logger)
def get_risk_parity_portfolio(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Builds a risk parity portfolio using the risk budgeting approach

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.

    risk_cont: List[str], optional
        The vector of risk contribution per asset. If empty, the default is
        1/n (number of assets).
    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount of money to allocate. The default is 1.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights (None if the optimization failed),
        DataFrame of stock returns.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)

    risk_measure = get_kwarg("risk_measure", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    alpha = get_kwarg("alpha", kwargs)
    target_return = get_kwarg("target_return", kwargs)
    mean = get_kwarg("mean", kwargs)
    covariance = get_kwarg("covariance", kwargs)
    d_ewma = get_kwarg("d_ewma", kwargs)
    risk_cont = get_kwarg("risk_cont", kwargs)

    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )

    # Scale the annual risk free rate down to the return frequency of the data
    risk_free_rate = risk_free_rate / time_factor[freq.upper()]
    risk_measure = validate_risk_measure(risk_measure)

    try:
        # Building the portfolio object
        port = rp.Portfolio(returns=stock_returns, alpha=alpha)

        # Calculating optimal portfolio
        port.assets_stats(method_mu=mean, method_cov=covariance, d=d_ewma)

        # Estimate optimal portfolio:
        model = "Classic"
        hist = True

        if risk_cont is None:
            risk_cont_ = None  # Risk contribution constraints vector
        else:
            # Normalize the user-supplied risk budget so it sums to 1
            risk_cont_ = np.array(risk_cont).reshape(1, -1)
            risk_cont_ = risk_cont_ / np.sum(risk_cont_)

        if target_return > -1:
            # Minimum-return constraint, scaled to the data frequency
            port.lowerret = float(target_return) / time_factor[freq.upper()]

        weights = port.rp_optimization(
            model=model, rm=risk_measure, rf=risk_free_rate, b=risk_cont_, hist=hist
        )
    except Exception:
        # Best-effort contract: callers expect (None, stock_returns) when the
        # solver fails, but the failure should be logged, not silently lost.
        logger.exception("Risk parity optimization failed")
        weights = None

    if weights is not None:
        if value > 0.0:
            weights = value * weights
        weights = weights.round(5)
        weights = weights.squeeze().to_dict()

    return weights, stock_returns
@log_start_end(log=logger)
def get_rel_risk_parity_portfolio(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Builds a relaxed risk parity portfolio using the least squares approach

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    version : str, optional
        Relaxed risk parity model version. The default is 'A'.
        Possible values are:
        - 'A': without regularization and penalization constraints.
        - 'B': with regularization constraint but without penalization constraint.
        - 'C': with regularization and penalization constraints.
    risk_cont: List[str], optional
        The vector of risk contribution per asset. If empty, the default is
        1/n (number of assets).
    penal_factor: float, optional
        The penalization factor of penalization constraints. Only used with
        version 'C'. The default is 1.
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount of money to allocate. The default is 1.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    target_return = get_kwarg("target_return", kwargs)
    mean = get_kwarg("mean", kwargs)
    covariance = get_kwarg("covariance", kwargs)
    d_ewma = get_kwarg("d_ewma", kwargs)
    risk_cont = get_kwarg("risk_cont", kwargs)
    version = get_kwarg("version", kwargs)
    penal_factor = get_kwarg("penal_factor", kwargs)

    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )

    try:
        # Building the portfolio object
        port = rp.Portfolio(returns=stock_returns)
        # Calculating optimal portfolio
        port.assets_stats(method_mu=mean, method_cov=covariance, d=d_ewma)
        # Estimate optimal portfolio:
        model = "Classic"
        hist = True
        if risk_cont is None:
            risk_cont_ = None  # Risk contribution constraints vector
        else:
            # Normalize the user-supplied risk contributions to sum to 1
            risk_cont_ = np.array(risk_cont).reshape(1, -1)
            risk_cont_ = risk_cont_ / np.sum(risk_cont_)
        if target_return > -1:
            # Rescale the target return to the frequency of the computed
            # returns (mirrors the risk-free-rate handling of sibling models)
            port.lowerret = float(target_return) / time_factor[freq.upper()]
        weights = port.rrp_optimization(
            model=model, version=version, l=penal_factor, b=risk_cont_, hist=hist
        )
    except Exception:
        # Log the failure instead of swallowing it silently; callers still
        # receive None weights to signal that optimization failed.
        logger.exception("Relaxed risk parity optimization failed")
        weights = None

    if weights is not None:
        if value > 0.0:
            # Scale weights by the amount of money to allocate
            weights = value * weights
        weights = weights.round(5)
        weights = weights.squeeze().to_dict()
    return weights, stock_returns
@log_start_end(log=logger)
def get_hcp_portfolio(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """Builds hierarchical clustering based portfolios

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    interval : str, optional
        interval to get stock data, by default "3mo"
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    model: str, optional
        The hierarchical cluster portfolio model used for optimize the
        portfolio. The default is 'HRP'. Possible values are:
        - 'HRP': Hierarchical Risk Parity.
        - 'HERC': Hierarchical Equal Risk Contribution.
        - 'NCO': Nested Clustered Optimization.
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:
        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `c-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `c-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `c-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `c-MLforAM`.
    objective: str, optional
        Objective function used by the NCO model.
        The default is 'MinRisk'. Possible values are:
        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'ERC': Equally risk contribution portfolio of the selected risk measure.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio. If model is 'NCO',
        the risk measures available depends on the objective function.
        The default is 'MV'. Possible values are:
        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency.
        Used for 'FLPM' and 'SLPM'. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.
        cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`__.
        The default is 'single'. Possible values are:
        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.
    k: int, optional
        Number of clusters. This value is taken instead of the optimal number
        of clusters calculated with the two difference gap statistic.
        The default is None.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters. The default is 10.
    bins_info: str, optional
        Number of bins used to calculate variation of information. The default
        value is 'KN'. Possible values are:
        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scott's choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier's choice method.
    alpha_tail: float, optional
        Significance level for lower tail dependence index. The default is 0.05.
    leaf_order: bool, optional
        Indicates if the clusters are ordered so that the distance between
        successive leaves is minimal. The default is True.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount of money to allocate. The default is 1.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    interval = get_kwarg("interval", kwargs)
    start_date = get_kwarg("start_date", kwargs)
    end_date = get_kwarg("end_date", kwargs)
    log_returns = get_kwarg("log_returns", kwargs)
    freq = get_kwarg("freq", kwargs)
    maxnan = get_kwarg("maxnan", kwargs)
    threshold = get_kwarg("threshold", kwargs)
    method = get_kwarg("method", kwargs)
    value = get_kwarg("value", kwargs)
    objective = get_kwarg("objective", kwargs)
    risk_measure = get_kwarg("risk_measure", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    risk_aversion = get_kwarg("risk_aversion", kwargs)
    alpha = get_kwarg("alpha", kwargs)
    a_sim = get_kwarg("a_sim", kwargs)
    beta = get_kwarg("beta", kwargs)
    b_sim = get_kwarg("b_sim", kwargs)
    covariance = get_kwarg("covariance", kwargs)
    d_ewma = get_kwarg("d_ewma", kwargs)
    model = get_kwarg("model", kwargs, default="HRP")
    codependence = get_kwarg("codependence", kwargs)
    linkage = get_kwarg("linkage", kwargs)
    k = get_kwarg("k", kwargs)
    max_k = get_kwarg("max_k", kwargs)
    bins_info = get_kwarg("bins_info", kwargs)
    alpha_tail = get_kwarg("alpha_tail", kwargs)
    leaf_order = get_kwarg("leaf_order", kwargs)

    risk_measure = validate_risk_measure(risk_measure)
    stock_prices = yahoo_finance_model.process_stocks(
        symbols, interval, start_date, end_date
    )
    stock_returns = yahoo_finance_model.process_returns(
        stock_prices,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
    )
    if linkage == "dbht":
        # riskfolio expects the DBHT linkage identifier in upper case
        linkage = linkage.upper()
    # Rescale the annual risk free rate to the frequency of the returns
    risk_free_rate = risk_free_rate / time_factor[freq.upper()]

    try:
        # Building the portfolio object
        port = rp.HCPortfolio(
            returns=stock_returns,
            alpha=alpha,
            a_sim=a_sim,
            beta=beta,
            b_sim=b_sim,
        )
        weights = port.optimization(
            model=model,
            codependence=codependence,
            covariance=covariance,
            obj=objective,
            rm=risk_measure,
            rf=risk_free_rate,
            l=risk_aversion,
            linkage=linkage,
            k=k,
            max_k=max_k,
            bins_info=bins_info,
            alpha_tail=alpha_tail,
            leaf_order=leaf_order,
            d=d_ewma,
        )
    except Exception:
        # Log the failure instead of swallowing it silently; callers still
        # receive None weights to signal that optimization failed.
        logger.exception("Hierarchical clustering portfolio optimization failed")
        weights = None

    if weights is not None:
        if value > 0.0:
            # Scale weights by the amount of money to allocate
            weights = value * weights
        weights = weights.round(5)
        weights = weights.squeeze().to_dict()
    return weights, stock_returns
@log_start_end(log=logger)
def get_hrp(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """
    Builds a hierarchical risk parity portfolio

    Thin wrapper around ``get_hcp_portfolio`` with the model fixed to 'HRP'.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:
        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `c-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `c-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `c-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `c-MLforAM`.
    objective: str, optional
        Objective function used by the NCO model.
        The default is 'MinRisk'. Possible values are:
        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'ERC': Equally risk contribution portfolio of the selected risk measure.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio. If model is 'NCO',
        the risk measures available depends on the objective function.
        The default is 'MV'. Possible values are:
        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency (``get_hcp_portfolio``
        rescales it internally to the return frequency).
        Used for 'FLPM' and 'SLPM'. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html>`__.
        The default is 'single'. Possible values are:
        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.
    k: int, optional
        Number of clusters. This value is taken instead of the optimal number
        of clusters calculated with the two difference gap statistic.
        The default is None.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters. The default is 10.
    bins_info: str, optional
        Number of bins used to calculate variation of information. The default
        value is 'KN'. Possible values are:
        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scott's choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier's choice method.
    alpha_tail: float, optional
        Significance level for lower tail dependence index. The default is 0.05.
    leaf_order: bool, optional
        Indicates if the clusters are ordered so that the distance between
        successive leaves is minimal. The default is True.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
        NOTE(review): not consumed by ``get_hcp_portfolio`` — verify.
    table: bool, optional
        True if plot table weights, by default False
        NOTE(review): not consumed by ``get_hcp_portfolio`` — verify.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    # Delegate to the generic hierarchical clustering optimizer with the
    # model fixed to Hierarchical Risk Parity.
    weights, stock_returns = get_hcp_portfolio(
        symbols=symbols,
        model="HRP",
        **kwargs,
    )
    return weights, stock_returns
@log_start_end(log=logger)
def get_herc(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """
    Builds a hierarchical equal risk contribution (HERC) portfolio

    Thin wrapper around ``get_hcp_portfolio`` with the model fixed to 'HERC'.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:
        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `c-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `c-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `c-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `c-MLforAM`.
    objective: str, optional
        Objective function used by the NCO model.
        The default is 'MinRisk'. Possible values are:
        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'ERC': Equally risk contribution portfolio of the selected risk measure.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio. If model is 'NCO',
        the risk measures available depends on the objective function.
        The default is 'MV'. Possible values are:
        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency (``get_hcp_portfolio``
        rescales it internally to the return frequency).
        Used for 'FLPM' and 'SLPM'. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html>`__.
        The default is 'single'. Possible values are:
        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.
    k: int, optional
        Number of clusters. This value is taken instead of the optimal number
        of clusters calculated with the two difference gap statistic.
        The default is None.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters. The default is 10.
    bins_info: str, optional
        Number of bins used to calculate variation of information. The default
        value is 'KN'. Possible values are:
        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scott's choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier's choice method.
    alpha_tail: float, optional
        Significance level for lower tail dependence index. The default is 0.05.
    leaf_order: bool, optional
        Indicates if the clusters are ordered so that the distance between
        successive leaves is minimal. The default is True.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
        NOTE(review): not consumed by ``get_hcp_portfolio`` — verify.
    table: bool, optional
        True if plot table weights, by default False
        NOTE(review): not consumed by ``get_hcp_portfolio`` — verify.

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    # Delegate to the generic hierarchical clustering optimizer with the
    # model fixed to Hierarchical Equal Risk Contribution.
    weights, stock_returns = get_hcp_portfolio(
        symbols=symbols,
        model="HERC",
        **kwargs,
    )
    return weights, stock_returns
@log_start_end(log=logger)
def get_nco(
    symbols: List[str],
    **kwargs,
) -> Tuple[Optional[dict], pd.DataFrame]:
    """
    Builds a nested clustered optimization (NCO) portfolio

    Thin wrapper that forwards all keyword arguments to `get_hcp_portfolio`
    with ``model="NCO"``; see that function for the complete list of
    supported keyword arguments. The most relevant ones are summarized below.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str
        Interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default is False
    freq: str, optional
        Frequency used to calculate returns: 'D', 'W' or 'M'. Default is 'D'.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str, optional
        Method used to fill nan values, passed to `pandas.DataFrame.interpolate`.
        Default is 'time'.
    codependence: str, optional
        Codependence/similarity matrix used to build the distance metric and
        clusters: 'pearson' (default), 'spearman', 'abs_pearson',
        'abs_spearman', 'distance', 'mutual_info' or 'tail'.
    covariance: str, optional
        Covariance estimator: 'hist' (default), 'ewma1', 'ewma2', 'ledoit',
        'oas', 'shrunk', 'gl', 'jlogo', 'fixed', 'spectral' or 'shrink'.
    objective: str, optional
        Objective function used by the NCO model: 'MinRisk' (default),
        'Utility', 'Sharpe' or 'ERC'.
    risk_measure: str, optional
        Risk measure used to optimize the portfolio, e.g. 'MV' (default),
        'MAD', 'MSV', 'FLPM', 'SLPM', 'VaR', 'CVaR', 'TG', 'EVaR', 'WR',
        'RG', 'CVRG', 'TGRG', 'MDD', 'ADD', 'DaR', 'CDaR', 'EDaR', 'UCI'
        and their '_Rel' (compounded) variants.
    risk_free_rate: float, optional
        Risk free rate, in the same frequency as asset returns. Used for
        'FLPM' and 'SLPM'. Default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective. Default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR and Tail Gini of
        losses. Default is 0.05.
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses. Default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None, duplicates
        alpha. Default is None.
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None,
        duplicates a_sim. Default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering: 'single' (default),
        'complete', 'average', 'weighted', 'centroid', 'median', 'ward'
        or 'dbht'.
    k: int, optional
        Number of clusters. Overrides the optimal number found with the two
        difference gap statistic. Default is None.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic.
        Default is 10.
    bins_info: str, optional
        Bin-count rule for variation of information: 'KN' (default), 'FD',
        'SC' or 'HGR'.
    alpha_tail: float, optional
        Significance level for lower tail dependence index. Default is 0.05.
    leaf_order: bool, optional
        If True, order clusters so that the distance between successive
        leaves is minimal. Default is True.
    d: float, optional
        Smoothing factor of ewma methods. Default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Tuple[Optional[dict], pd.DataFrame]
        Dictionary of portfolio weights,
        DataFrame of stock returns.
    """
    weights, stock_returns = get_hcp_portfolio(
        symbols=symbols,
        model="NCO",
        **kwargs,
    )
    return weights, stock_returns
@log_start_end(log=logger)
def black_litterman(
    stock_returns: pd.DataFrame,
    benchmark,
    **kwargs,
) -> Tuple[dict, dict, dict]:
    """
    Calculates Black-Litterman estimates following He and Litterman (1999)

    Parameters
    ----------
    stock_returns: pd.DataFrame
        DataFrame of stock returns, one column per asset
    benchmark: Dict
        Dict of portfolio weights
    p_views: List
        Matrix P of views that shows relationships among assets and returns.
        Default value to None.
    q_views: List
        Matrix Q of expected returns of views in annual frequency. Default value is None.
    delta: float
        Risk aversion factor. Default value is None.
    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency. Default value is 0.
    equilibrium: bool, optional
        If True excess returns are based on equilibrium market portfolio, if False
        excess returns are calculated as historical returns minus risk free rate.
        Default value is True.
    factor: int
        The time factor used to de-annualize the Q views (e.g. 252 for daily
        returns). Default value is 252.

    Returns
    -------
    Tuple[dict, dict, dict]
        Black-Litterman model estimates of expected returns,
        Covariance matrix,
        Portfolio weights.
    """
    p_views = get_kwarg("p_views", kwargs)
    q_views = get_kwarg("q_views", kwargs)
    delta = get_kwarg("delta", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    equilibrium = get_kwarg("equilibrium", kwargs)
    factor = get_kwarg("factor", kwargs, default=252)

    symbols = stock_returns.columns.tolist()
    # Work on column vectors: benchmark weights and mean returns as (n, 1).
    benchmark = pd.Series(benchmark).to_numpy().reshape(-1, 1)
    mu = stock_returns.mean().to_numpy().reshape(-1, 1)
    S = stock_returns.cov().to_numpy()

    if delta is None:
        # Implied risk aversion: excess benchmark return over benchmark variance.
        a = mu.T @ benchmark
        delta = (a - risk_free_rate) / (benchmark.T @ S @ benchmark)
        delta = delta.item()

    if equilibrium:
        # Equilibrium (implied) excess returns: PI = delta * S * w_benchmark.
        PI_eq = delta * (S @ benchmark)
    else:
        PI_eq = mu - risk_free_rate

    flag = False
    if p_views is None or q_views is None:
        # No explicit views: use the identity as P and the equilibrium returns
        # as Q, which leaves the prior unchanged (flag marks this case).
        p_views = np.identity(S.shape[0])
        q_views = PI_eq
        flag = True
    else:
        p_views = np.array(p_views, dtype=float)
        # Q views are given annualized; convert to per-period returns.
        q_views = np.array(q_views, dtype=float).reshape(-1, 1) / factor

    # tau set to 1/T as in common Black-Litterman practice.
    tau = 1 / stock_returns.shape[0]
    # Omega: diagonal uncertainty of the views, diag(P (tau S) P').
    Omega = np.diag(np.diag(p_views @ (tau * S) @ p_views.T))

    # Posterior expected excess returns combining the prior PI_eq and the views.
    PI = np.linalg.inv(
        np.linalg.inv(tau * S) + p_views.T @ np.linalg.inv(Omega) @ p_views
    ) @ (np.linalg.inv(tau * S) @ PI_eq + p_views.T @ np.linalg.inv(Omega) @ q_views)

    if flag:
        # No real views were supplied, so no covariance adjustment is added.
        n, m = S.shape
        M = np.zeros([n, m])
    else:
        # Posterior covariance adjustment term of the estimation error.
        M = np.linalg.inv(
            np.linalg.inv(tau * S) + p_views.T @ np.linalg.inv(Omega) @ p_views
        )

    mu = PI + risk_free_rate
    cov = S + M
    # Unconstrained mean-variance weights: w = (delta * cov)^-1 @ PI.
    weights = np.linalg.inv(delta * cov) @ PI
    mu = pd.DataFrame(mu, index=symbols).to_dict()
    cov = pd.DataFrame(cov, index=symbols, columns=symbols).to_dict()
    weights = pd.DataFrame(weights, index=symbols).to_dict()

    return mu, cov, weights
@log_start_end(log=logger)
def generate_random_portfolios(
    symbols: List[str],
    n_portfolios: int = 100,
    seed: int = 123,
    value: float = 1.0,
) -> pd.DataFrame:
    """Build random portfolios

    Parameters
    ----------
    symbols : List[str]
        List of portfolio stocks
    n_portfolios: int, optional
        Number of portfolios to simulate. The default value is 100.
    seed: int, optional
        Seed used to generate random portfolios. The default value is 123.
    value : float, optional
        Amount of money to allocate. The default is 1.

    Returns
    -------
    pd.DataFrame
        One row per asset and one column per portfolio, holding the (scaled)
        weight of each asset. In addition to the random portfolios, one pure
        single-asset portfolio per symbol is appended.
    """
    assets = list(symbols)

    # Draw three families of Dirichlet-distributed weight vectors, each a
    # third of the requested portfolio count.
    n_samples = int(n_portfolios / 3)
    rs = np.random.RandomState(seed=seed)

    # alpha = 1: uniform over the simplex (equal probability for each asset)
    w1 = rs.dirichlet(np.ones(len(assets)), n_samples)
    # alpha < 1: more concentrated portfolios
    w2 = rs.dirichlet(np.ones(len(assets)) * 0.65, n_samples)
    # alpha > 1: more diversified portfolios
    w3 = rs.dirichlet(np.ones(len(assets)) * 2, n_samples)
    # One single-asset portfolio per symbol
    w4 = np.identity(len(assets))

    w = np.concatenate((w1, w2, w3, w4), axis=0)
    w = pd.DataFrame(w, columns=assets).T

    # Scale weights by the amount to allocate (skip non-positive values).
    if value > 0.0:
        w = value * w
    return w
@log_start_end(log=logger)
def get_properties() -> List[str]:
    """Return the property names available for property-weighted optimization.

    Returns
    -------
    List[str]
        Names of the supported properties.
    """
    return valid_property_infos
@log_start_end(log=logger)
def get_categories(
    weights: dict, categories: dict, column: str = "ASSET_CLASS"
) -> pd.DataFrame:
    """Aggregate portfolio weights by a category column

    Parameters
    ----------
    weights : dict
        Dictionary of {symbol: weight}
    categories: dict
        Dictionary of per-symbol category mappings (must include the
        'CURRENCY' and 'CURRENT_INVESTED_AMOUNT' columns)
    column : str, optional
        Category column to group by, by default "ASSET_CLASS"

    Returns
    -------
    pd.DataFrame
        Weights aggregated by (currency, category), including current
        weights, current invested amounts and per-currency TOTAL rows.
    """
    if not weights:
        return pd.DataFrame()
    if column == "CURRENT_INVESTED_AMOUNT":
        # Not a real category: it is the value column itself.
        return pd.DataFrame()

    df = pd.DataFrame.from_dict(
        data=weights, orient="index", columns=["value"], dtype=float
    )
    categories_df = pd.DataFrame.from_dict(data=categories, dtype=float)
    categories_df = df.join(categories_df)
    categories_df.set_index(column, inplace=True)

    df = pd.pivot_table(
        categories_df,
        values=["value", "CURRENT_INVESTED_AMOUNT"],
        index=["CURRENCY", column],
        aggfunc="sum",
    )
    # Normalize invested amounts and optimized weights within each currency.
    df["CURRENT_WEIGHTS"] = (
        df["CURRENT_INVESTED_AMOUNT"].groupby(level=0).transform(lambda x: x / sum(x))
    )
    df["value"] = df["value"].groupby(level=0).transform(lambda x: x / sum(x))

    # Append a per-currency "TOTAL <currency>" row. ``DataFrame.append`` was
    # removed in pandas 2.0, so the total row is built explicitly instead.
    parts = []
    for currency, group in df.groupby(level=0):
        total_row = pd.DataFrame(
            [group.sum()],
            index=pd.MultiIndex.from_tuples([(currency, "TOTAL " + currency)]),
        )
        parts.append(pd.concat([group, total_row]))
    df = pd.concat(parts)

    # Column order: invested amount, current weights, optimized value.
    df = df.iloc[:, [0, 2, 1]]
    return df
from typing import Dict, List, Tuple
import pandas as pd
from openbb_terminal.portfolio.portfolio_optimization import (
excel_model,
optimizer_model,
optimizer_helper,
)
from openbb_terminal.portfolio.portfolio_optimization.parameters import params_view
from openbb_terminal.rich_config import console
class PoEngine:
    """Portfolio Optimization Engine

    Holds the state shared by the portfolio optimization commands: the list
    of ticker symbols, their category mappings, the latest optimized weights,
    the stock returns used by the optimizers, and the model parameters loaded
    from a parameters file.
    """

    def __init__(
        self,
        symbols_categories: Dict[str, Dict[str, str]] = None,
        symbols_file_path: str = None,
        parameters_file_path: str = None,
    ):
        """Initialize the engine

        Parameters
        ----------
        symbols_categories : Dict[str, Dict[str, str]], optional
            Mapping of category name to a {symbol: category value} dict,
            by default None
        symbols_file_path : str, optional
            Symbols file path, by default None
        parameters_file_path : str, optional
            Parameters file path, by default None

        Raises
        ------
        ValueError
            If neither `symbols_categories` nor `symbols_file_path` is given.
        """
        self._categories: Dict[str, Dict[str, str]] = {}
        self._symbols: List[str] = []
        self._weights: Dict[str, float] = {}
        # Start as an empty DataFrame (not None) so `get_returns` can safely
        # check `.empty` before any optimization has been performed.
        self._returns: pd.DataFrame = pd.DataFrame()
        self._params: Dict[str, float] = {}
        # Empty until a model is set explicitly or loaded from a file.
        self._current_model: str = ""

        if symbols_categories is not None:
            self._symbols, self._categories = PoEngine.__parse_dictionary(
                symbols_categories
            )
        elif symbols_file_path is not None:
            self._symbols, self._categories = excel_model.load_allocation(
                symbols_file_path
            )
        else:
            raise ValueError("symbols or file_path must be provided")

        if parameters_file_path is not None:
            self._params, self._current_model = params_view.load_file(
                parameters_file_path
            )

    @staticmethod
    def __parse_dictionary(
        symbols_categories: Dict[str, Dict[str, str]]
    ) -> Tuple[List[str], Dict[str, Dict[str, str]]]:
        """Parse the categories dictionary

        Parameters
        ----------
        symbols_categories : Dict[str, Dict[str, str]]
            Mapping of category name to a {symbol: category value} dict

        Returns
        -------
        Tuple[List[str], Dict[str, Dict[str, str]]]
            Symbols and categories

        Raises
        ------
        TypeError
            If `symbols_categories` is not a dictionary.
        """
        if not isinstance(symbols_categories, dict):
            raise TypeError("'symbols_categories' must be a dictionary.")

        symbols = PoEngine.__get_symbols_from_categories(symbols_categories)

        # Every symbol needs a currency and an invested amount: create the
        # mandatory categories if absent and fill per-symbol defaults.
        for symbol in symbols:
            symbols_categories.setdefault("CURRENCY", {}).setdefault(symbol, "USD")
            symbols_categories.setdefault("CURRENT_INVESTED_AMOUNT", {}).setdefault(
                symbol, "0"
            )
        return symbols, symbols_categories

    @staticmethod
    def __get_symbols_from_categories(
        symbols_categories: Dict[str, Dict[str, str]]
    ) -> List[str]:
        """Get the symbols from the categories dictionary

        Parameters
        ----------
        symbols_categories : Dict[str, Dict[str, str]], optional
            Categories

        Returns
        -------
        List[str]
            Unique list of symbols found across all categories
        """
        try:
            # The union of the inner dictionaries' keys is the symbol universe.
            symbols = {
                symbol
                for values in symbols_categories.values()
                for symbol in values.keys()
            }
            return list(symbols)
        except Exception:
            console.print(
                "Unsupported dictionary format. See `portfolio.po.load` examples for correct format."
            )
            return []

    def get_symbols(self) -> List[str]:
        """Get the list of portfolio symbols."""
        return self._symbols

    def get_available_categories(self) -> List[str]:
        """Get the available categories

        Returns
        -------
        List[str]
            Available categories
        """
        available_categories = list(self._categories.keys())
        # CURRENT_INVESTED_AMOUNT is a value column, not a display category.
        if "CURRENT_INVESTED_AMOUNT" in available_categories:
            available_categories.remove("CURRENT_INVESTED_AMOUNT")
        return available_categories

    def get_category(self, category: str = None) -> Dict[str, str]:
        """Get the category

        Parameters
        ----------
        category : str, optional
            Category, by default None

        Returns
        -------
        Dict[str, str]
            Mapping of symbol to category value, empty if not found
        """
        if category is None:
            console.print("No category provided. Please provide a category.")
            return {}
        d = self.get_categories_dict()
        return d.get(category, {})

    def get_categories_dict(self) -> Dict[str, Dict[str, str]]:
        """Get the categories

        Returns
        -------
        Dict[str, Dict[str, str]]
            Categories
        """
        if not self._categories:
            console.print("No categories found. Use 'load' to load a file.")
            return {}
        return self._categories

    def get_category_df(self, category: str = None) -> pd.DataFrame:
        """Get the category df

        Parameters
        ----------
        category : str, optional
            Category, by default None

        Returns
        -------
        pd.DataFrame
            Category DataFrame
        """
        if category is None:
            console.print("No category provided. Please provide a category.")
            return pd.DataFrame()
        if not self._categories:
            console.print("No categories found. Use 'load' to load a file.")
            return pd.DataFrame()
        return optimizer_model.get_categories(
            weights=self._weights, categories=self._categories, column=category
        )

    def set_weights(self, weights: Dict[str, float]):
        """Set the weights

        Parameters
        ----------
        weights : Dict[str, float]
            Weights
        """
        self._weights = weights

    def get_weights(self, warning=True) -> Dict[str, float]:
        """Get the weights

        Parameters
        ----------
        warning : bool, optional
            Display warning, by default True

        Returns
        -------
        Dict[str, float]
            Weights
        """
        if not self._weights:
            if warning:
                console.print("No weights found. Please perform some optimization.")
            return {}
        return self._weights

    def get_weights_df(self, warning=True) -> pd.DataFrame:
        """Get the weights

        Parameters
        ----------
        warning : bool, optional
            Display warning, by default True

        Returns
        -------
        pd.DataFrame
            Weights
        """
        if not self._weights:
            if warning:
                console.print("No weights found. Please perform some optimization.")
            return pd.DataFrame()
        return optimizer_helper.dict_to_df(self._weights)

    def set_params(self, params: Dict[str, float], update=False):
        """Set the parameters

        Parameters
        ----------
        params : Dict[str, float]
            Parameters
        update : bool, optional
            If True merge into the existing parameters, otherwise replace
            them, by default False
        """
        if update:
            self._params.update(params)
        else:
            self._params = params

    def get_params(self) -> Dict:
        """Get the parameters

        Returns
        -------
        Dict
            Parameters
        """
        return self._params

    def set_params_from_file(self, file_path: str):
        """Set the parameters from a file

        Parameters
        ----------
        file_path : str
            File path
        """
        self._params, self._current_model = params_view.load_file(file_path)

    def set_current_model(self, model: str):
        """Set the current model

        Parameters
        ----------
        model : str
            Model
        """
        self._current_model = model

    def get_current_model(self) -> str:
        """Get the current model

        Returns
        -------
        str
            Current model, empty string if none has been set
        """
        return self._current_model

    def set_returns(self, returns: pd.DataFrame):
        """Set the stock returns

        Parameters
        ----------
        returns : pd.DataFrame
            Stock returns
        """
        self._returns = returns

    def get_returns(self) -> pd.DataFrame:
        """Get the stock returns

        Returns
        -------
        pd.DataFrame
            Stock returns, empty DataFrame if none have been set
        """
        # Guard against None as well: callers may never have set returns.
        if self._returns is None or self._returns.empty:
            console.print("No returns found. Please perform some optimization.")
            return pd.DataFrame()
        return self._returns
__docformat__ = "numpy"
# pylint: disable=R0913, R0914, C0302, too-many-branches, too-many-statements, line-too-long
# flake8: noqa: E501
import logging
import math
import warnings
from datetime import date
from typing import Any, Dict, List, Optional
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import riskfolio as rp
from dateutil.relativedelta import relativedelta, FR
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import plot_autoscale, print_rich_table
from openbb_terminal.portfolio.portfolio_optimization import (
optimizer_helper,
optimizer_model,
)
from openbb_terminal.rich_config import console
# NOTE(review): broad suppression — presumably silences noisy warnings from the
# optimization backends (riskfolio/cvxpy); confirm this is intentional.
warnings.filterwarnings("ignore")

logger = logging.getLogger(__name__)

# Maps the lowercase CLI objective flag to the name riskfolio expects.
objectives_choices = {
    "minrisk": "MinRisk",
    "sharpe": "Sharpe",
    "utility": "Utility",
    "maxret": "MaxRet",
    "erc": "ERC",
}

# Human-readable names for each risk measure, used in console output.
risk_names = {
    "mv": "volatility",
    "mad": "mean absolute deviation",
    "gmd": "gini mean difference",
    "msv": "semi standard deviation",
    "var": "value at risk (VaR)",
    "cvar": "conditional value at risk (CVaR)",
    "tg": "tail gini",
    "evar": "entropic value at risk (EVaR)",
    "rg": "range",
    "cvrg": "CVaR range",
    "tgrg": "tail gini range",
    "wr": "worst realization",
    "flpm": "first lower partial moment",
    "slpm": "second lower partial moment",
    "mdd": "maximum drawdown uncompounded",
    "add": "average drawdown uncompounded",
    "dar": "drawdown at risk (DaR) uncompounded",
    "cdar": "conditional drawdown at risk (CDaR) uncompounded",
    "edar": "entropic drawdown at risk (EDaR) uncompounded",
    "uci": "ulcer index uncompounded",
    "mdd_rel": "maximum drawdown compounded",
    "add_rel": "average drawdown compounded",
    "dar_rel": "drawdown at risk (DaR) compounded",
    "cdar_rel": "conditional drawdown at risk (CDaR) compounded",
    "edar_rel": "entropic drawdown at risk (EDaR) compounded",
    "uci_rel": "ulcer index compounded",
}

# Maps the lowercase CLI risk flag to the riskfolio risk-measure code.
risk_choices = {
    "mv": "MV",
    "mad": "MAD",
    "gmd": "GMD",
    "msv": "MSV",
    "var": "VaR",
    "cvar": "CVaR",
    "tg": "TG",
    "evar": "EVaR",
    "rg": "RG",
    "cvrg": "CVRG",
    "tgrg": "TGRG",
    "wr": "WR",
    "flpm": "FLPM",
    "slpm": "SLPM",
    "mdd": "MDD",
    "add": "ADD",
    "dar": "DaR",
    "cdar": "CDaR",
    "edar": "EDaR",
    "uci": "UCI",
    "mdd_rel": "MDD_Rel",
    "add_rel": "ADD_Rel",
    "dar_rel": "DaR_Rel",
    "cdar_rel": "CDaR_Rel",
    "edar_rel": "EDaR_Rel",
    "uci_rel": "UCI_Rel",
}

# Number of return observations per year for each sampling frequency,
# used to annualize means and volatilities.
time_factor = {
    "D": 252.0,
    "W": 52.0,
    "M": 12.0,
}

# NOTE(review): appears to translate short argument names to parameter-file
# keys — confirm against the parameters loader.
dict_conversion = {"period": "historic_period", "start": "start_period"}
@log_start_end(log=logger)
def d_period(interval: str = "1y", start_date: str = "", end_date: str = ""):
    """
    Builds a date range string

    Parameters
    ----------
    interval : str
        Interval starting today (e.g. '3y', '6mo', '5d', 'ytd', 'max').
        Ignored when `start_date` is provided.
    start_date: str
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.

    Returns
    -------
    str
        Human-readable date range, e.g. '[3 Years]', '[Year-to-Date]' or
        '[From 2020-01-01 to 2021-01-01]'.
    """
    extra_choices = {
        "ytd": "[Year-to-Date]",
        "max": "[All-time]",
    }

    if start_date == "":
        if interval in extra_choices:
            p = extra_choices[interval]
        else:
            # Fallback keeps `p` defined even for unrecognized interval
            # suffixes (the original could raise UnboundLocalError here).
            p = "[" + interval + "]"
            if interval.endswith("d"):
                p = "[" + interval[:-1] + " Days]"
            elif interval.endswith("w"):
                p = "[" + interval[:-1] + " Weeks]"
            elif interval.endswith("mo"):
                p = "[" + interval[:-2] + " Months]"
            elif interval.endswith("y"):
                p = "[" + interval[:-1] + " Years]"
            if p[1:3] == "1 ":
                # Singularize: '[1 Days]' -> '[1 Day]'
                p = p.replace("s", "")
    else:
        if end_date == "":
            end_ = date.today()
            # Roll weekend dates back to the previous Friday.
            if end_.weekday() >= 5:
                end_ = end_ + relativedelta(weekday=FR(-1))
            end_date = end_.strftime("%Y-%m-%d")
        p = "[From " + start_date + " to " + end_date + "]"
    return p
@log_start_end(log=logger)
def portfolio_performance(
    weights: dict,
    data: pd.DataFrame,
    freq: str = "D",
    risk_measure: str = "MV",
    risk_free_rate: float = 0,
    alpha: float = 0.05,
    a_sim: float = 100,
    beta: float = None,
    b_sim: float = None,
):
    """
    Prints portfolio performance indicators

    Parameters
    ----------
    weights: dict
        Portfolio weights
    data: pd.DataFrame
        Stock returns dataframe
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    risk_measure : str, optional
        The risk measure used. The default is 'MV'. Possible values are:
        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.
    risk_free_rate : float, optional
        risk free rate.
    alpha : float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of
        losses. The default is 0.05.
    a_sim : float, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default
        is 100.
    beta : float, optional
        Significance level of CVaR and Tail Gini of gains. If None it
        duplicates alpha value. The default is None.
    b_sim : float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None
        it duplicates a_sim value. The default is None.
    """
    try:
        freq = freq.upper()
        weights = pd.Series(weights).to_frame()
        # Per-period portfolio returns implied by the weights.
        returns = data @ weights
        # Annualize mean linearly and volatility by the square-root-of-time rule.
        mu = returns.mean().item() * time_factor[freq]
        sigma = returns.std().item() * time_factor[freq] ** 0.5
        sharpe = (mu - risk_free_rate) / sigma
        factor_1 = str(int(time_factor[freq])) + ") "
        factor_2 = "√" + factor_1
        print("\nAnnual (by " + factor_1 + f"expected return: {100 * mu:.2f}%")
        print("Annual (by " + factor_2 + f"volatility: {100 * sigma:.2f}%")
        print(f"Sharpe ratio: {sharpe:.4f}")
        if risk_measure != "MV":
            # Delegate the non-variance risk measures to riskfolio.
            risk = rp.Sharpe_Risk(
                weights,
                cov=data.cov(),
                returns=data,
                rm=risk_measure,
                rf=risk_free_rate,
                alpha=alpha,
                a_sim=a_sim,
                beta=beta,
                b_sim=b_sim,
            )
            # Drawdown-type measures are reported as-is (not annualized).
            drawdowns = [
                "MDD",
                "ADD",
                "DaR",
                "CDaR",
                "EDaR",
                "UCI",
                "MDD_Rel",
                "ADD_Rel",
                "DaR_Rel",
                "CDaR_Rel",
                "EDaR_Rel",
                "UCI_Rel",
            ]
            if risk_measure in drawdowns:
                sharpe_2 = (mu - risk_free_rate) / risk
                print(
                    risk_names[risk_measure.lower()].capitalize()
                    + " : "
                    + f"{100 * risk:.2f}%"
                )
            else:
                # Non-drawdown measures are annualized like volatility.
                risk = risk * time_factor[freq] ** 0.5
                sharpe_2 = (mu - risk_free_rate) / risk
                print(
                    "Annual (by "
                    + factor_2
                    + risk_names[risk_measure.lower()]
                    + " : "
                    + f"{100 * risk:.2f}%"
                )
            print(
                "Return / "
                + risk_names[risk_measure.lower()]
                + f" ratio: {sharpe_2:.4f}"
            )
    # Broad catch is deliberate: riskfolio can raise for incompatible
    # argument combinations, and this is a best-effort console report.
    except Exception as _:
        console.print(
            "[red]\nFailed to calculate portfolio performance indicators.[/red]"
        )
@log_start_end(log=logger)
def display_weights(weights: dict, market_neutral: bool = False):
    """
    Prints weights in a nice format

    Parameters
    ----------
    weights: dict
        weights to display. Keys are stocks. Values are either weights or values
    market_neutral : bool
        Flag indicating shorting allowed (negative weights)
    """
    df = optimizer_helper.dict_to_df(weights)
    if df.empty:
        return

    # Values summing to roughly 1 are treated as percentage weights,
    # anything else as dollar amounts.
    if math.isclose(df.sum()["value"], 1, rel_tol=0.5):
        formatted = (df["value"] * 100).apply(lambda v: f"{v:.2f}") + " %"
        df["value"] = formatted.astype(str).apply(lambda s: s.rjust(8))
    else:
        formatted = (df["value"] * 100).apply(lambda v: f"{v:.0f}") + " $"
        df["value"] = formatted.astype(str).apply(lambda s: s.rjust(16))

    if market_neutral:
        tot_value = df["value"].abs().mean()
        header = "Value ($)" if tot_value > 1.01 else "Value (%)"
        print_rich_table(df, headers=[header], show_index=True, title="Weights")
    else:
        print_rich_table(df, headers=["Value"], show_index=True, title="Weights")
@log_start_end(log=logger)
def display_weights_sa(weights: dict, weights_sa: dict):
    """
    Prints a comparison table of weights vs. sensitivity-analysis weights

    Parameters
    ----------
    weights: dict
        weights to display. Keys are stocks. Values are either weights or values
    weights_sa: dict
        weights of sensitivity analysis to display. Keys are stocks.
        Values are either weights or values
    """
    if not weights or not weights_sa:
        return

    base = pd.DataFrame.from_dict(
        data=weights, orient="index", columns=["value"], dtype=float
    )
    sa = pd.DataFrame.from_dict(
        data=weights_sa, orient="index", columns=["value s.a."], dtype=float
    )
    table = base.join(sa, how="inner")
    # Difference column computed before any string formatting is applied.
    table["value vs value s.a."] = table["value"] - table["value s.a."]

    def _as_percent(series, width):
        # Format as 'xx.xx %' and right-align to the column width.
        out = (series * 100).apply(lambda v: f"{v:.2f}") + " %"
        return out.astype(str).apply(lambda s: s.rjust(width))

    table["value"] = _as_percent(table["value"], 8)
    table["value s.a."] = _as_percent(table["value s.a."], len("value s.a."))
    table["value vs value s.a."] = _as_percent(
        table["value vs value s.a."], len("value vs value s.a.")
    )

    headers = [s.title() for s in list(table.columns)]
    print_rich_table(
        table, headers=headers, show_index=True, title="Weights Comparison"
    )
@log_start_end(log=logger)
def display_categories(
    weights: dict, categories: dict, column: str = "ASSET_CLASS", title: str = ""
):
    """
    Prints the portfolio weights aggregated by a category column

    Parameters
    ----------
    weights: dict
        weights to display. Keys are stocks. Values are either weights or values
    categories: dict
        categories to display. Keys are stocks. Values are either weights or values
    column: str
        column selected to show table
        - ASSET_CLASS
        - SECTOR
        - INDUSTRY
        - COUNTRY
    title: str
        title to display
    """
    if column == "CURRENT_INVESTED_AMOUNT":
        console.print(f"[yellow]'{column}' cannot be displayed as a category.[/yellow]")
        return

    df = optimizer_model.get_categories(weights, categories, column)
    if df.empty:
        return

    # Optimized weights as 'xx.xx %', right-aligned.
    pct = (df["value"] * 100).apply(lambda v: f"{v:.2f}") + " %"
    df["value"] = pct.astype(str).apply(lambda s: s.rjust(8))

    # Current weights as 'xx.xx %', right-aligned to the header width.
    cur = (df["CURRENT_WEIGHTS"] * 100).apply(lambda v: f"{v:.2f}") + " %"
    df["CURRENT_WEIGHTS"] = cur.astype(str).apply(
        lambda s: s.rjust(len("CURRENT_WEIGHTS"))
    )

    # Invested amounts as thousands-separated dollars, right-aligned.
    amount = df["CURRENT_INVESTED_AMOUNT"].apply(lambda v: f"{v:,.0f}") + " $"
    df["CURRENT_INVESTED_AMOUNT"] = amount.astype(str).apply(
        lambda s: s.rjust(len("CURRENT_INVESTED_AMOUNT"))
    )

    # Move the category level out of the index so it shows as a column.
    df.reset_index(level=1, inplace=True)
    headers = [s.title() for s in list(df.columns)]
    show_index = column != "CURRENCY"
    print_rich_table(df, headers=headers, show_index=show_index, title=title)
@log_start_end(log=logger)
def display_categories_sa(
    weights: dict, weights_sa: dict, categories: dict, column: str, title: str = ""
):
    """
    Print a category breakdown comparing weights with their sensitivity-analysis
    counterparts, including per-currency TOTAL rows.

    Parameters
    ----------
    weights: dict
        Weights to display. Keys are stocks; values are weights or values.
    weights_sa: dict
        Weights of sensitivity analysis to display. Keys are stocks; values are
        weights or values.
    categories: dict
        Categories to display. Keys are stocks; values are weights or values.
    column: str
        Category column to group by: ASSET_CLASS, SECTOR, INDUSTRY or COUNTRY.
    title: str
        Title shown above the table.
    """
    if not weights or not weights_sa:
        return

    def _pad(series, width: int):
        # Right-align cells so numeric columns line up under their headers.
        return series.astype(str).apply(lambda cell: cell.rjust(width))

    def _pct(series, width: int):
        # Render a fraction column as a fixed-width percentage string.
        return _pad((series * 100).apply(lambda v: f"{v:.2f}") + " %", width)

    weight_df = pd.DataFrame.from_dict(
        data=weights, orient="index", columns=["value"], dtype=float
    )
    weight_sa_df = pd.DataFrame.from_dict(
        data=weights_sa, orient="index", columns=["value s.a."], dtype=float
    )
    categories_df = pd.DataFrame.from_dict(data=categories, dtype=float)

    # Keep the selected category column plus the columns at positions 4 and 5
    # (amount and currency, per the categories frame layout used upstream).
    col = list(categories_df.columns).index(column)
    categories_df = weight_df.join(categories_df.iloc[:, [col, 4, 5]], how="inner")
    categories_df = categories_df.join(weight_sa_df, how="inner")
    categories_df.set_index(column, inplace=True)

    table_df = pd.pivot_table(
        categories_df,
        values=["value", "value s.a.", "CURRENT_INVESTED_AMOUNT"],
        index=["CURRENCY", column],
        aggfunc="sum",  # string form; aggfunc=np.sum is deprecated in pandas
    )

    # Express every amount/weight as a fraction of its currency bucket.
    table_df["CURRENT_WEIGHTS"] = (
        table_df["CURRENT_INVESTED_AMOUNT"]
        .groupby(level=0)
        .transform(lambda x: x / sum(x))
    )
    table_df["value"] = (
        table_df["value"].groupby(level=0).transform(lambda x: x / sum(x))
    )
    table_df["value s.a."] = (
        table_df["value s.a."].groupby(level=0).transform(lambda x: x / sum(x))
    )

    # Append a "TOTAL <currency>" row to each currency group.
    # DataFrame.append was removed in pandas 2.0, so build the total row
    # explicitly and concatenate it instead.
    table_df = pd.concat(
        [
            pd.concat([d, d.sum().rename((k, "TOTAL " + k)).to_frame().T])
            for k, d in table_df.groupby(level=0)
        ]
    )
    table_df["value vs value s.a."] = table_df["value"] - table_df["value s.a."]
    table_df = table_df.iloc[:, [0, 3, 1, 2, 4]]

    table_df["value"] = _pct(table_df["value"], 8)
    table_df["value s.a."] = _pct(table_df["value s.a."], len("value s.a."))
    table_df["value vs value s.a."] = _pct(
        table_df["value vs value s.a."], len("value vs value s.a.")
    )
    table_df["CURRENT_WEIGHTS"] = _pct(
        table_df["CURRENT_WEIGHTS"], len("CURRENT_WEIGHTS")
    )
    table_df["CURRENT_INVESTED_AMOUNT"] = _pad(
        table_df["CURRENT_INVESTED_AMOUNT"].apply(lambda v: f"{v:,.0f}") + " $",
        len("CURRENT_INVESTED_AMOUNT"),
    )

    table_df.reset_index(inplace=True)
    table_df.set_index("CURRENCY", inplace=True)
    headers = [name.title() for name in list(table_df.columns)]
    print_rich_table(table_df, headers=headers, show_index=True, title=title)
@log_start_end(log=logger)
def display_equal_weight(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "mv",
    risk_free_rate: float = 0,
    alpha: float = 0.05,
    value: float = 1,
    table: bool = False,
) -> Dict:
    """
    Equally weighted portfolio, where weight = 1/# of symbols

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False.
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to compute performance indicators.
        The default is 'mv'. Possible values are:
        - 'mv': Standard Deviation.
        - 'mad': Mean Absolute Deviation.
        - 'msv': Semi Standard Deviation.
        - 'flpm': First Lower Partial Moment (Omega Ratio).
        - 'slpm': Second Lower Partial Moment (Sortino Ratio).
        - 'cvar': Conditional Value at Risk.
        - 'evar': Entropic Value at Risk.
        - 'wr': Worst Realization.
        - 'add': Average Drawdown of uncompounded cumulative returns.
        - 'uci': Ulcer Index of uncompounded cumulative returns.
        - 'cdar': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'edar': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'mdd': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR.
    value : float, optional
        Amount to allocate to portfolio, by default 1.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker; empty dict if no
        solution was found.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Equally Weighted Portfolio\n"

    weights, stock_returns = optimizer_model.get_equal_weights(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        value=value,
    )

    if not weights:
        console.print("There is no solution with these parameters.")
        return {}

    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            freq=freq,
        )

    return weights
@log_start_end(log=logger)
def display_property_weighting(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    s_property: str = "marketCap",
    risk_measure: str = "mv",
    risk_free_rate: float = 0,
    alpha: float = 0.05,
    value: float = 1,
    table: bool = False,
) -> Dict[str, float]:
    """
    Build and display a portfolio weighted by a selected ticker property.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers.
    interval : str, optional
        Interval to look at returns from.
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD).
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). Empty means the
        last weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns (default False).
    freq: str, optional
        Frequency used to calculate returns: 'D' daily, 'W' weekly, 'M' monthly.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values (default 'time'); see pandas
        `DataFrame.interpolate` documentation.
    s_property : str
        Property to weight the portfolio by (e.g. 'marketCap').
    risk_measure: str, optional
        Risk measure used to compute performance indicators (default 'mv').
        Accepts: mv, mad, msv, flpm, slpm, cvar, evar, wr, add, uci, cdar,
        edar, mdd.
    risk_free_rate: float, optional
        Risk free rate, in the same interval as asset returns. Used for
        'FLPM', 'SLPM' and the Sharpe objective (default 0).
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR.
    value : float, optional
        Amount to allocate to the portfolio, by default 1.0.
    table: bool, optional
        If True, print the weights table, by default False.
    """
    period_label = d_period(interval, start_date, end_date)
    title_text = f"{period_label} Weighted Portfolio based on {s_property}\n"

    weights, stock_returns = optimizer_model.get_property_weights(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        s_property=s_property,
        value=value,
    )

    if not weights:
        console.print("There is no solution with these parameters.")
        return {}

    if table:
        console.print(title_text)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            freq=freq,
        )

    return weights
@log_start_end(log=logger)
def display_mean_risk(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "mv",
    objective: str = "sharpe",
    risk_free_rate: float = 0,
    risk_aversion: float = 1,
    alpha: float = 0.05,
    target_return: float = -1,
    target_risk: float = -1,
    mean: str = "hist",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    value_short: float = 0.0,
    table: bool = False,
) -> Dict:
    """
    Builds a mean risk optimal portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    objective: str
        Objective function of the optimization model.
        The default is 'Sharpe'. Possible values are:
        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker; empty dict if no
        solution was found.
    """
    p = d_period(interval, start_date, end_date)

    # Map each objective to its title fragment; using a dict with a generic
    # fallback avoids an UnboundLocalError when an unexpected objective slips
    # through (the original if/elif chain had no else branch).
    objective_titles = {
        "sharpe": "Maximal return/risk ratio portfolio using ",
        "minrisk": "Minimum risk portfolio using ",
        "maxret": "Maximal return portfolio using ",
        "utility": "Maximal risk averse utility function portfolio using ",
    }
    s_title = f"{p} " + objective_titles.get(objective, "Optimal portfolio using ")
    s_title += risk_names[risk_measure] + " as risk measure\n"

    weights, stock_returns = optimizer_model.get_mean_risk_portfolio(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        risk_measure=risk_choices[risk_measure],
        objective=objectives_choices[objective],
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        target_return=target_return,
        target_risk=target_risk,
        mean=mean,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
        value_short=value_short,
    )

    if not weights:
        console.print("There is no solution with these parameters.")
        return {}

    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            freq=freq,
        )

    return weights
@log_start_end(log=logger)
def display_max_sharpe(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "MV",
    risk_free_rate: float = 0,
    risk_aversion: float = 1,
    alpha: float = 0.05,
    target_return: float = -1,
    target_risk: float = -1,
    mean: str = "hist",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    value_short: float = 0.0,
    table: bool = False,
) -> Dict:
    """
    Builds a maximal return/risk ratio portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker; empty dict if no
        solution was found.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Maximal return/risk ratio portfolio using "
    s_title += risk_names[risk_measure.lower()] + " as risk measure\n"

    weights, stock_returns = optimizer_model.get_max_sharpe(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        risk_measure=risk_measure,
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        target_return=target_return,
        target_risk=target_risk,
        mean=mean,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
        value_short=value_short,
    )

    if not weights:
        console.print("There is no solution with these parameters.")
        return {}

    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            # Lowercase the key: the default "MV" is uppercase, while the
            # risk mappings are keyed lowercase (see risk_names usage above).
            risk_measure=risk_choices[risk_measure.lower()],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            freq=freq,
        )

    return weights
@log_start_end(log=logger)
def display_min_risk(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "MV",
    risk_free_rate: float = 0,
    risk_aversion: float = 1,
    alpha: float = 0.05,
    target_return: float = -1,
    target_risk: float = -1,
    mean: str = "hist",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    value_short: float = 0.0,
    table: bool = False,
) -> Dict:
    """
    Builds a minimum risk portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker; empty dict if no
        solution was found.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Minimum risk portfolio using "
    s_title += risk_names[risk_measure.lower()] + " as risk measure\n"

    weights, stock_returns = optimizer_model.get_min_risk(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        risk_measure=risk_measure,
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        target_return=target_return,
        target_risk=target_risk,
        mean=mean,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
        value_short=value_short,
    )

    if not weights:
        console.print("There is no solution with these parameters.")
        return {}

    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            # Lowercase the key: the default "MV" is uppercase, while the
            # risk mappings are keyed lowercase (see risk_names usage above).
            risk_measure=risk_choices[risk_measure.lower()],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            freq=freq,
        )

    return weights
@log_start_end(log=logger)
def display_max_util(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "MV",
    risk_free_rate: float = 0,
    risk_aversion: float = 1,
    alpha: float = 0.05,
    target_return: float = -1,
    target_risk: float = -1,
    mean: str = "hist",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    value_short: float = 0.0,
    table: bool = False,
) -> Dict:
    """
    Builds a maximal risk averse utility portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker; empty dict if no
        solution was found.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Maximal risk averse utility function portfolio using "
    s_title += risk_names[risk_measure.lower()] + " as risk measure\n"

    weights, stock_returns = optimizer_model.get_max_util(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        risk_measure=risk_measure,
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        target_return=target_return,
        target_risk=target_risk,
        mean=mean,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
        value_short=value_short,
    )

    if not weights:
        console.print("There is no solution with these parameters.")
        return {}

    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            # Lowercase the key: the default "MV" is uppercase, while the
            # risk mappings are keyed lowercase (see risk_names usage above).
            risk_measure=risk_choices[risk_measure.lower()],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            freq=freq,
        )

    return weights
@log_start_end(log=logger)
def display_max_ret(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "MV",
    risk_free_rate: float = 0,
    risk_aversion: float = 1,
    alpha: float = 0.05,
    target_return: float = -1,
    target_risk: float = -1,
    mean: str = "hist",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    value_short: float = 0.0,
    table: bool = False,
) -> Dict:
    """
    Builds a maximal return portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker. Empty when the
        optimizer finds no feasible solution.
    """
    p = d_period(interval, start_date, end_date)
    # Title reflects the maximal-return objective of this function.
    # (Previously said "Maximal risk averse utility function portfolio",
    # copy-pasted from display_max_util.)
    s_title = f"{p} Maximal return portfolio using "
    s_title += risk_names[risk_measure.lower()] + " as risk measure\n"
    weights, stock_returns = optimizer_model.get_max_ret(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        risk_measure=risk_measure,
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        target_return=target_return,
        target_risk=target_risk,
        mean=mean,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
        value_short=value_short,
    )
    if not weights:
        console.print("There is no solution with these parameters.")
        return {}
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            # risk_choices is keyed by lower-case measure codes elsewhere in
            # this module (see the .lower() lookups in display_ef), so
            # normalize before looking up.
            risk_measure=risk_choices[risk_measure.lower()],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_max_div(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    value_short: float = 0.0,
    table: bool = False,
) -> Dict:
    """
    Builds a maximal diversification portfolio

    Computes optimal weights via the maximal-diversification model and,
    when requested, prints the weight table and portfolio performance.

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False
    """
    period_label = d_period(interval, start_date, end_date)
    title = f"{period_label} Maximal diversification portfolio\n"
    # Collect the model arguments once so the call below stays readable.
    model_kwargs = dict(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
        value_short=value_short,
    )
    weights, stock_returns = optimizer_model.get_max_diversification_portfolio(
        **model_kwargs
    )
    # An empty weights dict means the optimizer found no feasible solution.
    if not weights:
        console.print("There is no solution with these parameters.")
        return {}
    if table:
        console.print(title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure="MV",
            risk_free_rate=0,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_max_decorr(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    value_short: float = 0.0,
    table: bool = False,
) -> Dict:
    """
    Builds a maximal decorrelation portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker. Empty when the
        optimizer finds no feasible solution.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Maximal decorrelation portfolio\n"
    weights, stock_returns = optimizer_model.get_max_decorrelation_portfolio(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
        value_short=value_short,
    )
    if not weights:
        # Message aligned with the grammatically correct wording used by
        # the other display_* functions in this module.
        console.print("There is no solution with these parameters.")
        return {}
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure="MV",
            risk_free_rate=0,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_black_litterman(
    symbols: List[str],
    p_views: Optional[List] = None,
    q_views: Optional[List] = None,
    benchmark: Optional[Dict] = None,
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    objective: str = "Sharpe",
    risk_free_rate: float = 0,
    risk_aversion: float = 1,
    delta: Optional[float] = None,
    equilibrium: bool = True,
    optimize: bool = True,
    value: float = 1.0,
    value_short: float = 0,
    table: bool = False,
) -> Dict:
    """
    Builds a black litterman portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    p_views: List
        Matrix P of views that shows relationships among assets and returns.
        Default value to None.
    q_views: List
        Matrix Q of expected returns of views. Default value is None.
    benchmark : Dict
        Dict of portfolio weights
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    objective: str
        Objective function of the optimization model.
        The default is 'Sharpe'. Possible values are:
        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.
    risk_free_rate: float, optional
        Risk free rate, must be in annual frequency. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    delta: float, optional
        Risk aversion factor of Black Litterman model. Default value is None.
    equilibrium: bool, optional
        If True excess returns are based on equilibrium market portfolio, if False
        excess returns are calculated as historical returns minus risk free rate.
        Default value is True.
    optimize: bool, optional
        If True Black Litterman estimates are used as inputs of mean variance model,
        if False returns equilibrium weights from Black Litterman model
        Default value is True.
    value : float, optional
        Amount of money to allocate. The default is 1.
    value_short : float, optional
        Amount to allocate to portfolio in short positions. The default is 0.
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker. Empty when the
        optimizer finds no feasible solution.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Black Litterman portfolio\n"
    weights, stock_returns = optimizer_model.get_black_litterman_portfolio(
        symbols=symbols,
        benchmark=benchmark,
        p_views=p_views,
        q_views=q_views,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        objective=objectives_choices[objective],
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        delta=delta,
        equilibrium=equilibrium,
        optimize=optimize,
        value=value,
        value_short=value_short,
    )
    if not weights:
        # Message aligned with the grammatically correct wording used by
        # the other display_* functions in this module.
        console.print("There is no solution with these parameters.")
        return {}
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure="MV",
            risk_free_rate=0,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_ef(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "MV",
    risk_free_rate: float = 0,
    alpha: float = 0.05,
    value: float = 1.0,
    value_short: float = 0.0,
    n_portfolios: int = 100,
    seed: int = 123,
    tangency: bool = False,
    plot_tickers: bool = True,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """
    Display efficient frontier

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
        The default is 0.05.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    n_portfolios: int, optional
        Number of portfolios to simulate. The default value is 100.
    seed: int, optional
        Seed used to generate random portfolios. The default value is 123.
    tangency: bool, optional
        Adds the optimal line with the risk-free asset.
    plot_tickers: bool
        Whether to plot the tickers for the assets
    external_axes: Optional[List[plt.Axes]]
        Optional axes to plot data on
    """
    frontier, mu, cov, stock_returns, weights, X1, Y1, port = optimizer_model.get_ef(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        risk_measure=risk_measure,
        risk_free_rate=risk_free_rate,
        alpha=alpha,
        value=value,
        value_short=value_short,
        n_portfolios=n_portfolios,
        seed=seed,
    )
    try:
        # Convert the annualized risk-free rate to the returns' frequency.
        risk_free_rate = risk_free_rate / time_factor[freq.upper()]
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]
        ax = rp.plot_frontier(
            w_frontier=frontier,
            mu=mu,
            cov=cov,
            returns=stock_returns,
            rm=risk_choices[risk_measure.lower()],
            rf=risk_free_rate,
            alpha=alpha,
            cmap="RdYlBu",
            w=weights,
            label="",
            marker="*",
            s=16,
            c="r",
            t_factor=time_factor[freq.upper()],
            ax=ax,
        )

        # Add the capital allocation line through the risk-free asset and
        # the maximum Sharpe portfolio.
        if tangency:
            ret_sharpe = (mu @ weights).to_numpy().item() * time_factor[freq.upper()]
            risk_sharpe = rp.Sharpe_Risk(
                weights,
                cov=cov,
                returns=stock_returns,
                rm=risk_choices[risk_measure.lower()],
                rf=risk_free_rate,
                alpha=alpha,
            )
            # Drawdown-based measures are not annualized with sqrt(time).
            if risk_choices[risk_measure.lower()] not in [
                "ADD",
                "MDD",
                "CDaR",
                "EDaR",
                "UCI",
            ]:
                risk_sharpe = risk_sharpe * time_factor[freq.upper()] ** 0.5

            y = ret_sharpe * 1.5
            b = risk_free_rate * time_factor[freq.upper()]
            m = (ret_sharpe - b) / risk_sharpe
            x2 = (y - b) / m
            x = [0, x2]
            y = [b, y]
            line = Line2D(x, y, label="Capital Allocation Line")
            ax.set_xlim(xmin=min(X1) * 0.8)
            ax.add_line(line)

        ax.plot(X1, Y1, color="b")

        # NOTE(fix): previously `plot_tickers = True` was set unconditionally
        # here, which clobbered the caller's parameter and made it impossible
        # to disable ticker scatter plotting. The parameter is now honored.
        if plot_tickers:
            ticker_plot = pd.DataFrame(columns=["ticker", "var"])
            for ticker in port.cov.columns:
                weight_df = pd.DataFrame({"weights": 1}, index=[ticker])
                risk = rp.Sharpe_Risk(
                    weight_df,
                    cov=port.cov[ticker][ticker],
                    returns=stock_returns.loc[:, [ticker]],
                    rm=risk_choices[risk_measure.lower()],
                    rf=risk_free_rate,
                    alpha=alpha,
                )
                if risk_choices[risk_measure.lower()] not in [
                    "MDD",
                    "ADD",
                    "CDaR",
                    "EDaR",
                    "UCI",
                ]:
                    risk = risk * time_factor[freq.upper()] ** 0.5
                # DataFrame.append was removed in pandas 2.0; use pd.concat.
                ticker_plot = pd.concat(
                    [ticker_plot, pd.DataFrame([{"ticker": ticker, "var": risk}])],
                    ignore_index=True,
                )
            ticker_plot = ticker_plot.set_index("ticker")
            ticker_plot = ticker_plot.merge(
                port.mu.T * time_factor[freq.upper()], right_index=True, left_index=True
            )
            ticker_plot = ticker_plot.rename(columns={0: "ret"})
            ax.scatter(ticker_plot["var"], ticker_plot["ret"])
            for row in ticker_plot.iterrows():
                ax.annotate(row[0], (row[1]["var"], row[1]["ret"]))

        ax.set_title(f"Efficient Frontier simulating {n_portfolios} portfolios")
        ax.legend(loc="best", scatterpoints=1)
        theme.style_primary_axis(ax)

        # Shrink the main axes to make room for the colorbar axes on the right.
        left, bottom, width, height = ax.get_position().bounds
        ax.set_position([left, bottom, width * 0.9, height])
        fig_axes = ax.get_figure().axes
        cb_left, cb_bottom, cb_width, cb_height = fig_axes[-1].get_position().bounds
        fig_axes[-1].set_position([cb_left * 1.02, cb_bottom, cb_width, cb_height])
        if external_axes is None:
            theme.visualize_output(force_tight_layout=False)
    except Exception:
        # Plotting is best-effort: log for diagnostics, tell the user, and
        # return without raising so the terminal session continues.
        logger.exception("Error plotting efficient frontier")
        console.print("[red]Error plotting efficient frontier.[/red]")
@log_start_end(log=logger)
def display_risk_parity(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    risk_measure: str = "mv",
    risk_cont: Optional[List[str]] = None,
    risk_free_rate: float = 0,
    alpha: float = 0.05,
    target_return: float = -1,
    mean: str = "hist",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    table: bool = False,
) -> Dict:
    """
    Builds a risk parity portfolio using the risk budgeting approach

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str
        interval to look at returns from
    start_date: str
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
        - X (integer days) for returns calculated every X days.
    maxnan: float
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float
        Value used to replace outliers that are higher than threshold.
    method: str
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    risk_measure: str
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
    risk_cont: List[str], optional
        The vector of risk contribution per asset. If empty, the default is
        1/n (number of assets).
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns. Used for
        'FLPM' and 'SLPM' and Sharpe objective function. The default is 0.
    alpha: float, optional
        Significance level of CVaR, EVaR, CDaR and EDaR
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio, by default 1.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker. Empty when the
        optimizer finds no feasible solution.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Risk parity portfolio based on risk budgeting approach\n"
    # Normalize the measure code: risk_names/risk_choices are keyed
    # lower-case, and callers may pass either case (e.g. "MV" or "mv").
    s_title += "using " + risk_names[risk_measure.lower()] + " as risk measure\n"
    weights, stock_returns = optimizer_model.get_risk_parity_portfolio(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        risk_measure=risk_choices[risk_measure.lower()],
        risk_cont=risk_cont,
        risk_free_rate=risk_free_rate,
        alpha=alpha,
        target_return=target_return,
        mean=mean,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
    )
    if not weights:
        # Message aligned with the grammatically correct wording used by
        # the other display_* functions in this module.
        console.print("There is no solution with these parameters.")
        return {}
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure.lower()],
            risk_free_rate=risk_free_rate,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_rel_risk_parity(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    version: str = "A",
    risk_cont: Optional[List[str]] = None,
    penal_factor: float = 1,
    target_return: float = -1,
    mean: str = "hist",
    covariance: str = "hist",
    d_ewma: float = 0.94,
    value: float = 1.0,
    table: bool = False,
) -> Dict:
    """
    Builds a relaxed risk parity portfolio using the least squares approach

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str, optional
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
        - X (integer days) for returns calculated every X days.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher than threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    version : str, optional
        Relaxed risk parity model version. The default is 'A'.
        Possible values are:
        - 'A': without regularization and penalization constraints.
        - 'B': with regularization constraint but without penalization constraint.
        - 'C': with regularization and penalization constraints.
    risk_cont: List[str], optional
        The vector of risk contribution per asset. If empty, the default is
        1/n (number of assets).
    penal_factor: float, optional
        The penalization factor of penalization constraints. Only used with
        version 'C'. The default is 1.
    target_return: float, optional
        Constraint on minimum level of portfolio's return.
    mean: str, optional
        The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio, by default 1.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights keyed by ticker. Empty when the
        optimizer finds no feasible solution.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Relaxed risk parity portfolio based on least squares approach\n"
    weights, stock_returns = optimizer_model.get_rel_risk_parity_portfolio(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        # The model expects an upper-case version code; accept either case.
        version=version.upper(),
        risk_cont=risk_cont,
        penal_factor=penal_factor,
        target_return=target_return,
        mean=mean,
        covariance=covariance,
        d_ewma=d_ewma,
        value=value,
    )
    if not weights:
        # Message aligned with the grammatically correct wording used by
        # the other display_* functions in this module.
        console.print("There is no solution with these parameters.")
        return {}
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices["mv"],
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_hcp(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    model: str = "HRP",
    codependence: str = "pearson",
    covariance: str = "hist",
    objective: str = "minrisk",
    risk_measure: str = "mv",
    risk_free_rate: float = 0.0,
    risk_aversion: float = 1.0,
    alpha: float = 0.05,
    a_sim: int = 100,
    beta: float = None,
    b_sim: int = None,
    linkage: str = "ward",
    k: int = None,
    max_k: int = 10,
    bins_info: str = "KN",
    alpha_tail: float = 0.05,
    leaf_order: bool = True,
    d_ewma: float = 0.94,
    value: float = 1.0,
    table: bool = False,
) -> Dict:
    """
    Builds a hierarchical clustering portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    model: str, optional
        The hierarchical cluster portfolio model used for optimize the
        portfolio. The default is 'HRP'. Possible values are:

        - 'HRP': Hierarchical Risk Parity.
        - 'HERC': Hierarchical Equal Risk Contribution.
        - 'NCO': Nested Clustered Optimization.

    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:

        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}.
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}.
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}.
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}.
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}.
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}.

    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `c-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `c-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `c-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `c-MLforAM`.

    objective: str, optional
        Objective function used by the NCO model.
        The default is 'minrisk'. Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'ERC': Equally risk contribution portfolio of the selected risk measure.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio. If model is 'NCO',
        the risk measures available depends on the objective function.
        The default is 'MV'. Possible values are:

        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns.
        Used for 'FLPM' and 'SLPM'. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: int, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: int, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html>`__.
        The default is 'ward'. Possible values are:

        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.

    k: int, optional
        Number of clusters. This value is used instead of the optimal number
        of clusters calculated with the two difference gap statistic.
        The default is None.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters. The default is 10.
    bins_info: str, optional
        Number of bins used to calculate variation of information. The default
        value is 'KN'. Possible values are:

        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scotts' choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier' choice method.

    alpha_tail: float, optional
        Significance level for lower tail dependence index. The default is 0.05.
    leaf_order: bool, optional
        Indicates if the cluster are ordered so that the distance between
        successive leaves is minimal. The default is True.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio, by default 1.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights. Empty dict if no solution was found.
    """
    p = d_period(interval, start_date, end_date)
    if model == "HRP":
        s_title = f"{p} Hierarchical risk parity portfolio"
        s_title += " using " + codependence + " codependence,\n" + linkage
    elif model == "HERC":
        s_title = f"{p} Hierarchical equal risk contribution portfolio"
        s_title += " using " + codependence + "\ncodependence," + linkage
    elif model == "NCO":
        s_title = f"{p} Nested clustered optimization"
        s_title += " using " + codependence + " codependence,\n" + linkage
    else:
        # Fallback: with an unrecognized model the original code raised
        # UnboundLocalError on the unconditional append below.
        s_title = f"{p} {model} portfolio"
        s_title += " using " + codependence + " codependence,\n" + linkage
    s_title += " linkage and " + risk_names[risk_measure] + " as risk measure\n"
    weights, stock_returns = optimizer_model.get_hcp_portfolio(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        model=model,
        codependence=codependence,
        covariance=covariance,
        objective=objectives_choices[objective],
        risk_measure=risk_choices[risk_measure],
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        a_sim=a_sim,
        beta=beta,
        b_sim=b_sim,
        linkage=linkage,
        k=k,
        max_k=max_k,
        bins_info=bins_info,
        alpha_tail=alpha_tail,
        leaf_order=leaf_order,
        d_ewma=d_ewma,
        value=value,
    )

    if not weights:
        console.print("There is no solution with this parameters")
        return {}

    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            a_sim=a_sim,
            beta=beta,
            b_sim=b_sim,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_hrp(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    codependence: str = "pearson",
    covariance: str = "hist",
    risk_measure: str = "mv",
    risk_free_rate: float = 0.0,
    risk_aversion: float = 1.0,
    alpha: float = 0.05,
    a_sim: int = 100,
    beta: float = None,
    b_sim: int = None,
    linkage: str = "single",
    k: int = 0,
    max_k: int = 10,
    bins_info: str = "KN",
    alpha_tail: float = 0.05,
    leaf_order: bool = True,
    d_ewma: float = 0.94,
    value: float = 1.0,
    table: bool = False,
) -> Dict:
    """
    Builds a hierarchical risk parity portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:

        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}.
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}.
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}.
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}.
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}.
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}.

    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `c-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `c-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `c-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `c-MLforAM`.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:

        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns.
        Used for 'FLPM' and 'SLPM'. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: int, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: int, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html>`__.
        The default is 'single'. Possible values are:

        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.

    k: int, optional
        Number of clusters. This value is used instead of the optimal number
        of clusters calculated with the two difference gap statistic.
        The default is 0.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters. The default is 10.
    bins_info: str, optional
        Number of bins used to calculate variation of information. The default
        value is 'KN'. Possible values are:

        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scotts' choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier' choice method.

    alpha_tail: float, optional
        Significance level for lower tail dependence index. The default is 0.05.
    leaf_order: bool, optional
        Indicates if the cluster are ordered so that the distance between
        successive leaves is minimal. The default is True.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio, by default 1.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights. Empty dict if no solution was found.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Hierarchical risk parity portfolio"
    s_title += " using " + codependence + " codependence,\n" + linkage
    s_title += " linkage and " + risk_names[risk_measure] + " as risk measure\n"
    weights, stock_returns = optimizer_model.get_hrp(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        codependence=codependence,
        covariance=covariance,
        risk_measure=risk_choices[risk_measure],
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        a_sim=a_sim,
        beta=beta,
        b_sim=b_sim,
        linkage=linkage,
        k=k,
        max_k=max_k,
        bins_info=bins_info,
        alpha_tail=alpha_tail,
        leaf_order=leaf_order,
        d_ewma=d_ewma,
        value=value,
    )
    # An empty weights dict means the optimizer found no feasible solution.
    if not weights:
        console.print("There is no solution with this parameters")
        return {}
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            a_sim=a_sim,
            beta=beta,
            b_sim=b_sim,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_herc(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    codependence: str = "pearson",
    covariance: str = "hist",
    risk_measure: str = "mv",
    risk_free_rate: float = 0.0,
    risk_aversion: float = 1.0,
    alpha: float = 0.05,
    a_sim: int = 100,
    beta: float = None,
    b_sim: int = None,
    linkage: str = "ward",
    k: int = 0,
    max_k: int = 10,
    bins_info: str = "KN",
    alpha_tail: float = 0.05,
    leaf_order: bool = True,
    d_ewma: float = 0.94,
    value: float = 1.0,
    table: bool = False,
) -> Dict:
    """
    Builds a hierarchical equal risk contribution portfolio

    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:

        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.

    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:

        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}.
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}.
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}.
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}.
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}.
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}.

    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `c-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `c-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `c-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `c-MLforAM`.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio.
        The default is 'MV'. Possible values are:

        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns.
        Used for 'FLPM' and 'SLPM'. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: int, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: int, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html>`__.
        The default is 'ward'. Possible values are:

        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.

    k: int, optional
        Number of clusters. This value is used instead of the optimal number
        of clusters calculated with the two difference gap statistic.
        The default is 0.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters. The default is 10.
    bins_info: str, optional
        Number of bins used to calculate variation of information. The default
        value is 'KN'. Possible values are:

        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scotts' choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier' choice method.

    alpha_tail: float, optional
        Significance level for lower tail dependence index. The default is 0.05.
    leaf_order: bool, optional
        Indicates if the cluster are ordered so that the distance between
        successive leaves is minimal. The default is True.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio, by default 1.0
    table: bool, optional
        True if plot table weights, by default False

    Returns
    -------
    Dict
        Dictionary of portfolio weights. Empty dict if no solution was found.
    """
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Hierarchical equal risk contribution portfolio"
    s_title += " using " + codependence + "\ncodependence," + linkage
    s_title += " linkage and " + risk_names[risk_measure] + " as risk measure\n"
    weights, stock_returns = optimizer_model.get_herc(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        codependence=codependence,
        covariance=covariance,
        risk_measure=risk_choices[risk_measure],
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        a_sim=a_sim,
        beta=beta,
        b_sim=b_sim,
        linkage=linkage,
        k=k,
        max_k=max_k,
        bins_info=bins_info,
        alpha_tail=alpha_tail,
        leaf_order=leaf_order,
        d_ewma=d_ewma,
        value=value,
    )
    # An empty weights dict means the optimizer found no feasible solution.
    if not weights:
        console.print("There is no solution with this parameters")
        return {}
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            a_sim=a_sim,
            beta=beta,
            b_sim=b_sim,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def display_nco(
    symbols: List[str],
    interval: str = "3y",
    start_date: str = "",
    end_date: str = "",
    log_returns: bool = False,
    freq: str = "D",
    maxnan: float = 0.05,
    threshold: float = 0,
    method: str = "time",
    codependence: str = "pearson",
    covariance: str = "hist",
    objective: str = "MinRisk",
    risk_measure: str = "mv",
    risk_free_rate: float = 0.0,
    risk_aversion: float = 1.0,
    alpha: float = 0.05,
    a_sim: int = 100,
    beta: Optional[float] = None,
    b_sim: Optional[int] = None,
    linkage: str = "ward",
    k: Optional[int] = None,
    max_k: int = 10,
    bins_info: str = "KN",
    alpha_tail: float = 0.05,
    leaf_order: bool = True,
    d_ewma: float = 0.94,
    value: float = 1.0,
    table: bool = False,
) -> Dict:
    """
    Builds a nested clustered optimization (NCO) portfolio
    Parameters
    ----------
    symbols : List[str]
        List of portfolio tickers
    interval : str
        interval to look at returns from
    start_date: str, optional
        If not using interval, start date string (YYYY-MM-DD)
    end_date: str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last
        weekday.
    log_returns: bool, optional
        If True calculate log returns, else arithmetic returns. Default value
        is False
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    maxnan: float, optional
        Max percentage of nan values accepted per asset to be included in
        returns.
    threshold: float, optional
        Value used to replace outliers that are higher to threshold.
    method: str, optional
        Method used to fill nan values. Default value is 'time'. For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:
        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}.
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}.
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}.
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}.
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}.
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}.
    covariance: str, optional
        The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `c-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `c-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `c-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `c-MLforAM`.
    objective: str, optional
        Objective function used by the NCO model.
        The default is 'MinRisk'. Possible values are:
        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'ERC': Equally risk contribution portfolio of the selected risk measure.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio. If model is 'NCO',
        the risk measures available depends on the objective function.
        The default is 'MV'. Possible values are:
        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns.
        Used for 'FLPM' and 'SLPM'. The default is 0.
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function.
        The default is 1.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html>`__.
        The default is 'ward'. Possible values are:
        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.
    k: int, optional
        Number of clusters. This value is took instead of the optimal number
        of clusters calculated with the two difference gap statistic.
        The default is None.
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters. The default is 10.
    bins_info: str, optional
        Number of bins used to calculate variation of information. The default
        value is 'KN'. Possible values are:
        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scotts' choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier' choice method.
    alpha_tail: float, optional
        Significance level for lower tail dependence index. The default is 0.05.
    leaf_order: bool, optional
        Indicates if the cluster are ordered so that the distance between
        successive leaves is minimal. The default is True.
    d_ewma: float, optional
        The smoothing factor of ewma methods.
        The default is 0.94.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    table: bool, optional
        True if plot table weights, by default False
    Returns
    -------
    Dict
        Dictionary of optimal weights per ticker; empty dict when the
        optimizer found no feasible solution.
    """
    # Human-readable title describing the lookback period, codependence
    # metric, linkage method and risk measure used.
    p = d_period(interval, start_date, end_date)
    s_title = f"{p} Nested clustered optimization"
    s_title += " using " + codependence + " codependence,\n" + linkage
    s_title += " linkage and " + risk_names[risk_measure] + " as risk measure\n"
    # Heavy lifting is delegated to the model layer; this view only formats.
    weights, stock_returns = optimizer_model.get_nco(
        symbols=symbols,
        interval=interval,
        start_date=start_date,
        end_date=end_date,
        log_returns=log_returns,
        freq=freq,
        maxnan=maxnan,
        threshold=threshold,
        method=method,
        codependence=codependence,
        covariance=covariance,
        objective=objectives_choices[objective.lower()],
        risk_measure=risk_choices[risk_measure],
        risk_free_rate=risk_free_rate,
        risk_aversion=risk_aversion,
        alpha=alpha,
        a_sim=a_sim,
        beta=beta,
        b_sim=b_sim,
        linkage=linkage,
        k=k,
        max_k=max_k,
        bins_info=bins_info,
        alpha_tail=alpha_tail,
        leaf_order=leaf_order,
        d_ewma=d_ewma,
        value=value,
    )
    if not weights:
        console.print("There is no solution with this parameters")
        return {}
    # Optionally print the weights table plus performance statistics.
    if table:
        console.print(s_title)
        display_weights(weights)
        portfolio_performance(
            weights=weights,
            data=stock_returns,
            risk_measure=risk_choices[risk_measure],
            risk_free_rate=risk_free_rate,
            alpha=alpha,
            a_sim=a_sim,
            beta=beta,
            b_sim=b_sim,
            freq=freq,
        )
    return weights
@log_start_end(log=logger)
def my_autopct(x):
    """Percentage formatter for plt.pie's ``autopct`` argument.

    Slices worth 4 % or less get an empty label, so tiny wedges are not
    cluttered with unreadable text.
    """
    return f"{x:.2f} %" if x > 4 else ""
@log_start_end(log=logger)
def pie_chart_weights(
    weights: dict, title_opt: str, external_axes: Optional[List[plt.Axes]]
):
    """Show a pie chart of holdings
    Parameters
    ----------
    weights: dict
        Weights to display, where keys are tickers, and values are either weights or values if -v specified
    title_opt: str
        Title to be used on the plot title
    external_axes: Optional[List[plt.Axes]]
        Optional external axes to plot data on
    """
    if not weights:
        return
    init_stocks = list(weights.keys())
    init_sizes = list(weights.values())
    symbols = []
    sizes = []
    # Keep only strictly positive positions; zero and short positions are
    # omitted from the pie.
    for stock, size in zip(init_stocks, init_sizes):
        if size > 0:
            symbols.append(stock)
            sizes.append(size)
    total_size = np.sum(sizes)
    colors = theme.get_colors()
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    # If the sizes already sum to ~1 (within 10 %) they are treated as
    # portfolio fractions: label slices with percentages via my_autopct,
    # which hides slices at or below 4 %.
    if math.isclose(sum(sizes), 1, rel_tol=0.1):
        _, _, autotexts = ax.pie(
            sizes,
            labels=symbols,
            autopct=my_autopct,
            colors=colors,
            textprops=dict(color="white"),
            wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
            labeldistance=1.05,
            startangle=45,
            normalize=True,
        )
        plt.setp(autotexts, color="white", fontweight="bold")
    # Otherwise sizes are absolute amounts: draw without autopct labels and
    # annotate each slice manually with its raw value, hiding slices that
    # are 5 % of the total or smaller.
    else:
        _, _, autotexts = ax.pie(
            sizes,
            labels=symbols,
            autopct="",
            colors=colors,
            textprops=dict(color="white"),
            wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
            labeldistance=1.05,
            startangle=45,
            normalize=True,
        )
        plt.setp(autotexts, color="white", fontweight="bold")
        for i, a in enumerate(autotexts):
            if sizes[i] / total_size > 0.05:
                a.set_text(f"{sizes[i]:.2f}")
            else:
                a.set_text("")
    ax.axis("equal")
    # leg1 = ax.legend(
    #     wedges,
    #     [str(s) for s in stocks],
    #     title="  Ticker",
    #     loc="upper left",
    #     bbox_to_anchor=(0.80, 0, 0.5, 1),
    #     frameon=False,
    # )
    # leg2 = ax.legend(
    #     wedges,
    #     [
    #         f"{' ' if ((100*s/total_size) < 10) else ''}{100*s/total_size:.2f}%"
    #         for s in sizes
    #     ],
    #     title=" ",
    #     loc="upper left",
    #     handlelength=0,
    #     bbox_to_anchor=(0.91, 0, 0.5, 1),
    #     frameon=False,
    # )
    # ax.add_artist(leg1)
    # ax.add_artist(leg2)
    plt.setp(autotexts, size=8, weight="bold")
    title = "Portfolio - " + title_opt + "\n"
    title += "Portfolio Composition"
    ax.set_title(title)
    # Only render the figure when we created it ourselves.
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def additional_plots(
    weights: Dict,
    data: pd.DataFrame,
    category_dict: Optional[Dict] = None,
    category: str = "",
    portfolio_name: str = "",
    freq: str = "D",
    risk_measure: str = "MV",
    risk_free_rate: float = 0,
    alpha: float = 0.05,
    a_sim: float = 100,
    beta: Optional[float] = None,
    b_sim: Optional[float] = None,
    pie: bool = False,
    hist: bool = False,
    dd: bool = False,
    rc_chart: bool = False,
    heat: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """
    Plot additional charts
    Parameters
    ----------
    weights: Dict
        Dict of portfolio weights
    data: pd.DataFrame
        DataFrame of stock returns
    category_dict: Dict
        Dict of categories
    category: str
        Category to plot
    portfolio_name: str
        Portfolio name
    freq: str, optional
        The frequency used to calculate returns. Default value is 'D'. Possible
        values are:
        - 'D' for daily returns.
        - 'W' for weekly returns.
        - 'M' for monthly returns.
    risk_measure: str, optional
        The risk measure used to optimize the portfolio. If model is 'NCO',
        the risk measures available depends on the objective function.
        The default is 'MV'. Possible values are:
        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, must be in the same interval of assets returns.
        Used for 'FLPM' and 'SLPM'. The default is 0.
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses.
        The default is 0.05.
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses. The default is 100.
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value.
        The default is None.
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value.
        The default is None.
    pie : bool, optional
        Display a pie chart of values, by default False
    hist : bool, optional
        Display a histogram with risk measures, by default False
    dd : bool, optional
        Display a drawdown chart with risk measures, by default False
    rc_chart : bool, optional
        Display a risk contribution chart for assets, by default False
    heat : bool, optional
        Display a heatmap of correlation matrix with dendrogram, by default False
    external_axes: Optional[List[plt.Axes]]
        Optional axes to plot data on
    """
    title_opt = category if not portfolio_name else category + " - " + portfolio_name
    # When a category mapping is given, aggregate asset weights and returns
    # into class-level weights and returns before plotting.
    if category_dict is not None:
        weights_df = pd.DataFrame.from_dict(
            data=weights, orient="index", columns=["value"], dtype=float
        )
        category_df = pd.DataFrame.from_dict(
            data=category_dict, orient="index", columns=["category"]
        )
        weights_df = weights_df.join(category_df, how="inner")
        weights_df.sort_index(inplace=True)
        # Calculating classes returns
        classes = list(set(weights_df["category"]))
        weights_classes = weights_df.groupby(["category"]).sum()
        # matrix_classes maps assets -> classes; column j holds each asset's
        # weight as a fraction of its class total, so `data @ matrix_classes`
        # yields class-level return series.
        matrix_classes = np.zeros((len(weights_df), len(classes)))
        labels = weights_df["category"].tolist()
        j_value = 0
        for i in classes:
            matrix_classes[:, j_value] = np.array(
                [1 if x == i else 0 for x in labels], dtype=float
            )
            matrix_classes[:, j_value] = (
                matrix_classes[:, j_value]
                * weights_df["value"]
                / weights_classes.loc[i, "value"]
            )
            j_value += 1
        matrix_classes = pd.DataFrame(
            matrix_classes, columns=classes, index=weights_df.index
        )
        data = data @ matrix_classes
        # Drop empty classes and align data columns with the sorted weights.
        weights_df = weights_classes["value"].copy()
        weights_df.replace(0, np.nan, inplace=True)
        weights_df.dropna(inplace=True)
        weights_df.sort_values(ascending=True, inplace=True)
        data = data[weights_df.index.tolist()]
        data.columns = [i.title() for i in data.columns]
        weights_df.index = [i.title() for i in weights_df.index]
        weights = weights_df.to_dict()
    colors = theme.get_colors()
    if pie:
        pie_chart_weights(weights, title_opt, external_axes)
    if hist:
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]
        ax = rp.plot_hist(data, w=pd.Series(weights).to_frame(), alpha=alpha, ax=ax)
        ax.legend(fontsize="x-small", loc="best")
        # Changing colors
        for i in ax.get_children()[:-1]:
            if isinstance(i, matplotlib.patches.Rectangle):
                i.set_color(colors[0])
                i.set_alpha(0.7)
        # Re-color the risk-measure lines to match the terminal theme,
        # keeping legend entries and plotted lines in sync.
        k = 1
        for i, j in zip(ax.get_legend().get_lines()[::-1], ax.get_lines()[::-1]):
            i.set_color(colors[k])
            j.set_color(colors[k])
            k += 1
        title = "Portfolio - " + title_opt + "\n"
        title += ax.get_title(loc="left")
        ax.set_title(title)
        if external_axes is None:
            theme.visualize_output()
    if dd:
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]
        nav = data.cumsum()
        ax = rp.plot_drawdown(
            nav=nav, w=pd.Series(weights).to_frame(), alpha=alpha, ax=ax
        )
        # plot_drawdown returns two axes; drop the cumulative-returns panel
        # and expand the drawdown panel to fill the whole figure.
        ax[0].remove()
        ax = ax[1]
        fig = ax.get_figure()
        gs = GridSpec(1, 1, figure=fig)
        ax.set_position(gs[0].get_position(fig))
        ax.set_subplotspec(gs[0])
        # Changing colors
        ax.get_lines()[0].set_color(colors[0])
        k = 1
        for i, j in zip(ax.get_legend().get_lines()[::-1], ax.get_lines()[1:][::-1]):
            i.set_color(colors[k])
            j.set_color(colors[k])
            k += 1
        ax.get_children()[1].set_facecolor(colors[0])
        ax.get_children()[1].set_alpha(0.7)
        title = "Portfolio - " + title_opt + "\n"
        title += ax.get_title(loc="left")
        ax.set_title(title)
        if external_axes is None:
            theme.visualize_output()
    if rc_chart:
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]
        ax = rp.plot_risk_con(
            w=pd.Series(weights).to_frame(),
            cov=data.cov(),
            returns=data,
            rm=risk_choices[risk_measure.lower()],
            rf=risk_free_rate,
            alpha=alpha,
            a_sim=a_sim,
            beta=beta,
            b_sim=b_sim,
            color=colors[1],
            t_factor=time_factor[freq.upper()],
            ax=ax,
        )
        # Changing colors
        for i in ax.get_children()[:-1]:
            if isinstance(i, matplotlib.patches.Rectangle):
                # NOTE(review): set_width(get_width()) is a no-op — possibly
                # a leftover from an earlier bar-resizing step; confirm intent.
                i.set_width(i.get_width())
                i.set_color(colors[0])
        title = "Portfolio - " + title_opt + "\n"
        title += ax.get_title(loc="left")
        ax.set_title(title)
        if external_axes is None:
            theme.visualize_output()
    if heat:
        # A clustered heatmap needs at least two series.
        if len(weights) == 1:
            single_key = list(weights.keys())[0].upper()
            console.print(
                f"[yellow]Heatmap needs at least two values for '{category}', only found '{single_key}'.[/yellow]"
            )
            return
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]
        if len(weights) <= 3:
            number_of_clusters = len(weights)
        else:
            number_of_clusters = None
        ax = rp.plot_clusters(
            returns=data,
            codependence="pearson",
            linkage="ward",
            k=number_of_clusters,
            max_k=10,
            leaf_order=True,
            dendrogram=True,
            cmap="RdYlBu",
            # linecolor='tab:purple',
            ax=ax,
        )
        # NOTE(review): the repositioning below relies on plot_clusters'
        # axes order (ax[1]=heatmap, ax[2]=colorbar, ax[3]/ax[4]=dendrograms)
        # — confirm this holds across riskfolio versions.
        ax = ax.get_figure().axes
        ax[0].grid(False)
        ax[0].axis("off")
        if category_dict is None:
            # Vertical dendrogram
            l, b, w, h = ax[4].get_position().bounds
            l1 = l * 0.5
            w1 = w * 0.2
            b1 = h * 0.05
            ax[4].set_position([l - l1, b + b1, w * 0.8, h * 0.95])
            # Heatmap
            l, b, w, h = ax[1].get_position().bounds
            ax[1].set_position([l - l1 - w1, b + b1, w * 0.8, h * 0.95])
            w2 = w * 0.2
            # colorbar
            l, b, w, h = ax[2].get_position().bounds
            ax[2].set_position([l - l1 - w1 - w2, b, w, h])
            # Horizontal dendrogram
            l, b, w, h = ax[3].get_position().bounds
            ax[3].set_position([l - l1 - w1, b, w * 0.8, h])
        else:
            # Vertical dendrogram
            l, b, w, h = ax[4].get_position().bounds
            l1 = l * 0.5
            w1 = w * 0.4
            b1 = h * 0.2
            ax[4].set_position([l - l1, b + b1, w * 0.6, h * 0.8])
            # Heatmap
            l, b, w, h = ax[1].get_position().bounds
            ax[1].set_position([l - l1 - w1, b + b1, w * 0.6, h * 0.8])
            w2 = w * 0.05
            # colorbar
            l, b, w, h = ax[2].get_position().bounds
            ax[2].set_position([l - l1 - w1 - w2, b, w, h])
            # Horizontal dendrogram
            l, b, w, h = ax[3].get_position().bounds
            ax[3].set_position([l - l1 - w1, b, w * 0.6, h])
        title = "Portfolio - " + title_opt + "\n"
        title += ax[3].get_title(loc="left")
        ax[3].set_title(title)
        if external_axes is None:
            theme.visualize_output(force_tight_layout=True)
def display_show(weights: Dict, tables: List[str], categories_dict: Dict[Any, Any]):
    """Display the results of the optimization.

    Prints the weights table, then one category-breakdown table per entry
    in ``tables``.

    Parameters
    ----------
    weights : Dict
        Dictionary of weights.
    tables : List[str]
        List of tables to display.
    categories_dict : Dict[Any, Any]
        Dictionary of categories.
    """
    display_weights(weights)
    for column_name in tables:
        # Blank line separates the weights table from each category table.
        console.print("")
        display_categories(
            weights=weights,
            categories=categories_dict,
            column=column_name,
            title=f"Category - {column_name.title()}",
        )
__docformat__ = "numpy"
# pylint: disable=R0913, R0914, C0302, too-many-branches, too-many-statements, line-too-long
# flake8: noqa: E501
import logging
import math
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import riskfolio as rp
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import plot_autoscale
from openbb_terminal.portfolio.portfolio_optimization.po_model import (
validate_inputs,
get_ef,
)
from openbb_terminal.portfolio.portfolio_optimization.statics import (
RISK_CHOICES,
TIME_FACTOR,
)
from openbb_terminal.portfolio.portfolio_optimization.optimizer_helper import get_kwarg
from openbb_terminal.portfolio.portfolio_optimization.po_engine import PoEngine
from openbb_terminal.rich_config import console
warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_ef(portfolio_engine: PoEngine = None, **kwargs):
    """Display efficient frontier

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.
    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    n_portfolios: int, optional
        Number of portfolios to simulate, by default 100
    seed: int, optional
        Seed used to generate random portfolios, by default 123

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
                "SECTOR": {
                    "AAPL": "INFORMATION TECHNOLOGY",
                    "MSFT": "INFORMATION TECHNOLOGY",
                    "AMZN": "CONSUMER DISCRETIONARY",
                },
                "CURRENT_INVESTED_AMOUNT": {
                    "AAPL": "100000.0",
                    "MSFT": "200000.0",
                    "AMZN": "300000.0",
                },
                "CURRENCY": {
                    "AAPL": "USD",
                    "MSFT": "USD",
                    "AMZN": "USD",
                },
            }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> openbb.portfolio.po.ef_chart(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> openbb.portfolio.po.ef_chart(portfolio_engine=p)
    """
    n_portfolios = get_kwarg("n_portfolios", kwargs)
    freq = get_kwarg("freq", kwargs)
    risk_measure = get_kwarg("risk_measure", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    alpha = get_kwarg("alpha", kwargs)
    # Pop chart args
    tangency = kwargs.pop("tangency", False)
    plot_tickers = kwargs.pop("plot_tickers", False)
    external_axes = kwargs.pop("external_axes", None)
    frontier, mu, cov, stock_returns, weights, X1, Y1, port = get_ef(
        portfolio_engine,
        **kwargs,
    )
    # De-annualize the risk-free rate to the returns frequency.
    risk_free_rate = risk_free_rate / TIME_FACTOR[freq.upper()]
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    ax = rp.plot_frontier(
        w_frontier=frontier,
        mu=mu,
        cov=cov,
        returns=stock_returns,
        rm=RISK_CHOICES[risk_measure.lower()],
        rf=risk_free_rate,
        alpha=alpha,
        cmap="RdYlBu",
        w=weights,
        label="",
        marker="*",
        s=16,
        c="r",
        t_factor=TIME_FACTOR[freq.upper()],
        ax=ax,
    )
    # Add risk free line
    if tangency:
        # Annualized return and risk of the max-Sharpe portfolio, used to
        # draw the Capital Allocation Line from the risk-free rate.
        ret_sharpe = (mu @ weights).to_numpy().item() * TIME_FACTOR[freq.upper()]
        risk_sharpe = rp.Sharpe_Risk(
            weights,
            cov=cov,
            returns=stock_returns,
            rm=RISK_CHOICES[risk_measure.lower()],
            rf=risk_free_rate,
            alpha=alpha,
            # a_sim=a_sim,
            # beta=beta,
            # b_sim=b_sim,
        )
        # Drawdown-based measures are not annualized by sqrt-of-time.
        if RISK_CHOICES[risk_measure.lower()] not in [
            "ADD",
            "MDD",
            "CDaR",
            "EDaR",
            "UCI",
        ]:
            risk_sharpe = risk_sharpe * TIME_FACTOR[freq.upper()] ** 0.5
        y = ret_sharpe * 1.5
        b = risk_free_rate * TIME_FACTOR[freq.upper()]
        m = (ret_sharpe - b) / risk_sharpe
        x2 = (y - b) / m
        x = [0, x2]
        y = [b, y]
        line = Line2D(x, y, label="Capital Allocation Line")
        ax.set_xlim(xmin=min(X1) * 0.8)
        ax.add_line(line)
    ax.plot(X1, Y1, color="b")
    if tangency:
        plot_tickers = True
    if plot_tickers:
        # Compute each ticker's stand-alone (weight=1) risk and expected
        # return so the assets can be scattered onto the frontier chart.
        ticker_rows = []
        for ticker in port.cov.columns:
            weight_df = pd.DataFrame({"weights": 1}, index=[ticker])
            risk = rp.Sharpe_Risk(
                weight_df,
                cov=port.cov[ticker][ticker],
                returns=stock_returns.loc[:, [ticker]],
                rm=RISK_CHOICES[risk_measure.lower()],
                rf=risk_free_rate,
                alpha=alpha,
            )
            if RISK_CHOICES[risk_measure.lower()] not in [
                "MDD",
                "ADD",
                "CDaR",
                "EDaR",
                "UCI",
            ]:
                risk = risk * TIME_FACTOR[freq.upper()] ** 0.5
            # DataFrame.append was removed in pandas 2.0; collect rows in a
            # list and build the frame once instead.
            ticker_rows.append({"ticker": ticker, "var": risk})
        ticker_plot = pd.DataFrame(ticker_rows, columns=["ticker", "var"])
        ticker_plot = ticker_plot.set_index("ticker")
        ticker_plot = ticker_plot.merge(
            port.mu.T * TIME_FACTOR[freq.upper()], right_index=True, left_index=True
        )
        ticker_plot = ticker_plot.rename(columns={0: "ret"})
        ax.scatter(ticker_plot["var"], ticker_plot["ret"])
        for row in ticker_plot.iterrows():
            ax.annotate(row[0], (row[1]["var"], row[1]["ret"]))
    ax.set_title(f"Efficient Frontier simulating {n_portfolios} portfolios")
    ax.legend(loc="best", scatterpoints=1)
    theme.style_primary_axis(ax)
    # Shrink the main axis and nudge the colorbar so both fit the figure.
    l, b, w, h = ax.get_position().bounds
    ax.set_position([l, b, w * 0.9, h])
    ax1 = ax.get_figure().axes
    ll, bb, ww, hh = ax1[-1].get_position().bounds
    ax1[-1].set_position([ll * 1.02, bb, ww, hh])
    if external_axes is None:
        theme.visualize_output(force_tight_layout=False)
@log_start_end(log=logger)
def display_plot(portfolio_engine: PoEngine = None, chart_type: str = "pie", **kwargs):
    """
    Display efficient frontier

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    chart_type : str, optional
        Chart type, by default "pie"
        Options are "pie", "hist", "dd" or "rc"

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
                "SECTOR": {
                    "AAPL": "INFORMATION TECHNOLOGY",
                    "MSFT": "INFORMATION TECHNOLOGY",
                    "AMZN": "CONSUMER DISCRETIONARY",
                },
                "CURRENT_INVESTED_AMOUNT": {
                    "AAPL": "100000.0",
                    "MSFT": "200000.0",
                    "AMZN": "300000.0",
                },
                "CURRENCY": {
                    "AAPL": "USD",
                    "MSFT": "USD",
                    "AMZN": "USD",
                },
            }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.equal(portfolio_engine=p)
    >>> p.get_available_categories()
    ['SECTOR', 'CURRENCY']
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="SECTOR", chart_type="pie")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="SECTOR", chart_type="hist")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="SECTOR", chart_type="dd")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="SECTOR", chart_type="rc")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="SECTOR", chart_type="heat")

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.equal(portfolio_engine=p)
    >>> p.get_available_categories()
    ['ASSET_CLASS',
     'SECTOR',
     'INDUSTRY',
     'COUNTRY',
     'CURRENCY']
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="ASSET_CLASS", chart_type="pie")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="SECTOR", chart_type="hist")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="INDUSTRY", chart_type="dd")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="COUNTRY", chart_type="rc")
    >>> openbb.portfolio.po.plot(portfolio_engine=p, category="ASSET_CLASS", chart_type="heat")
    """
    # Validate the engine and the requested category before any work.
    if portfolio_engine is None:
        console.print("No portfolio engine found.")
        return
    available_categories = portfolio_engine.get_available_categories()
    if not available_categories:
        console.print("No categories found.")
        return
    msg = ", ".join(available_categories)
    if "category" not in kwargs:
        console.print(f"Please specify a category from the following: {msg}")
        return
    if kwargs["category"] not in available_categories:
        console.print(f"Please specify a category from the following: {msg}")
        return
    category = kwargs["category"]
    _, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine=portfolio_engine, kwargs=kwargs
    )
    weights: pd.DataFrame = valid_portfolio_engine.get_weights_df()
    if weights.empty:
        return
    data: pd.DataFrame = valid_portfolio_engine.get_returns()
    if data.empty:
        return
    # Aggregate asset weights and returns into class-level series for the
    # chosen category (same scheme as additional_plots above).
    if category:
        category_dict = valid_portfolio_engine.get_category(category)
        category_df = pd.DataFrame.from_dict(
            data=category_dict, orient="index", columns=["category"]
        )
        weights = weights.join(category_df, how="inner")
        weights.sort_index(inplace=True)
        # Calculating classes returns
        classes = list(set(weights["category"]))
        weights_classes = weights.groupby(["category"]).sum()
        # Column j of matrix_classes holds each asset's weight as a fraction
        # of its class total, so `data @ matrix_classes` yields class returns.
        matrix_classes = np.zeros((len(weights), len(classes)))
        labels = weights["category"].tolist()
        j_value = 0
        for i in classes:
            matrix_classes[:, j_value] = np.array(
                [1 if x == i else 0 for x in labels], dtype=float
            )
            matrix_classes[:, j_value] = (
                matrix_classes[:, j_value]
                * weights["value"]
                / weights_classes.loc[i, "value"]
            )
            j_value += 1
        matrix_classes = pd.DataFrame(
            matrix_classes, columns=classes, index=weights.index
        )
        data = data @ matrix_classes
        # Drop empty classes and align the return columns with the weights.
        weights = weights_classes["value"].copy()
        weights.replace(0, np.nan, inplace=True)
        weights.dropna(inplace=True)
        weights.sort_values(ascending=True, inplace=True)
        data = data[weights.index.tolist()]
        data.columns = [i.title() for i in data.columns]
        weights.index = [i.title() for i in weights.index]
        weights = weights.to_dict()
    valid_kwargs["weights"] = weights
    valid_kwargs["data"] = data
    valid_kwargs["colors"] = theme.get_colors()
    # Dispatch to the chart-specific display helper.
    if chart_type == "pie":
        display_pie(**valid_kwargs)
    elif chart_type == "hist":
        display_hist(**valid_kwargs)
    elif chart_type == "dd":
        display_dd(**valid_kwargs)
    elif chart_type == "rc":
        display_rc(**valid_kwargs)
    elif chart_type == "heat":
        display_heat(**valid_kwargs)
    else:
        console.print(
            "Invalid chart type, please choose from the following: pie, hist, dd, rc, heat"
        )
@log_start_end(log=logger)
def display_heat(**kwargs):
    """Display a clustered correlation heatmap (with dendrograms) of portfolio returns.

    Parameters (all passed via kwargs)
    ----------
    weights: dict
        Portfolio weights keyed by ticker (or by category when grouped).
    data: pd.DataFrame
        Returns dataframe whose columns correspond to the weights' keys.
    category: str, optional
        Category label used in the warning message; also selects the tighter
        subplot layout branch below.
    title: str
        Suffix appended to the plot title.
    external_axes: Optional[List[plt.Axes]]
        Optional external axes to plot on; when None a new figure is created.
    """
    weights = kwargs.get("weights", None)
    data = kwargs.get("data", None)
    category = kwargs.get("category", None)
    title = kwargs.get("title", "")
    external_axes = kwargs.get("external_axes", None)
    # A heatmap with dendrograms needs at least two assets to cluster.
    # NOTE(review): `weights` is assumed to be a non-empty dict here; a None
    # value would raise TypeError on len() — confirm with callers.
    if len(weights) == 1:
        single_key = list(weights.keys())[0].upper()
        console.print(
            f"[yellow]Heatmap needs at least two values for '{category}', only found '{single_key}'.[/yellow]"
        )
        return
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    # With three or fewer assets force one cluster per asset; otherwise let
    # riskfolio pick the cluster count (bounded by max_k).
    if len(weights) <= 3:
        number_of_clusters = len(weights)
    else:
        number_of_clusters = None
    ax = rp.plot_clusters(
        returns=data,
        codependence="pearson",
        linkage="ward",
        k=number_of_clusters,
        max_k=10,
        leaf_order=True,
        dendrogram=True,
        cmap="RdYlBu",
        # linecolor='tab:purple',
        ax=ax,
    )
    # riskfolio draws several axes on one figure; from the repositioning below
    # the layout appears to be: ax[0] background, ax[1] heatmap, ax[2] colorbar,
    # ax[3] horizontal dendrogram, ax[4] vertical dendrogram — TODO confirm.
    ax = ax.get_figure().axes
    ax[0].grid(False)
    ax[0].axis("off")
    if category is None:
        # Shift and shrink the subplots to make room; offsets are empirical.
        # Vertical dendrogram
        l, b, w, h = ax[4].get_position().bounds
        l1 = l * 0.5
        w1 = w * 0.2
        b1 = h * 0.05
        ax[4].set_position([l - l1, b + b1, w * 0.8, h * 0.95])
        # Heatmap
        l, b, w, h = ax[1].get_position().bounds
        ax[1].set_position([l - l1 - w1, b + b1, w * 0.8, h * 0.95])
        w2 = w * 0.2
        # colorbar
        l, b, w, h = ax[2].get_position().bounds
        ax[2].set_position([l - l1 - w1 - w2, b, w, h])
        # Horizontal dendrogram
        l, b, w, h = ax[3].get_position().bounds
        ax[3].set_position([l - l1 - w1, b, w * 0.8, h])
    else:
        # Same repositioning with tighter margins when grouped by category.
        # Vertical dendrogram
        l, b, w, h = ax[4].get_position().bounds
        l1 = l * 0.5
        w1 = w * 0.4
        b1 = h * 0.2
        ax[4].set_position([l - l1, b + b1, w * 0.6, h * 0.8])
        # Heatmap
        l, b, w, h = ax[1].get_position().bounds
        ax[1].set_position([l - l1 - w1, b + b1, w * 0.6, h * 0.8])
        w2 = w * 0.05
        # colorbar
        l, b, w, h = ax[2].get_position().bounds
        ax[2].set_position([l - l1 - w1 - w2, b, w, h])
        # Horizontal dendrogram
        l, b, w, h = ax[3].get_position().bounds
        ax[3].set_position([l - l1 - w1, b, w * 0.6, h])
    title = "Portfolio - " + title + "\n"
    title += ax[3].get_title(loc="left")
    ax[3].set_title(title)
    if external_axes is None:
        theme.visualize_output(force_tight_layout=True)
@log_start_end(log=logger)
def display_rc(**kwargs):
    """Display the risk contribution of each asset as a bar chart.

    Parameters (all passed via kwargs)
    ----------
    weights: dict
        Portfolio weights keyed by ticker.
    data: pd.DataFrame
        Returns dataframe used for the covariance matrix and risk measures.
    colors: list
        Color palette; colors[1] seeds the riskfolio plot, colors[0] recolors
        the bars afterwards.
    title: str
        Suffix appended to the plot title.
    external_axes: Optional[List[plt.Axes]]
        Optional external axes to plot on; when None a new figure is created.
    Remaining risk parameters (risk_measure, risk_free_rate, alpha, a_sim,
    beta, b_sim, freq) are read through get_kwarg() with project defaults.
    """
    weights = kwargs.get("weights", None)
    data = kwargs.get("data", None)
    colors = kwargs.get("colors", None)
    title = kwargs.get("title", "")
    external_axes = kwargs.get("external_axes", None)

    risk_measure = get_kwarg("risk_measure", kwargs)
    risk_free_rate = get_kwarg("risk_free_rate", kwargs)
    alpha = get_kwarg("alpha", kwargs)
    a_sim = get_kwarg("a_sim", kwargs)
    beta = get_kwarg("beta", kwargs)
    b_sim = get_kwarg("b_sim", kwargs)
    freq = get_kwarg("freq", kwargs)

    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]

    ax = rp.plot_risk_con(
        w=pd.Series(weights).to_frame(),
        cov=data.cov(),
        returns=data,
        rm=RISK_CHOICES[risk_measure.lower()],
        rf=risk_free_rate,
        alpha=alpha,
        a_sim=a_sim,
        beta=beta,
        b_sim=b_sim,
        color=colors[1],
        t_factor=TIME_FACTOR[freq.upper()],
        ax=ax,
    )

    # Recolor the bars drawn by riskfolio to match the terminal theme.
    # FIX: removed the original `i.set_width(i.get_width())`, a pure no-op.
    for i in ax.get_children()[:-1]:
        if isinstance(i, matplotlib.patches.Rectangle):
            i.set_color(colors[0])

    title = "Portfolio - " + title + "\n"
    title += ax.get_title(loc="left")
    ax.set_title(title)

    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_hist(**kwargs):
    """Display a histogram of portfolio returns with riskfolio's overlay lines.

    Parameters (all passed via kwargs)
    ----------
    weights: dict
        Portfolio weights keyed by ticker.
    data: pd.DataFrame
        Returns dataframe used to build the distribution.
    colors: list
        Color palette; colors[0] is used for the bars, the rest for the lines.
    title: str
        Suffix appended to the plot title.
    external_axes: Optional[List[plt.Axes]]
        Optional external axes to plot on; when None a new figure is created.
    alpha: float
        Significance level passed to riskfolio, by default 0.05.
    """
    weights = kwargs.get("weights", None)
    data = kwargs.get("data", None)
    colors = kwargs.get("colors", None)
    title = kwargs.get("title", "")
    external_axes = kwargs.get("external_axes", None)
    # NOTE(review): sibling display functions read alpha via get_kwarg();
    # this one uses a literal 0.05 default — confirm they should agree.
    alpha = kwargs.get("alpha", 0.05)
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    ax = rp.plot_hist(data, w=pd.Series(weights).to_frame(), alpha=alpha, ax=ax)
    ax.legend(fontsize="x-small", loc="best")
    # Changing colors: recolor the histogram bars drawn by riskfolio.
    for i in ax.get_children()[:-1]:
        if isinstance(i, matplotlib.patches.Rectangle):
            i.set_color(colors[0])
            i.set_alpha(0.7)
    # Recolor overlay lines and their legend entries in matching pairs,
    # walking both lists in reverse so colors stay aligned.
    k = 1
    for i, j in zip(ax.get_legend().get_lines()[::-1], ax.get_lines()[::-1]):
        i.set_color(colors[k])
        j.set_color(colors[k])
        k += 1
    title = "Portfolio - " + title + "\n"
    title += ax.get_title(loc="left")
    ax.set_title(title)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_dd(**kwargs):
    """Display the drawdown of uncompounded cumulative portfolio returns.

    Parameters (all passed via kwargs)
    ----------
    weights: dict
        Portfolio weights keyed by ticker.
    data: pd.DataFrame
        Returns dataframe; its cumulative sum is used as the NAV series.
    colors: list
        Color palette; colors[0] is used for the first line and filled area.
    title: str
        Suffix appended to the plot title.
    external_axes: Optional[List[plt.Axes]]
        Optional external axes to plot on; when None a new figure is created.
    alpha: float
        Significance level for the drawdown risk measures (via get_kwarg).
    """
    weights = kwargs.get("weights", None)
    data = kwargs.get("data", None)
    colors = kwargs.get("colors", None)
    title = kwargs.get("title", "")
    external_axes = kwargs.get("external_axes", None)
    alpha = get_kwarg("alpha", kwargs)
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    # Uncompounded NAV: riskfolio's plot_drawdown expects a cumulative series.
    nav = data.cumsum()
    ax = rp.plot_drawdown(nav=nav, w=pd.Series(weights).to_frame(), alpha=alpha, ax=ax)
    # plot_drawdown yields (at least) two axes; drop the first panel and
    # stretch the drawdown panel to fill the whole figure via a 1x1 GridSpec.
    ax[0].remove()
    ax = ax[1]
    fig = ax.get_figure()
    gs = GridSpec(1, 1, figure=fig)
    ax.set_position(gs[0].get_position(fig))
    ax.set_subplotspec(gs[0])
    # Changing colors: first line gets the primary color; remaining lines and
    # their legend entries are recolored in matching pairs (reversed walk).
    ax.get_lines()[0].set_color(colors[0])
    k = 1
    for i, j in zip(ax.get_legend().get_lines()[::-1], ax.get_lines()[1:][::-1]):
        i.set_color(colors[k])
        j.set_color(colors[k])
        k += 1
    # Children[1] is presumably the filled drawdown area — recolor it too.
    ax.get_children()[1].set_facecolor(colors[0])
    ax.get_children()[1].set_alpha(0.7)
    title = "Portfolio - " + title + "\n"
    title += ax.get_title(loc="left")
    ax.set_title(title)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_pie(**kwargs):
    """Show a pie chart of holdings

    Parameters
    ----------
    weights: dict
        Weights to display, where keys are tickers, and values are either weights or values if -v specified
    title: str
        Title to be used on the plot title
    external_axes: Optional[List[plt.Axes]]
        Optional external axes to plot data on
    """
    weights = kwargs.get("weights", None)
    colors = kwargs.get("colors", None)
    title = kwargs.get("title", "")
    external_axes = kwargs.get("external_axes", None)

    if not weights:
        return

    # Keep only strictly positive positions: pie slices cannot represent
    # zero-weight or short (negative) holdings.
    init_stocks = list(weights.keys())
    init_sizes = list(weights.values())
    symbols = []
    sizes = []
    for stock, size in zip(init_stocks, init_sizes):
        if size > 0:
            symbols.append(stock)
            sizes.append(size)

    total_size = np.sum(sizes)
    # NOTE(review): the `colors` kwarg read above is always overridden by the
    # theme palette; preserved as-is to keep the original behavior.
    colors = theme.get_colors()

    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]

    # If the sizes already sum to ~1 they are weights and get percentage
    # labels via my_autopct; otherwise they are monetary values and large
    # slices (>5% of the total) are labelled with their raw size.
    # FIX: collapsed two near-identical ax.pie() calls that differed only in
    # `autopct`, and fixed the "Optiona" docstring typo.
    is_weights = math.isclose(sum(sizes), 1, rel_tol=0.1)
    _, _, autotexts = ax.pie(
        sizes,
        labels=symbols,
        autopct=my_autopct if is_weights else "",
        colors=colors,
        textprops=dict(color="white"),
        wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
        labeldistance=1.05,
        startangle=45,
        normalize=True,
    )
    plt.setp(autotexts, color="white", fontweight="bold")
    if not is_weights:
        for i, a in enumerate(autotexts):
            if sizes[i] / total_size > 0.05:
                a.set_text(f"{sizes[i]:.2f}")
            else:
                a.set_text("")

    ax.axis("equal")
    plt.setp(autotexts, size=8, weight="bold")

    title = "Portfolio - " + title + "\n"
    title += "Portfolio Composition"
    ax.set_title(title)

    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def my_autopct(x):
    """Autopct callback for plt.pie: hide the label for slices at or below 4%."""
    return f"{x:.2f} %" if x > 4 else ""
__docformat__ = "numpy"
# pylint: disable=R0913, C0302, E1101, line-too-long
# flake8: noqa: E501
import logging
from typing import Dict, List, Optional, Tuple, Union
import warnings
import pandas as pd
from numpy.typing import NDArray
from numpy import floating
from riskfolio import rp
from openbb_terminal.decorators import log_start_end
from openbb_terminal.portfolio.portfolio_optimization import (
optimizer_helper,
optimizer_model,
)
from openbb_terminal.portfolio.portfolio_optimization.parameters.params_helpers import (
check_convert_parameters,
)
from openbb_terminal.portfolio.portfolio_optimization.statics import (
RISK_NAMES,
TERMINAL_TEMPLATE_MAP,
TIME_FACTOR,
DRAWDOWNS,
)
from openbb_terminal.portfolio.portfolio_optimization.po_engine import PoEngine
from openbb_terminal.rich_config import console
warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def generate_portfolio(
    symbols_categories: Dict[str, Dict[str, str]] = None,
    symbols_file_path: str = None,
    parameters_file_path: str = None,
) -> Union[PoEngine, None]:
    """Load portfolio optimization engine

    Parameters
    ----------
    symbols_categories: Dict[str, Dict[str, str]] = None
        Categories, by default None
    symbols_file_path : str, optional
        Symbols file full path, by default None
    parameters_file_path : str, optional
        Parameters file full path, by default None

    Returns
    -------
    PoEngine
        Portfolio optimization engine, or None when neither symbol source
        was provided.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
    "SECTOR": {
    "AAPL": "INFORMATION TECHNOLOGY",
    "MSFT": "INFORMATION TECHNOLOGY",
    "AMZN": "CONSUMER DISCRETIONARY",
    },
    "CURRENCY": {
    "AAPL": "USD",
    "MSFT": "USD",
    "AMZN": "USD",
    },
    "CURRENT_INVESTED_AMOUNT": {
    "AAPL": "100000.0",
    "MSFT": "200000.0",
    "AMZN": "300000.0",
    },
    }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.equal(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.equal(portfolio_engine=p)
    """
    # Guard first: without either symbol source there is nothing to build.
    if not symbols_file_path and not symbols_categories:
        console.print("No 'symbols_file_path' or 'symbols_categories' provided.")
        return None

    # A symbols file takes precedence over an in-memory categories dict.
    if symbols_file_path:
        return PoEngine(
            symbols_file_path=symbols_file_path,
            parameters_file_path=parameters_file_path,
        )

    return PoEngine(
        symbols_categories=symbols_categories,
        parameters_file_path=parameters_file_path,
    )
@log_start_end(log=logger)
def load_parameters_file(
    portfolio_engine: PoEngine,
    parameters_file_path: str,
) -> Dict:
    """Load optimization parameters from a file into the given engine.

    Parameters
    ----------
    portfolio_engine : PoEngine
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    parameters_file_path : str
        Parameters file full path, by default None

    Returns
    -------
    Dict
        Loaded parameters

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> p.get_params()
    {}

    >>> parameters = openbb.portfolio.po.file(portfolio_engine=p, parameters_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/optimization/defaults.ini")
    Parameters:
        interval    : 3y
        log_returns : 0
        freq        : d
        maxnan      : 0.05
        threshold   : 0.3
        alpha       : 0.05
    >>> p.get_params()
    {'interval': '3y',
    'log_returns': '0',
    'freq': 'd',
    'maxnan': '0.05',
    'threshold': '0.3',
    'alpha': '0.05'}

    >>> p.set_params({"risk_free_rate": 0.05}, update=True)
    >>> p.get_params()
    {'interval': '3y',
    'log_returns': '0',
    'freq': 'd',
    'maxnan': '0.05',
    'threshold': '0.3',
    'alpha': '0.05',
    'risk_free_rate': 0.05}

    >>> weights, performance = openbb.portfolio.po.maxsharpe(portfolio_engine=p)
    """
    # Parse the file into the engine, then hand back the resulting parameters.
    portfolio_engine.set_params_from_file(parameters_file_path)
    loaded_parameters = portfolio_engine.get_params()
    return loaded_parameters
@log_start_end(log=logger)
def validate_inputs(
    portfolio_engine=None, kwargs=None
) -> Tuple[List[str], PoEngine, dict]:
    """Check valid inputs

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    kwargs : dict
        Keyword arguments, by default None

    Returns
    -------
    Tuple[List[str], PoEngine, dict]
        List of symbols, Portfolio optimization engine, Keyword arguments.
        When no engine is provided returns ([], None, {}) so callers can
        bail out on the empty symbol list.
    """
    if not portfolio_engine:
        # BUGFIX: the original printed this message and then fell through,
        # raising UnboundLocalError on the unbound `parameters`/`symbols`.
        console.print("No 'portfolio_engine' provided.")
        return [], None, {}

    symbols = portfolio_engine.get_symbols()
    parameters = portfolio_engine.get_params().copy()
    # Tolerate kwargs=None (the declared default) instead of crashing.
    parameters.update(kwargs or {})

    converted_parameters = check_convert_parameters(received_parameters=parameters)

    # TODO: Remove this conversion when mapping between template and terminal is not needed
    TEMPLATE_TERMINAL_MAP = {v: k for k, v in TERMINAL_TEMPLATE_MAP.items()}
    terminal_parameters = {
        TEMPLATE_TERMINAL_MAP.get(key, key): value
        for key, value in converted_parameters.items()
    }

    return symbols, portfolio_engine, terminal_parameters
@log_start_end(log=logger)
def get_portfolio_performance(weights: Dict, data: pd.DataFrame, **kwargs) -> Dict:
    """Get portfolio performance

    Parameters
    ----------
    weights : Dict
        Portfolio weights
    data : pd.DataFrame
        Dataframe with returns

    Returns
    -------
    Dict
        Portfolio performance: annualized 'Return', 'Volatility' and
        'Sharpe ratio'; when a non-'MV' risk measure is requested, also the
        measure's value and a risk-adjusted Sharpe ratio. Empty dict when
        weights are empty or the computation fails.
    """
    try:
        if not weights:
            return {}

        freq = optimizer_helper.get_kwarg("freq", kwargs)
        risk_measure = optimizer_helper.get_kwarg("risk_measure", kwargs)
        risk_free_rate = optimizer_helper.get_kwarg("risk_free_rate", kwargs)
        alpha = optimizer_helper.get_kwarg("alpha", kwargs)
        a_sim = optimizer_helper.get_kwarg("a_sim", kwargs)
        beta = optimizer_helper.get_kwarg("beta", kwargs)
        b_sim = optimizer_helper.get_kwarg("b_sim", kwargs)

        freq = freq.upper()
        weights = pd.Series(weights).to_frame()
        returns = data @ weights

        # Scale by the frequency factor: mean grows linearly with time,
        # standard deviation with the square root of time.
        mu = returns.mean().item() * TIME_FACTOR[freq]
        sigma = returns.std().item() * TIME_FACTOR[freq] ** 0.5
        sharpe = (mu - risk_free_rate) / sigma

        performance_dict = {
            "Return": mu,
            "Volatility": sigma,
            "Sharpe ratio": sharpe,
        }

        risk_measure = optimizer_helper.validate_risk_measure(
            risk_measure, warning=False
        )
        if risk_measure != "MV":
            risk = rp.Sharpe_Risk(
                weights,
                cov=data.cov(),
                returns=data,
                rm=risk_measure,
                rf=risk_free_rate,
                alpha=alpha,
                a_sim=a_sim,
                beta=beta,
                b_sim=b_sim,
            )
            # Drawdown measures are not time-scaled; all others are.
            if risk_measure in DRAWDOWNS:
                sharpe_2 = (mu - risk_free_rate) / risk
            else:
                risk = risk * TIME_FACTOR[freq] ** 0.5
                sharpe_2 = (mu - risk_free_rate) / risk

            performance_dict[RISK_NAMES[risk_measure.lower()]] = risk
            performance_dict.update({"Sharpe ratio (risk adjusted)": sharpe_2})
    except Exception:
        # FIX: best-effort behavior preserved, but the traceback is now
        # logged instead of being silently swallowed (`except Exception as _`).
        logger.exception("Failed to calculate portfolio performance indicators")
        console.print(
            "[red]\nFailed to calculate portfolio performance indicators.[/red]"
        )
        performance_dict = {}
    return performance_dict
@log_start_end(log=logger)
def get_maxsharpe(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize Sharpe ratio weights

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    objective: str, optional
        Objective function of the optimization model, by default 'Sharpe'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    target_return: float, optional
        Constraint on minimum level of portfolio's return, by default -1.0
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk, by default -1.0
    mean: str, optional
        The method used to estimate the expected returns, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
    "SECTOR": {
    "AAPL": "INFORMATION TECHNOLOGY",
    "MSFT": "INFORMATION TECHNOLOGY",
    "AMZN": "CONSUMER DISCRETIONARY",
    },
    "CURRENCY": {
    "AAPL": "USD",
    "MSFT": "USD",
    "AMZN": "USD",
    },
    "CURRENT_INVESTED_AMOUNT": {
    "AAPL": "100000.0",
    "MSFT": "200000.0",
    "AMZN": "300000.0",
    },
    }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.maxsharpe(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.maxsharpe(portfolio_engine=p)
    """
    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # BUGFIX: return the (DataFrame, dict) pair declared in the signature;
        # previously a bare DataFrame broke callers unpacking two values.
        return pd.DataFrame(), {}

    weights, returns = optimizer_model.get_max_sharpe(
        symbols=valid_symbols, **valid_kwargs
    )
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_minrisk(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize minimum risk weights

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    objective: str, optional
        Objective function of the optimization model, by default 'Sharpe'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    target_return: float, optional
        Constraint on minimum level of portfolio's return, by default -1.0
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk, by default -1.0
    mean: str, optional
        The method used to estimate the expected returns, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
    "SECTOR": {
    "AAPL": "INFORMATION TECHNOLOGY",
    "MSFT": "INFORMATION TECHNOLOGY",
    "AMZN": "CONSUMER DISCRETIONARY",
    },
    "CURRENCY": {
    "AAPL": "USD",
    "MSFT": "USD",
    "AMZN": "USD",
    },
    "CURRENT_INVESTED_AMOUNT": {
    "AAPL": "100000.0",
    "MSFT": "200000.0",
    "AMZN": "300000.0",
    },
    }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.minrisk(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.minrisk(portfolio_engine=p)
    """
    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # BUGFIX: return the (DataFrame, dict) pair declared in the signature;
        # previously a bare DataFrame broke callers unpacking two values.
        return pd.DataFrame(), {}

    weights, returns = optimizer_model.get_min_risk(
        symbols=valid_symbols, **valid_kwargs
    )
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_maxutil(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize maximum utility weights

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    objective: str, optional
        Objective function of the optimization model, by default 'Sharpe'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    target_return: float, optional
        Constraint on minimum level of portfolio's return, by default -1.0
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk, by default -1.0
    mean: str, optional
        The method used to estimate the expected returns, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
    "SECTOR": {
    "AAPL": "INFORMATION TECHNOLOGY",
    "MSFT": "INFORMATION TECHNOLOGY",
    "AMZN": "CONSUMER DISCRETIONARY",
    },
    "CURRENCY": {
    "AAPL": "USD",
    "MSFT": "USD",
    "AMZN": "USD",
    },
    "CURRENT_INVESTED_AMOUNT": {
    "AAPL": "100000.0",
    "MSFT": "200000.0",
    "AMZN": "300000.0",
    },
    }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.maxutil(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.maxutil(portfolio_engine=p)
    """
    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # BUGFIX: return the (DataFrame, dict) pair declared in the signature;
        # previously a bare DataFrame broke callers unpacking two values.
        return pd.DataFrame(), {}

    weights, returns = optimizer_model.get_max_util(
        symbols=valid_symbols, **valid_kwargs
    )
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_maxret(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize maximum return weights

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    objective: str, optional
        Objective function of the optimization model, by default 'Sharpe'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    target_return: float, optional
        Constraint on minimum level of portfolio's return, by default -1.0
    target_risk: float, optional
        Constraint on maximum level of portfolio's risk, by default -1.0
    mean: str, optional
        The method used to estimate the expected returns, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
            "SECTOR": {
                "AAPL": "INFORMATION TECHNOLOGY",
                "MSFT": "INFORMATION TECHNOLOGY",
                "AMZN": "CONSUMER DISCRETIONARY",
            },
            "CURRENCY": {
                "AAPL": "USD",
                "MSFT": "USD",
                "AMZN": "USD",
            },
            "CURRENT_INVESTED_AMOUNT": {
                "AAPL": "100000.0",
                "MSFT": "200000.0",
                "AMZN": "300000.0",
            },
        }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.maxret(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.maxret(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Fix: return an empty (weights, performance) pair so the failure
        # path matches the declared Tuple[pd.DataFrame, Dict] contract and
        # the documented `weights, performance = ...` unpacking still works.
        return pd.DataFrame(), {}

    # Delegate the optimization to the model layer; returns the optimal
    # weights and the returns series used to compute them.
    weights, returns = optimizer_model.get_max_ret(
        symbols=valid_symbols, **valid_kwargs
    )

    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so subsequent SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_maxdiv(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize diversification weights

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94
    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
            "SECTOR": {
                "AAPL": "INFORMATION TECHNOLOGY",
                "MSFT": "INFORMATION TECHNOLOGY",
                "AMZN": "CONSUMER DISCRETIONARY",
            },
            "CURRENCY": {
                "AAPL": "USD",
                "MSFT": "USD",
                "AMZN": "USD",
            },
            "CURRENT_INVESTED_AMOUNT": {
                "AAPL": "100000.0",
                "MSFT": "200000.0",
                "AMZN": "300000.0",
            },
        }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.maxdiv(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.maxdiv(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Fix: return an empty (weights, performance) pair so the failure
        # path matches the declared Tuple[pd.DataFrame, Dict] contract and
        # the documented `weights, performance = ...` unpacking still works.
        return pd.DataFrame(), {}

    # Delegate the optimization to the model layer; returns the optimal
    # weights and the returns series used to compute them.
    weights, returns = optimizer_model.get_max_diversification_portfolio(
        symbols=valid_symbols, **valid_kwargs
    )

    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so subsequent SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_maxdecorr(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize decorrelation weights

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
            "SECTOR": {
                "AAPL": "INFORMATION TECHNOLOGY",
                "MSFT": "INFORMATION TECHNOLOGY",
                "AMZN": "CONSUMER DISCRETIONARY",
            },
            "CURRENCY": {
                "AAPL": "USD",
                "MSFT": "USD",
                "AMZN": "USD",
            },
            "CURRENT_INVESTED_AMOUNT": {
                "AAPL": "100000.0",
                "MSFT": "200000.0",
                "AMZN": "300000.0",
            },
        }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.maxdecorr(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.maxdecorr(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Fix: return an empty (weights, performance) pair so the failure
        # path matches the declared Tuple[pd.DataFrame, Dict] contract and
        # the documented `weights, performance = ...` unpacking still works.
        return pd.DataFrame(), {}

    # Delegate the optimization to the model layer; returns the optimal
    # weights and the returns series used to compute them.
    weights, returns = optimizer_model.get_max_decorrelation_portfolio(
        symbols=valid_symbols, **valid_kwargs
    )

    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so subsequent SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_blacklitterman(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize with Black Litterman model

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    benchmark : Dict
        Dict of portfolio weights, by default None
    p_views: List
        Matrix P of views that shows relationships among assets and returns, by default None
    q_views: List
        Matrix Q of expected returns of views, by default None
    objective: str, optional
        Objective function of the optimization model, by default 'Sharpe'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    delta: float, optional
        Risk aversion factor of Black Litterman model, by default None
    equilibrium: bool, optional
        If True excess returns are based on equilibrium market portfolio, if False
        excess returns are calculated as historical returns minus risk free rate, by default True
    optimize: bool, optional
        If True Black Litterman estimates are used as inputs of mean variance model,
        if False returns equilibrium weights from Black Litterman model, by default True

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
            "SECTOR": {
                "AAPL": "INFORMATION TECHNOLOGY",
                "MSFT": "INFORMATION TECHNOLOGY",
                "AMZN": "CONSUMER DISCRETIONARY",
            },
            "CURRENCY": {
                "AAPL": "USD",
                "MSFT": "USD",
                "AMZN": "USD",
            },
            "CURRENT_INVESTED_AMOUNT": {
                "AAPL": "100000.0",
                "MSFT": "200000.0",
                "AMZN": "300000.0",
            },
        }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.blacklitterman(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.blacklitterman(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Fix: return an empty (weights, performance) pair so the failure
        # path matches the declared Tuple[pd.DataFrame, Dict] contract and
        # the documented `weights, performance = ...` unpacking still works.
        return pd.DataFrame(), {}

    # Delegate the optimization to the model layer; returns the optimal
    # weights and the returns series used to compute them.
    weights, returns = optimizer_model.get_black_litterman_portfolio(
        symbols=valid_symbols, **valid_kwargs
    )

    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so subsequent SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_ef(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[
    pd.DataFrame,
    pd.DataFrame,
    pd.DataFrame,
    pd.DataFrame,
    Optional[pd.DataFrame],
    NDArray[floating],
    NDArray[floating],
    rp.Portfolio,
]:
    """Get Efficient Frontier

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    value_short : float, optional
        Amount to allocate to portfolio in short positions, by default 0.0
    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    n_portfolios: int, optional
        Number of portfolios to simulate, by default 100
    seed: int, optional
        Seed used to generate random portfolios, by default 123

    Returns
    -------
    Tuple[
        pd.DataFrame,
        pd.DataFrame,
        pd.DataFrame,
        pd.DataFrame,
        Optional[pd.DataFrame],
        NDArray[floating],
        NDArray[floating],
        rp.Portfolio,
    ]
        In order: frontier, mu, cov, returns, weights, X1, Y1 and the
        portfolio object, as produced by `optimizer_model.get_ef`.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
            "SECTOR": {
                "AAPL": "INFORMATION TECHNOLOGY",
                "MSFT": "INFORMATION TECHNOLOGY",
                "AMZN": "CONSUMER DISCRETIONARY",
            },
            "CURRENCY": {
                "AAPL": "USD",
                "MSFT": "USD",
                "AMZN": "USD",
            },
            "CURRENT_INVESTED_AMOUNT": {
                "AAPL": "100000.0",
                "MSFT": "200000.0",
                "AMZN": "300000.0",
            },
        }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> frontier = openbb.portfolio.po.ef(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> frontier = openbb.portfolio.po.ef(portfolio_engine=p)
    """
    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # NOTE(review): this failure path returns a bare empty DataFrame,
        # which does not match the declared 8-tuple return type — callers
        # that unpack the result will raise. Kept as-is to preserve the
        # existing behavior; confirm intended contract before changing.
        return pd.DataFrame()
    # X1/Y1 are the arrays returned alongside the frontier by the model
    # layer (presumably frontier plot coordinates — TODO confirm upstream).
    frontier, mu, cov, returns, weights, X1, Y1, port = optimizer_model.get_ef(
        symbols=valid_symbols, **valid_kwargs
    )
    # Persist results on the engine so subsequent SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights.to_dict()["weights"])
    valid_portfolio_engine.set_returns(returns=returns)
    return frontier, mu, cov, returns, weights, X1, Y1, port
@log_start_end(log=logger)
def get_riskparity(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize with Risk Parity using the risk budgeting approach

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    target_return: float, optional
        Constraint on minimum level of portfolio's return, by default -1.0
    mean: str, optional
        The method used to estimate the expected returns, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94
    risk_cont: List[str], optional
        The vector of risk contribution per asset, by default 1/n (number of assets)

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
            "SECTOR": {
                "AAPL": "INFORMATION TECHNOLOGY",
                "MSFT": "INFORMATION TECHNOLOGY",
                "AMZN": "CONSUMER DISCRETIONARY",
            },
            "CURRENCY": {
                "AAPL": "USD",
                "MSFT": "USD",
                "AMZN": "USD",
            },
            "CURRENT_INVESTED_AMOUNT": {
                "AAPL": "100000.0",
                "MSFT": "200000.0",
                "AMZN": "300000.0",
            },
        }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.riskparity(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.riskparity(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Fix: return an empty (weights, performance) pair so the failure
        # path matches the declared Tuple[pd.DataFrame, Dict] contract and
        # the documented `weights, performance = ...` unpacking still works.
        return pd.DataFrame(), {}

    # Delegate the optimization to the model layer; returns the optimal
    # weights and the returns series used to compute them.
    weights, returns = optimizer_model.get_risk_parity_portfolio(
        symbols=valid_symbols, **valid_kwargs
    )

    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so subsequent SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_relriskparity(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize with Relaxed Risk Parity using the least squares approach

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        Possible values are:

        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'CVaR': Conditional Value at Risk.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization.
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    alpha: float, optional
        Significance level of VaR, CVaR, EDaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    target_return: float, optional
        Constraint on minimum level of portfolio's return, by default -1.0
    mean: str, optional
        The method used to estimate the expected returns, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.

    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94
    risk_cont: List[str], optional
        The vector of risk contribution per asset, by default 1/n (number of assets)
    version : str, optional
        Relaxed risk parity model version, by default 'A'
        Possible values are:

        - 'A': without regularization and penalization constraints.
        - 'B': with regularization constraint but without penalization constraint.
        - 'C': with regularization and penalization constraints.

    penal_factor: float, optional
        The penalization factor of penalization constraints. Only used with
        version 'C', by default 1.0

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
            "SECTOR": {
                "AAPL": "INFORMATION TECHNOLOGY",
                "MSFT": "INFORMATION TECHNOLOGY",
                "AMZN": "CONSUMER DISCRETIONARY",
            },
            "CURRENCY": {
                "AAPL": "USD",
                "MSFT": "USD",
                "AMZN": "USD",
            },
            "CURRENT_INVESTED_AMOUNT": {
                "AAPL": "100000.0",
                "MSFT": "200000.0",
                "AMZN": "300000.0",
            },
        }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.relriskparity(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.relriskparity(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Fix: return an empty (weights, performance) pair so the failure
        # path matches the declared Tuple[pd.DataFrame, Dict] contract and
        # the documented `weights, performance = ...` unpacking still works.
        return pd.DataFrame(), {}

    # Delegate the optimization to the model layer; returns the optimal
    # weights and the returns series used to compute them.
    weights, returns = optimizer_model.get_rel_risk_parity_portfolio(
        symbols=valid_symbols, **valid_kwargs
    )

    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so subsequent SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_hrp(portfolio_engine: PoEngine = None, **kwargs) -> Tuple[pd.DataFrame, Dict]:
    """Optimize with Hierarchical Risk Parity

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    objective: str, optional
        Objective function of the optimization model, by default 'MinRisk'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        If model is 'NCO', the risk measures available depend on the objective function.
        Possible values are:

        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    alpha: float, optional
        Significance level of VaR, CVaR, EVaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses, by default 100
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value, by default None
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value, by default None
    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:

        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}

    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.
        cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`__.
        The default is 'single'. Possible values are:

        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.

    k: int, optional
        Number of clusters. This value is taken instead of the optimal number
        of clusters calculated with the two difference gap statistic, by default None
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters, by default 10
    bins_info: str, optional
        Number of bins used to calculate variation of information, by default 'KN'.
        Possible values are:

        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scott's choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier's choice method.

    alpha_tail: float, optional
        Significance level for lower tail dependence index, by default 0.05
    leaf_order: bool, optional
        Indicates if the clusters are ordered so that the distance between
        successive leaves is minimal, by default True

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
                "SECTOR": {
                    "AAPL": "INFORMATION TECHNOLOGY",
                    "MSFT": "INFORMATION TECHNOLOGY",
                    "AMZN": "CONSUMER DISCRETIONARY",
                },
                "CURRENCY": {
                    "AAPL": "USD",
                    "MSFT": "USD",
                    "AMZN": "USD",
                },
                "CURRENT_INVESTED_AMOUNT": {
                    "AAPL": "100000.0",
                    "MSFT": "200000.0",
                    "AMZN": "300000.0",
                },
            }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.hrp(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.hrp(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Return an empty (weights, performance) pair so callers that unpack
        # `weights, performance = ...` (see Examples) do not fail on bad input.
        return pd.DataFrame(), {}

    # Delegate the actual HRP optimization to the model layer.
    weights, returns = optimizer_model.get_hrp(symbols=valid_symbols, **valid_kwargs)
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so later SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_herc(portfolio_engine: PoEngine = None, **kwargs) -> Tuple[pd.DataFrame, Dict]:
    """Optimize with Hierarchical Equal Risk Contribution (HERC) method.

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    objective: str, optional
        Objective function of the optimization model, by default 'MinRisk'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        If model is 'NCO', the risk measures available depend on the objective function.
        Possible values are:

        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    alpha: float, optional
        Significance level of VaR, CVaR, EVaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses, by default 100
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value, by default None
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value, by default None
    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:

        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}

    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.
        cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`__.
        The default is 'single'. Possible values are:

        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.

    k: int, optional
        Number of clusters. This value is taken instead of the optimal number
        of clusters calculated with the two difference gap statistic, by default None
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters, by default 10
    bins_info: str, optional
        Number of bins used to calculate variation of information, by default 'KN'.
        Possible values are:

        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scott's choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier's choice method.

    alpha_tail: float, optional
        Significance level for lower tail dependence index, by default 0.05
    leaf_order: bool, optional
        Indicates if the clusters are ordered so that the distance between
        successive leaves is minimal, by default True

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
                "SECTOR": {
                    "AAPL": "INFORMATION TECHNOLOGY",
                    "MSFT": "INFORMATION TECHNOLOGY",
                    "AMZN": "CONSUMER DISCRETIONARY",
                },
                "CURRENCY": {
                    "AAPL": "USD",
                    "MSFT": "USD",
                    "AMZN": "USD",
                },
                "CURRENT_INVESTED_AMOUNT": {
                    "AAPL": "100000.0",
                    "MSFT": "200000.0",
                    "AMZN": "300000.0",
                },
            }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.herc(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.herc(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Return an empty (weights, performance) pair so callers that unpack
        # `weights, performance = ...` (see Examples) do not fail on bad input.
        return pd.DataFrame(), {}

    # Delegate the actual HERC optimization to the model layer.
    weights, returns = optimizer_model.get_herc(symbols=valid_symbols, **valid_kwargs)
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so later SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_nco(portfolio_engine: PoEngine = None, **kwargs) -> Tuple[pd.DataFrame, Dict]:
    """Optimize with Nested Clustered Optimization (NCO) model.

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0
    objective: str, optional
        Objective function of the optimization model, by default 'MinRisk'
        Possible values are:

        - 'MinRisk': Minimize the selected risk measure.
        - 'Utility': Maximize the risk averse utility function.
        - 'Sharpe': Maximize the risk adjusted return ratio based on the selected risk measure.
        - 'MaxRet': Maximize the expected return of the portfolio.

    risk_measure: str, optional
        The risk measure used to optimize the portfolio, by default 'MV'
        If model is 'NCO', the risk measures available depend on the objective function.
        Possible values are:

        - 'MV': Variance.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'TG': Tail Gini.
        - 'EVaR': Entropic Value at Risk.
        - 'WR': Worst Realization (Minimax).
        - 'RG': Range of returns.
        - 'CVRG': CVaR range of returns.
        - 'TGRG': Tail Gini range of returns.
        - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded cumulative returns.
        - 'DaR': Drawdown at Risk of uncompounded cumulative returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
        - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
        - 'UCI': Ulcer Index of uncompounded cumulative returns.
        - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
        - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
        - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
        - 'UCI_Rel': Ulcer Index of compounded cumulative returns.

    risk_free_rate: float, optional
        Risk free rate, annualized. Used for 'FLPM' and 'SLPM' and Sharpe objective function, by default 0.0
    risk_aversion: float, optional
        Risk aversion factor of the 'Utility' objective function, by default 1.0
    alpha: float, optional
        Significance level of VaR, CVaR, EVaR, DaR, CDaR, EDaR, Tail Gini of losses, by default 0.05
    a_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of losses, by default 100
    beta: float, optional
        Significance level of CVaR and Tail Gini of gains. If None it duplicates alpha value, by default None
    b_sim: float, optional
        Number of CVaRs used to approximate Tail Gini of gains. If None it duplicates a_sim value, by default None
    covariance: str, optional
        The method used to estimate the covariance matrix, by default 'hist'
        Possible values are:

        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ewma2': use ewma with adjust=False. For more information see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/window.html#exponentially-weighted-window>`__.
        - 'ledoit': use the Ledoit and Wolf Shrinkage method.
        - 'oas': use the Oracle Approximation Shrinkage method.
        - 'shrunk': use the basic Shrunk Covariance method.
        - 'gl': use the basic Graphical Lasso Covariance method.
        - 'jlogo': use the j-LoGo Covariance method. For more information see: `a-jLogo`.
        - 'fixed': denoise using fixed method. For more information see chapter 2 of `a-MLforAM`.
        - 'spectral': denoise using spectral method. For more information see chapter 2 of `a-MLforAM`.
        - 'shrink': denoise using shrink method. For more information see chapter 2 of `a-MLforAM`.

    d_ewma: float, optional
        The smoothing factor of ewma methods, by default 0.94
    codependence: str, optional
        The codependence or similarity matrix used to build the distance
        metric and clusters. The default is 'pearson'. Possible values are:

        - 'pearson': pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{pearson}_{i,j})}
        - 'spearman': spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{0.5(1-\\rho^{spearman}_{i,j})}
        - 'abs_pearson': absolute value pearson correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{pearson}_{i,j}|)}
        - 'abs_spearman': absolute value spearman correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-|\\rho^{spearman}_{i,j}|)}
        - 'distance': distance correlation matrix. Distance formula:
            .. math:: D_{i,j} = \\sqrt{(1-\\rho^{distance}_{i,j})}
        - 'mutual_info': mutual information matrix. Distance used is variation information matrix.
        - 'tail': lower tail dependence index matrix. Dissimilarity formula:
            .. math:: D_{i,j} = -\\log{\\lambda_{i,j}}

    linkage: str, optional
        Linkage method of hierarchical clustering. For more information see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.
        cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`__.
        The default is 'single'. Possible values are:

        - 'single'.
        - 'complete'.
        - 'average'.
        - 'weighted'.
        - 'centroid'.
        - 'median'.
        - 'ward'.
        - 'dbht': Direct Bubble Hierarchical Tree.

    k: int, optional
        Number of clusters. This value is taken instead of the optimal number
        of clusters calculated with the two difference gap statistic, by default None
    max_k: int, optional
        Max number of clusters used by the two difference gap statistic
        to find the optimal number of clusters, by default 10
    bins_info: str, optional
        Number of bins used to calculate variation of information, by default 'KN'.
        Possible values are:

        - 'KN': Knuth's choice method. For more information see `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`__.
        - 'FD': Freedman–Diaconis' choice method. For more information see `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`__.
        - 'SC': Scott's choice method. For more information see `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`__.
        - 'HGR': Hacine-Gharbi and Ravier's choice method.

    alpha_tail: float, optional
        Significance level for lower tail dependence index, by default 0.05
    leaf_order: bool, optional
        Indicates if the clusters are ordered so that the distance between
        successive leaves is minimal, by default True

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
                "SECTOR": {
                    "AAPL": "INFORMATION TECHNOLOGY",
                    "MSFT": "INFORMATION TECHNOLOGY",
                    "AMZN": "CONSUMER DISCRETIONARY",
                },
                "CURRENCY": {
                    "AAPL": "USD",
                    "MSFT": "USD",
                    "AMZN": "USD",
                },
                "CURRENT_INVESTED_AMOUNT": {
                    "AAPL": "100000.0",
                    "MSFT": "200000.0",
                    "AMZN": "300000.0",
                },
            }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.nco(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.nco(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Return an empty (weights, performance) pair so callers that unpack
        # `weights, performance = ...` (see Examples) do not fail on bad input.
        return pd.DataFrame(), {}

    # Delegate the actual NCO optimization to the model layer.
    weights, returns = optimizer_model.get_nco(symbols=valid_symbols, **valid_kwargs)
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so later SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_equal(portfolio_engine: PoEngine = None, **kwargs) -> Tuple[pd.DataFrame, Dict]:
    """Equally weighted portfolio, where weight = 1/# of symbols

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
                "SECTOR": {
                    "AAPL": "INFORMATION TECHNOLOGY",
                    "MSFT": "INFORMATION TECHNOLOGY",
                    "AMZN": "CONSUMER DISCRETIONARY",
                },
                "CURRENCY": {
                    "AAPL": "USD",
                    "MSFT": "USD",
                    "AMZN": "USD",
                },
                "CURRENT_INVESTED_AMOUNT": {
                    "AAPL": "100000.0",
                    "MSFT": "200000.0",
                    "AMZN": "300000.0",
                },
            }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.equal(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.equal(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Return an empty (weights, performance) pair so callers that unpack
        # `weights, performance = ...` (see Examples) do not fail on bad input.
        return pd.DataFrame(), {}

    # Delegate the equal-weight allocation to the model layer.
    weights, returns = optimizer_model.get_equal_weights(
        symbols=valid_symbols, **valid_kwargs
    )
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so later SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_mktcap(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize weighted according to market capitalization

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None
        Use `portfolio.po.load` to load a portfolio engine
    interval : str, optional
        Interval to get data, by default '3y'
    start_date : str, optional
        If not using interval, start date string (YYYY-MM-DD), by default ""
    end_date : str, optional
        If not using interval, end date string (YYYY-MM-DD). If empty use last weekday, by default ""
    log_returns : bool, optional
        If True use log returns, else arithmetic returns, by default False
    freq : str, optional
        Frequency of returns, by default 'D'. Options: 'D' for daily, 'W' for weekly, 'M' for monthly
    maxnan: float, optional
        Maximum percentage of NaNs allowed in the data, by default 0.05
    threshold: float, optional
        Value used to replace outliers that are higher than threshold, by default 0.0
    method: str, optional
        Method used to fill nan values, by default 'time'
        For more information see `interpolate <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html>`__.
    value : float, optional
        Amount to allocate to portfolio in long positions, by default 1.0

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> d = {
                "SECTOR": {
                    "AAPL": "INFORMATION TECHNOLOGY",
                    "MSFT": "INFORMATION TECHNOLOGY",
                    "AMZN": "CONSUMER DISCRETIONARY",
                },
                "CURRENCY": {
                    "AAPL": "USD",
                    "MSFT": "USD",
                    "AMZN": "USD",
                },
                "CURRENT_INVESTED_AMOUNT": {
                    "AAPL": "100000.0",
                    "MSFT": "200000.0",
                    "AMZN": "300000.0",
                },
            }
    >>> p = openbb.portfolio.po.load(symbols_categories=d)
    >>> weights, performance = openbb.portfolio.po.mktcap(portfolio_engine=p)

    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.mktcap(portfolio_engine=p)
    """

    valid_symbols, valid_portfolio_engine, valid_kwargs = validate_inputs(
        portfolio_engine, kwargs
    )
    if not valid_symbols:
        # Return an empty (weights, performance) pair so callers that unpack
        # `weights, performance = ...` (see Examples) do not fail on bad input.
        return pd.DataFrame(), {}

    # Weight each symbol by its market capitalization via the model layer.
    weights, returns = optimizer_model.get_property_weights(
        symbols=valid_symbols, s_property="marketCap", **valid_kwargs
    )
    performance_dict = get_portfolio_performance(weights, returns, **valid_kwargs)

    # Persist results on the engine so later SDK calls can reuse them.
    valid_portfolio_engine.set_weights(weights=weights)
    valid_portfolio_engine.set_returns(returns=returns)

    return valid_portfolio_engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_dividend(
    portfolio_engine: PoEngine = None, **kwargs
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize portfolio weights proportionally to each asset's dividend yield.

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None.
        Use `portfolio.po.load` to load a portfolio engine.
    **kwargs
        Data and optimization options forwarded to the optimizer, e.g.
        interval, start_date, end_date, log_returns, freq, maxnan,
        threshold, method and value (see `portfolio.po.load`).

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.dividend(portfolio_engine=p)
    """
    symbols, engine, options = validate_inputs(portfolio_engine, kwargs)
    # NOTE(review): on invalid symbols a bare DataFrame is returned instead of
    # the annotated tuple -- kept as-is for consistency with sibling functions.
    if not symbols:
        return pd.DataFrame()

    weights, returns = optimizer_model.get_property_weights(
        symbols=symbols, s_property="dividendYield", **options
    )
    performance_dict = get_portfolio_performance(weights, returns, **options)

    engine.set_weights(weights=weights)
    engine.set_returns(returns=returns)
    return engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def get_property(
    portfolio_engine: PoEngine = None,
    prop: str = "marketCap",
    **kwargs,
) -> Tuple[pd.DataFrame, Dict]:
    """Optimize portfolio weights in proportion to a chosen asset property.

    Parameters
    ----------
    portfolio_engine : PoEngine, optional
        Portfolio optimization engine, by default None.
        Use `portfolio.po.load` to load a portfolio engine.
    prop : str, optional
        Property to use for optimization, by default 'marketCap'.
        Use `portfolio.po.get_properties()` to get a list of available properties.
    **kwargs
        Data and optimization options forwarded to the optimizer (interval,
        start_date, end_date, log_returns, freq, maxnan, threshold, method,
        value).

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Tuple with weights and performance dictionary.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.property(portfolio_engine=p, prop="forwardPE")
    """
    if prop is None:
        console.print("No property provided")
        return pd.DataFrame()

    symbols, engine, options = validate_inputs(portfolio_engine, kwargs)
    # NOTE(review): bare DataFrame on failure, kept for sibling consistency.
    if not symbols:
        return pd.DataFrame()

    weights, returns = optimizer_model.get_property_weights(
        symbols=symbols, s_property=prop, **options
    )
    performance_dict = get_portfolio_performance(weights, returns, **options)

    engine.set_weights(weights=weights)
    engine.set_returns(returns=returns)
    return engine.get_weights_df(warning=False), performance_dict
@log_start_end(log=logger)
def show(
    portfolio_engine: PoEngine,
    category: str = None,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]:
    """Show portfolio optimization results.

    Parameters
    ----------
    portfolio_engine : PoEngine
        Portfolio optimization engine.
        Use `portfolio.po.load` to load a portfolio engine.
    category : str, optional
        Category to show, by default None.
        After loading a portfolio with `portfolio.po.load` you can use
        the object method `get_available_categories()` to get a list of
        available categories. You can also use `set_categories_dict()` to set
        a custom dictionary of categories; it must contain
        "CURRENT_INVESTED_AMOUNT" and "CURRENT_WEIGHTS" as keys.

    Returns
    -------
    Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]
        A bare empty DataFrame when no weights are available; otherwise a
        tuple of (weights, category breakdown). The breakdown is empty when
        `category` is missing or not among the available categories.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> p = openbb.portfolio.po.load(symbols_file_path="~/openbb_terminal/miscellaneous/portfolio_examples/allocation/60_40_Portfolio.xlsx")
    >>> weights, performance = openbb.portfolio.po.equal(portfolio_engine=p)
    >>> weights_df, category_df = openbb.portfolio.po.show(portfolio_engine=p, category="ASSET_CLASS")
    """
    weights = portfolio_engine.get_weights_df()
    if weights.empty:
        return pd.DataFrame()

    available_categories = portfolio_engine.get_available_categories()

    # "No category given" and "unknown category" share the same fallback:
    # list the valid choices and return an empty breakdown.
    if category is None or category not in available_categories:
        print_categories_msg(available_categories)
        return weights, pd.DataFrame()

    category_df = portfolio_engine.get_category_df(category=category)
    return weights, category_df
def print_categories_msg(available_categories: List[str]) -> None:
    """Tell the user which portfolio categories can be selected, if any."""
    if available_categories:
        joined = ", ".join(available_categories)
        console.print(f"Please specify a category from the following: {joined}.")
    else:
        console.print("No categories found.")
import argparse
from datetime import datetime, date
from typing import Any, Dict, List
from pathlib import Path
from openbb_terminal.helper_funcs import log_and_raise
from openbb_terminal.core.config import paths
from openbb_terminal.rich_config import console
from openbb_terminal.portfolio.portfolio_optimization.statics import (
OPTIMIZATION_PARAMETERS,
TERMINAL_TEMPLATE_MAP,
)
def check_save_file(file: str) -> str:
    """Argparse type validator for the name of the parameter file to save.

    Raises (via ``log_and_raise``) when the name would clobber the bundled
    defaults or does not end in ``.ini``; otherwise returns the name.
    """
    if file == "defaults.ini":
        # The shipped defaults must never be overwritten.
        log_and_raise(
            argparse.ArgumentTypeError(
                "Cannot overwrite defaults.ini file, please save with a different name"
            )
        )
    elif not file.endswith(".ini"):
        log_and_raise(
            argparse.ArgumentTypeError("File to be saved needs to be .ini")
        )
    return file
def load_data_files() -> Dict[str, Path]:
    """Collect parameter files shipped with the terminal and user exports.

    Returns
    -------
    Dict[str, Path]
        Mapping of file name to full path. User files are scanned last, so
        a user file shadows a bundled file with the same name.
    """
    search_dirs = (
        paths.MISCELLANEOUS_DIRECTORY / "portfolio_examples" / "optimization",
        paths.USER_PORTFOLIO_DATA_DIRECTORY / "optimization",
    )
    found: Dict[str, Path] = {}
    for folder in search_dirs:
        for extension in ("xlsx", "ini"):
            for candidate in Path(folder).rglob(f"*.{extension}"):
                if candidate.is_file():
                    found[candidate.name] = candidate
    return found
def check_convert_parameters(received_parameters: dict) -> dict:
    """Validate received parameters and cast each known one to its declared type.

    Date-like entries are normalized first; every remaining parameter that is
    declared in ``OPTIMIZATION_PARAMETERS`` and fails its type check is then
    converted (falling back to the declared default on failure).

    Parameters
    ----------
    received_parameters: dict
        The parameters to be checked

    Returns
    -------
    dict
        The parameters with the correct types
    """
    # Normalize/validate date parameters first; invalid ones are dropped.
    converted_parameters = check_convert_dates(
        received_parameters, ["start_period", "start_date", "end_period", "end_date"]
    )
    for received_name, received_value in received_parameters.items():
        # TODO: Remove this line when mapping between template and terminal is not needed
        template_name = TERMINAL_TEMPLATE_MAP.get(received_name, received_name)
        if template_name in OPTIMIZATION_PARAMETERS:
            PARAMETER = OPTIMIZATION_PARAMETERS[template_name]
            # Only convert values that do not already match the declared type.
            if not PARAMETER.validate_type(received_value):
                converted_parameters[received_name] = check_convert_parameter(
                    name=received_name, value=received_value, parameter=PARAMETER
                )
    return converted_parameters
def check_convert_parameter(name, value, parameter):
    """Cast ``value`` to ``parameter.type_``, falling back to its default.

    Parameters
    ----------
    name: str
        The name of the received parameter
    value: str
        The value of the received parameter
    parameter: Parameter
        The parameter object

    Returns
    -------
    The converted parameter
    """
    target = parameter.type_
    try:
        if target is int:
            # Go through float first so "2.0"-style strings are accepted.
            converted = target(float(value))
        elif target is float:
            converted = target(value)
        elif target is bool:
            converted = strtobool(value)
        else:
            converted = value
    except ValueError:
        converted = parameter.default
        console.print(
            f"[red]'{name}' format should be '{parameter.type_.__name__}' type[/red]",
            f"[red]and could not be converted. Setting default '{converted}'.\n[/red]",
        )
    return converted
def check_convert_dates(params: dict, param_name_list: List[str]) -> dict:
    """Normalize date-valued parameters to 'YYYY-MM-DD' strings in place.

    Parameters
    ----------
    params : dict
        The parameters to be converted
    param_name_list : List[str]
        The list of parameter names to check

    Returns
    -------
    dict
        The (mutated) parameters; entries that are neither dates nor
        valid 'YYYY-MM-DD' strings are removed with a warning.
    """
    for name in param_name_list:
        if name not in params:
            continue
        value = params[name]
        if isinstance(value, date):
            # date (and datetime) objects are rendered as ISO strings.
            params[name] = value.strftime("%Y-%m-%d")
        elif isinstance(value, str):
            try:
                # Validation only -- the parsed datetime is intentionally unused.
                datetime.strptime(value, "%Y-%m-%d")
            except ValueError:
                console.print(
                    f"[red]'{name}' format is not a valid date, must be YYYY-MM-DD.\n[/red]"
                )
                params.pop(name)
        else:
            console.print(
                f"[red]'{name}' format is not a valid date, must be YYYY-MM-DD.\n[/red]"
            )
            params.pop(name)
    return params
def booltostr(value: bool) -> Any:
    """Render a bool as "True"/"False"; pass any other value through unchanged.

    Parameters
    ----------
    value: bool
        The bool to be converted or the value itself if not bool

    Returns
    -------
    Any
        The converted value
    """
    if not isinstance(value, bool):
        return value
    return "True" if value else "False"
def strtobool(value: str) -> Any:
    """Parse "true"/"false" (any case) into a bool; pass other values through.

    Parameters
    ----------
    value: str
        The string to be converted or the value itself if not string

    Returns
    -------
    Any
        The converted value
    """
    if isinstance(value, str):
        lowered = value.lower()
        if lowered == "true":
            return True
        if lowered == "false":
            return False
    return value
import configparser
from typing import Tuple
from pathlib import Path
import pandas as pd
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.portfolio.portfolio_optimization import excel_model
from openbb_terminal.rich_config import console
from openbb_terminal.portfolio.portfolio_optimization.parameters import params_statics
from openbb_terminal.portfolio.portfolio_optimization.parameters.params_helpers import (
booltostr,
check_convert_parameters,
)
from openbb_terminal.core.config import paths
def load_file(path: str = "") -> Tuple[dict, str]:
    """Load an .ini or .xlsx optimization-parameter file and echo its contents.

    Parameters
    ----------
    path: str
        The location of the file to be loaded in either xlsx or ini.

    Returns
    -------
    Tuple[dict, str]
        The converted parameters and the model technique ("" when absent
        or when the file type is unsupported).
    """
    if str(path).endswith(".ini"):
        params_obj = configparser.RawConfigParser()
        # BUG FIX: optionxform must be replaced BEFORE read(); the original
        # assigned it afterwards, by which time configparser had already
        # lower-cased every key, so the override had no effect.
        params_obj.optionxform = str  # type: ignore
        params_obj.read(path)
        params: dict = dict(params_obj["OPENBB"].items())
        current_model = params.get("technique", "")
    elif str(path).endswith(".xlsx"):
        params, _ = excel_model.load_configuration(path)
        current_model = params["technique"]
    else:
        console.print("Cannot load in the file due to not being an .ini or .xlsx file.")
        return {}, ""

    converted_parameters = check_convert_parameters(received_parameters=params)

    # default=0 keeps an empty parameter file from raising ValueError.
    max_len = max((len(k) for k in converted_parameters), default=0)
    help_text = "[info]Parameters:[/info]\n"
    if current_model:
        # Hoisted out of the loop: the valid-parameter list is invariant.
        all_params = (
            params_statics.DEFAULT_PARAMETERS
            + params_statics.MODEL_PARAMS[current_model]
        )
        for k, v in converted_parameters.items():
            if k in all_params:
                v = booltostr(v)
                help_text += f"    [param]{k}{' ' * (max_len - len(k))} :[/param] {v}\n"
    else:
        for k, v in converted_parameters.items():
            v = booltostr(v)
            help_text += f"    [param]{k}{' ' * (max_len - len(k))} :[/param] {v}\n"
    console.print(help_text)

    return converted_parameters, current_model
def save_file(path: str, params: dict) -> Path:
    """Write ``params`` to an .ini file in the user's optimization directory.

    Parameters
    ----------
    path : str
        File name to save under (expected to end in ".ini").
    params : dict
        Parameter name/value pairs, written to the [OPENBB] section.

    Returns
    -------
    Path
        Full path of the written file.
    """
    if not path.endswith(".ini"):
        # NOTE(review): this only warns and still writes the file below --
        # presumably the extension was already validated upstream
        # (check_save_file); confirm before tightening.
        console.print("[red]File to be saved needs to be a .ini file.[/red]\n")

    # Create file if it does not exist
    base_path = paths.USER_PORTFOLIO_DATA_DIRECTORY / "optimization"
    if not base_path.is_dir():
        base_path.mkdir()
    filepath = base_path / path

    config_parser = configparser.RawConfigParser()
    config_parser.add_section("OPENBB")
    for key, value in params.items():
        config_parser.set("OPENBB", key, value)
    with open(filepath, "w") as configfile:
        config_parser.write(configfile)
    return filepath
def show_arguments(arguments, description=None):
    """Display each parameter with its allowed values and optional description.

    Parameters
    ----------
    arguments: Dictionary
        A dictionary containing the keys and the possible values.
    description: Dictionary
        A dictionary containing the keys equal to arguments and the descriptions.

    Returns
    -------
    A table containing the parameter names, possible values and (if applicable) the description.
    """
    condensed = {}
    for name in arguments:
        values = arguments[name]
        if len(values) > 15:
            # Long numeric ranges are summarised instead of listed in full.
            low = min(values)
            high = max(values)
            step = high / sum(v > 0 for v in values)
            condensed[name] = f"Between {low} and {high} in steps of {step}"
        else:
            condensed[name] = ", ".join(values)

    if description:
        df = pd.DataFrame([condensed, description]).T
        columns = ["Options", "Description"]
    else:
        df = pd.DataFrame([condensed]).T
        columns = ["Options"]

    # The selected technique itself is not a tunable parameter.
    df = df[df.index != "technique"]
    print_rich_table(
        df, headers=list(columns), show_index=True, index_name="Parameters"
    )
# Shared option sets reused across several parameters.
DEFAULT_RANGE = [value / 1000 for value in range(0, 1001)]  # 0.000 .. 1.000
DEFAULT_BOOL = ["True", "False"]

# Allowed values for every tunable optimization parameter.
AVAILABLE_OPTIONS = {
    "historic_period": ["d", "w", "mo", "y", "ytd", "max"],
    "start_period": ["Any"],
    "end_period": ["Any"],
    "log_returns": DEFAULT_BOOL,
    "return_frequency": ["d", "w", "m"],
    "max_nan": DEFAULT_RANGE,
    "threshold_value": DEFAULT_RANGE,
    "nan_fill_method": [
        "linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic",
    ],
    "risk_free": DEFAULT_RANGE,
    "significance_level": DEFAULT_RANGE,
    "risk_measure": [
        "MV", "MAD", "MSV", "FLPM", "SLPM", "CVaR", "EVaR",
        "WR", "ADD", "UCI", "CDaR", "EDaR", "MDD",
    ],
    "target_return": DEFAULT_RANGE + [-x for x in DEFAULT_RANGE],
    "target_risk": DEFAULT_RANGE + [-x for x in DEFAULT_RANGE],
    "expected_return": ["hist", "ewma1", "ewma2"],
    "covariance": [
        "hist", "ewma1", "ewma2", "ledoit", "oas", "shrunk",
        "gl", "jlogo", "fixed", "spectral", "shrink",
    ],
    "smoothing_factor_ewma": DEFAULT_RANGE,
    "long_allocation": DEFAULT_RANGE,
    "short_allocation": DEFAULT_RANGE,
    "risk_aversion": [value / 100 for value in range(-500, 501)],
    "amount_portfolios": range(1, 10001),
    "random_seed": range(1, 10001),
    "tangency": DEFAULT_BOOL,
    "risk_parity_model": ["A", "B", "C"],
    "penal_factor": DEFAULT_RANGE + [-x for x in DEFAULT_RANGE],
    "co_dependence": [
        "pearson", "spearman", "abs_pearson", "abs_spearman",
        "distance", "mutual_info", "tail",
    ],
    "cvar_simulations": range(1, 10001),
    "cvar_significance": DEFAULT_RANGE,
    "linkage": [
        "single", "complete", "average", "weighted", "centroid", "ward", "dbht",
    ],
    "max_clusters": range(1, 101),
    "amount_bins": ["KN", "FD", "SC", "HGR", "Integer"],
    "alpha_tail": DEFAULT_RANGE,
    "leaf_order": DEFAULT_BOOL,
    "objective": ["MinRisk", "Utility", "Sharpe", "MaxRet"],
}
# Data-preparation parameters shared by every optimization technique.
DEFAULT_PARAMETERS = [
    "historic_period", "start_period", "end_period", "log_returns",
    "return_frequency", "max_nan", "threshold_value", "nan_fill_method",
    "risk_free", "significance_level",
]
# Building blocks shared by several techniques. Copies are taken per entry so
# the per-model lists stay independent, exactly like the original literals.
_MEAN_RISK = [
    "risk_measure", "target_return", "target_risk", "expected_return",
    "covariance", "smoothing_factor_ewma", "long_allocation",
]
_CLUSTERING = [
    "risk_measure", "covariance", "smoothing_factor_ewma", "long_allocation",
    "co_dependence", "cvar_simulations", "cvar_significance", "linkage",
    "amount_clusters", "max_clusters", "amount_bins", "alpha_tail",
    "leaf_order", "objective",
]

# Tunable parameters accepted by each optimization technique.
MODEL_PARAMS = {
    "maxsharpe": _MEAN_RISK + ["short_allocation"],
    "minrisk": _MEAN_RISK + ["short_allocation"],
    "maxutil": _MEAN_RISK + ["short_allocation", "risk_aversion"],
    "maxret": list(_MEAN_RISK),
    "maxdiv": ["covariance", "long_allocation"],
    "maxdecorr": ["covariance", "long_allocation"],
    "ef": [
        "risk_measure", "long_allocation", "short_allocation",
        "amount_portfolios", "random_seed", "tangency",
    ],
    "equal": ["risk_measure", "long_allocation"],
    "mktcap": ["risk_measure", "long_allocation"],
    "dividend": ["risk_measure", "long_allocation"],
    "riskparity": [
        "risk_measure", "target_return", "long_allocation", "risk_contribution",
    ],
    "relriskparity": [
        "risk_measure", "covariance", "smoothing_factor_ewma", "long_allocation",
        "risk_contribution", "risk_parity_model", "penal_factor",
    ],
    "hrp": list(_CLUSTERING),
    "herc": list(_CLUSTERING),
    "nco": list(_CLUSTERING),
}
__docformat__ = "numpy"
import logging
import os
import matplotlib.pyplot as plt
import mplfinance as mpf
from openbb_terminal.config_terminal import theme
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.portfolio.brokers.robinhood import robinhood_model
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Maps the `window` argument accepted by display_historical to the
# human-readable label used in the chart title.
span_title_dict = {
    "day": "Day",
    "week": "Week",
    "month": "Month",
    "3month": "3 Months",
    "year": "Year",
    "5year": "5 Years",
    "all": "All Time",
}
@log_start_end(log=logger)
def display_holdings(export: str = ""):
    """Display stock holdings in robinhood

    Parameters
    ----------
    export : str, optional
        Format to export data, by default ""
    """
    holdings_df = robinhood_model.get_holdings()
    print_rich_table(
        holdings_df, headers=list(holdings_df.columns), title="Robinhood Holdings"
    )
    export_data(
        export,
        os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
        "rh_holdings",
        holdings_df,
    )
@log_start_end(log=logger)
def display_historical(interval: str = "day", window: str = "3month", export: str = ""):
    """Display historical portfolio

    Parameters
    ----------
    interval : str
        Interval to look at (candle width), default="day"
    window : str
        How long to look back, default="3month"
    export : str, optional
        Format to export data
    """
    portfolio_hist = robinhood_model.get_historical(interval, window)

    # Candlestick chart of account equity over the requested span.
    mpf.plot(
        portfolio_hist,
        type="candle",
        style=theme.mpf_style,
        title=f"\nPortfolio for {span_title_dict[window]}",
        ylabel="Equity ($)",
        xrotation=10,
        figratio=(10, 7),
        figscale=1.10,
        scale_padding={"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
        figsize=(plot_autoscale()),
        update_width_config=dict(
            candle_linewidth=0.6,
            candle_width=0.8,
            volume_linewidth=0.8,
            volume_width=0.8,
        ),
    )
    if obbff.USE_ION:
        plt.ion()
    console.print()

    export_data(
        export,
        os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
        "rh_hist",
        portfolio_hist,
    )
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.portfolio.brokers.ally import ally_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_history(limit: int = 15, export: str = "") -> None:
    """Display recent transaction history from the Ally account.

    Parameters
    ----------
    limit : int, optional
        Number of most recent transactions to show, by default 15
    export : str, optional
        Format to export data, by default ""
    """
    history = ally_model.get_history(limit)
    # Only a user-friendly subset of the raw history columns is shown;
    # the full frame is still exported below.
    show_history = history[["amount", "date", "symbol", "transactiontype", "quantity"]]
    print_rich_table(
        show_history,
        headers=list(show_history.columns),
        show_index=False,
        title="Ally History",
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "ally_history", history
    )
@log_start_end(log=logger)
def display_holdings(export: str = "") -> None:
    """Display holdings from ally account

    Parameters
    ----------
    export : str, optional
        Format to export data, by default ""
    """
    holdings_df = ally_model.get_holdings().set_index("Symbol")
    print_rich_table(
        holdings_df,
        headers=list(holdings_df.columns),
        show_index=True,
        title="Ally Holdings",
    )
    export_data(
        export,
        os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
        "ally_holdings",
        holdings_df,
    )
@log_start_end(log=logger)
def display_balances(export: str = "") -> None:
    """Display balances from ally account

    Parameters
    ----------
    export : str, optional
        Format to export data, by default ""
    """
    balances = ally_model.get_balances()

    # Export the full balance sheet before trimming to the display columns.
    export_data(
        export,
        os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
        "ally_balances",
        balances,
    )

    # Pick which balances to show
    shown_columns = [
        "accountvalue",
        "buyingpower.stock",
        "money.cash",
        "securities.stocks",
        "securities.total",
    ]
    balances = balances[shown_columns]
    print_rich_table(
        balances,
        headers=list(balances.columns),
        show_index=False,
        title="Ally Balances",
    )
@log_start_end(log=logger)
def display_stock_quote(symbol: str) -> None:
    """Displays stock quote for ticker/tickers

    Parameters
    ----------
    symbol : str
        Ticker to get. Can be in form of 'tick1,tick2...'
    """
    quote = ally_model.get_stock_quote(symbol)
    print_rich_table(
        quote, headers=list(quote.columns), show_index=True, title="Stock Quote"
    )
@log_start_end(log=logger)
def display_top_lists(
    list_type: str = "", exchange: str = "", limit: int = 20, export: str = ""
):
    """
    Display top lists from ally Invest API. Documentation for parameters below:
    https://www.ally.com/api/invest/documentation/market-toplists-get/

    Parameters
    ----------
    list_type : str
        Which list to get data for
    exchange : str
        Which exchange to look at
    limit : int, optional
        Number of top rows to show, by default 20
    export : str, optional
        Format to export data, by default ""
    """
    movers_df = ally_model.get_top_movers(list_type, exchange, limit)
    print_rich_table(
        movers_df,
        headers=list(movers_df.columns),
        show_index=True,
        title="Ally Top Lists",
    )
    export_data(
        export,
        os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
        "ally_movers",
        movers_df,
    )
__docformat__ = "numpy"
import logging
import ally
import pandas as pd
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_holdings() -> pd.DataFrame:
    """Fetch current Ally holdings as a cleaned DataFrame.

    Returns
    -------
    pd.DataFrame
        Dataframe of positions
    """
    client = ally.Ally()
    raw_holdings = client.holdings(dataframe=True)
    return ally_positions_to_df(raw_holdings)
@log_start_end(log=logger)
def ally_positions_to_df(df: pd.DataFrame) -> pd.DataFrame:
    """Clean up ally holdings dataframe

    Parameters
    ----------
    df : pd.DataFrame
        Input dataframe of holdings

    Returns
    -------
    pd.DataFrame
        Processed holdings with float numeric columns and a PnL column
    """
    numeric_cols = ["qty", "costbasis", "marketvalue"]
    df = df.loc[:, numeric_cols + ["sym"]]
    df[numeric_cols] = df[numeric_cols].astype(float)
    df = df.rename(
        columns={
            "qty": "Quantity",
            "costbasis": "CostBasis",
            "marketvalue": "MarketValue",
            "sym": "Symbol",
        }
    )
    # Unrealized profit/loss per position.
    df["PnL"] = df["MarketValue"] - df["CostBasis"]
    return df
@log_start_end(log=logger)
def get_history(limit: int = 50) -> pd.DataFrame:
    """Get transaction history for the Ally account.

    Parameters
    ----------
    limit : int
        Number of entries to return, by default 50

    Returns
    -------
    pd.DataFrame
        Dataframe of transaction history
    """
    a = ally.Ally()
    df = a.history(dataframe=True)
    # tail() keeps the last `limit` rows -- presumably the most recent
    # entries (history appears to be chronological); confirm with the API.
    return df.tail(limit)
@log_start_end(log=logger)
def get_balances() -> pd.DataFrame:
    """Get balance details for the Ally account.

    Returns
    -------
    pd.DataFrame
        Dataframe of account balances
    """
    a = ally.Ally()
    return a.balances(dataframe=True)
@log_start_end(log=logger)
def get_stock_quote(symbol: str) -> pd.DataFrame:
    """Gets quote for stock ticker

    Parameters
    ----------
    symbol : str
        Ticker to get. Can be in form of 'tick1,tick2...'

    Returns
    -------
    pd.DataFrame
        Dataframe of ticker quote
    """
    quote_fields = ["last", "bid", "ask", "opn", "dollar_value", "chg", "vl"]
    client = ally.Ally()
    return client.quote(
        symbol,
        fields=quote_fields,
        dataframe=True,
    )
@log_start_end(log=logger)
def get_top_movers(
    list_type: str = "", exchange: str = "", limit: int = 50
) -> pd.DataFrame:
    """
    Gets top lists from ally Invest API. Documentation for parameters below:
    https://www.ally.com/api/invest/documentation/market-toplists-get/

    Parameters
    ----------
    list_type : str
        Which list to get data for
    exchange : str
        Which exchange to look at
    limit: int
        Number of top rows to return

    Returns
    -------
    pd.DataFrame
        DataFrame of top movers
    """
    client = ally.Ally()
    movers = client.toplists(list_type, exchange, dataframe=True)
    # Keep only the last `limit` rows of the full list returned by the API.
    return movers.tail(limit)
import argparse
import datetime
import logging
from typing import List
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
# IMPORTATION INTERNAL
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
valid_date,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.portfolio.brokers.degiro.degiro_view import DegiroView
logger = logging.getLogger(__name__)
class DegiroController(BaseController):
    """Degiro Controller class"""

    # Commands exposed in this menu; each maps to a `call_<name>` handler.
    CHOICES_COMMANDS = [
        "cancel",
        "companynews",
        "create",
        "hold",
        "lastnews",
        "login",
        "logout",
        "lookup",
        "pending",
        "topnews",
        "update",
        "paexport",
    ]
    # Location of this menu in the terminal's command tree.
    PATH = "/portfolio/bro/degiro/"
    def __init__(self, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)

        # View wrapping all Degiro API interactions for this menu.
        self.__degiro_view = DegiroView()

        # Build tab-completion choices only for interactive prompt-toolkit
        # sessions; keys are commands, nested keys are their flags.
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = {c: {} for c in self.controller_choices}
            zero_to_hundred: dict = {str(c): {} for c in range(0, 100)}
            choices["login"] = {
                "--one-time-password": None,
                "-otp": "--one-time-password",
            }
            choices["lookup"] = {
                "--limit": None,
                "-l": "--limit",
                "--offset": zero_to_hundred,
                "-o": "--offset",
            }
            choices["create"] = {
                "--action": {c: {} for c in DegiroView.ORDER_ACTION},
                "-a": "--action",
                "--product": None,
                "-prod": "--product",
                "--symbol": None,
                "-sym": "--symbol",
                "--price": None,
                "-p": "--price",
                "--size": None,
                "-s": "--size",
                "--up-to": None,
                "-up": "--up-to",
                "--duration": {c: {} for c in DegiroView.ORDER_DURATION},
                "-d": "--duration",
                "--type": {c: {} for c in DegiroView.ORDER_TYPE},
                "-t": "--type",
            }
            choices["update"] = {
                "--price": None,
                "-p": "--price",
            }
            choices["lastnews"] = {
                "--limit": None,
                "-l": "--limit",
            }
            choices["paexport"] = {
                "--start": None,
                "-s": "--start",
                "--end": None,
                "-e": "--end",
                "--currency": None,
                "-c": "--currency",
            }

            self.completer = NestedCompleter.from_nested_dict(choices)
    def print_help(self):
        """Print the help screen for this menu (delegated to the view)."""
        DegiroView.help_display()
@log_start_end(log=logger)
def call_cancel(self, other_args: List[str]):
"""Cancel an order using the `id`."""
# PARSE ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="cancel",
)
parser.add_argument(
"id",
help="Order's id.",
type=str,
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.cancel(ns_parser=ns_parser)
    @log_start_end(log=logger)
    def call_companynews(self, other_args: List[str]):
        """Display news related to a company using its ISIN."""
        # PARSE ARGS
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="companynews",
        )
        parser.add_argument(
            "-s",
            "--symbol",
            type=str,
            help="ISIN code of the company.",
            # Only mandatory when the user is not simply asking for help.
            required="-h" not in other_args,
            action="store",
            dest="symbol",
        )
        parser.add_argument(
            "-l",
            "--limit",
            type=int,
            default=10,
            help="Number of news to display.",
            required=False,
            action="store",
            dest="limit",
        )
        parser.add_argument(
            "-o",
            "--offset",
            type=int,
            default=0,
            help="Offset of news to display.",
            required=False,
            action="store",
            dest="offset",
        )
        parser.add_argument(
            "-lang",
            "--languages",
            type=str,
            default="en,fr",
            help="Languages of news to display.",
            required=False,
            action="store",
            dest="languages",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        # parse_known_args_and_warn returns None on failure/help request.
        if ns_parser:
            self.__degiro_view.companynews(
                symbol=ns_parser.symbol,
                limit=ns_parser.limit,
                offset=ns_parser.offset,
                languages=ns_parser.languages,
            )
@log_start_end(log=logger)
def call_create(self, other_args: List[str]):
"""Create an order."""
# PARSE ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="create",
)
parser.add_argument(
"-a",
"--action",
choices=DegiroView.ORDER_ACTION.keys(),
default="buy",
help="Action wanted.",
required=False,
type=str,
)
product_group = parser.add_mutually_exclusive_group(
required=True,
)
product_group.add_argument(
"-prod",
"--product",
help="Id of the product wanted.",
required=False,
type=int,
)
product_group.add_argument(
"-sym",
"--symbol",
help="Symbol wanted.",
required=False,
type=str,
)
parser.add_argument(
"-p",
"--price",
help="Price wanted.",
required="-h" not in other_args,
type=float,
)
size_group = parser.add_mutually_exclusive_group(required=True)
size_group.add_argument(
"-s",
"--size",
help="Price wanted.",
required=False,
type=int,
)
size_group.add_argument(
"-up",
"--up-to",
help="Up to price.",
required=False,
type=float,
)
parser.add_argument(
"-d",
"--duration",
default="gtd",
choices=DegiroView.ORDER_DURATION.keys(),
help="Duration of the Order.",
required=False,
type=str,
)
parser.add_argument(
"-t",
"--type",
choices=DegiroView.ORDER_TYPE.keys(),
default="limit",
help="Type of the Order.",
required=False,
type=str,
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.create(ns_parser=ns_parser)
@log_start_end(log=logger)
def call_hold(self, other_args):
"""Display held products."""
# PARSE ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="hold",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.hold(ns_parser=ns_parser)
@log_start_end(log=logger)
def call_lastnews(self, other_args: List[str]):
"""Display latest news."""
# PARSE ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="lastnews",
)
parser.add_argument(
"-l",
"--limit",
default=10,
type=int,
help="Number of news to display.",
required=False,
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.lastnews(ns_parser=ns_parser)
    @log_start_end(log=logger)
    def call_login(self, other_args: List[str]):
        """Connect to Degiro's API, optionally with a 2FA one-time password."""
        # PARSE ARGS
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="login",
        )
        parser.add_argument(
            "-otp",
            "--one-time-password",
            default=None,
            help="One-time-password for 2FA.",
            required=False,
            # NOTE(review): type=int strips leading zeros from OTP codes such
            # as "012345" -- confirm the downstream API accepts numeric OTPs.
            type=int,
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            self.__degiro_view.login(otp=ns_parser.one_time_password)
@log_start_end(log=logger)
def call_logout(self, other_args: List[str]):
"""Log out from Degiro's API."""
# PARSE ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="logout",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.__degiro_view.logout()
@log_start_end(log=logger)
def call_lookup(self, other_args: List[str]):
"""Search for products by their name."""
# PARSING ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="lookup",
)
parser.add_argument(
"search_text",
type=str,
help="Name of the company or a text.",
)
parser.add_argument(
"-l",
"--limit",
type=int,
default=10,
help="Number of result expected (0 for unlimited).",
)
parser.add_argument(
"-o",
"--offset",
type=int,
default=0,
help="To use an offset.",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.lookup(ns_parser=ns_parser)
@log_start_end(log=logger)
def call_pending(self, other_args: List[str]):
"""Display pending orders."""
# PARSING ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="pending",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.pending(ns_parser=ns_parser)
@log_start_end(log=logger)
def call_topnews(self, other_args: List[str]):
"""Display top news."""
# PARSING ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="topnews",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.topnews(ns_parser=ns_parser)
@log_start_end(log=logger)
def call_update(self, other_args: List[str]):
"""Update an order."""
# PARSING ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="update",
)
parser.add_argument(
"id",
help="Order's id.",
type=str,
)
parser.add_argument(
"-p",
"--price",
help="Price wanted.",
required="-h" not in other_args,
type=float,
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
self.__degiro_view.update(ns_parser=ns_parser)
@log_start_end(log=logger)
def call_paexport(self, other_args: List[str]):
"""Export transactions for Portfolio menu into csv format. The transactions
file is exported to the portfolio/holdings folder and can be loaded directly
in the Portfolio menu."""
# PARSING ARGS
parser = argparse.ArgumentParser(
add_help=False,
prog="paexport",
)
parser.add_argument(
"-s",
"--start",
help="Start date.",
required=True,
type=valid_date,
)
parser.add_argument(
"-e",
"--end",
help="End date.",
type=valid_date,
default=datetime.datetime.now(),
)
parser.add_argument(
"-c",
"--currency",
help="Used currency.",
default="USD",
type=str,
)
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED,
)
if ns_parser:
self.__degiro_view.transactions_export(ns_parser=ns_parser) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/brokers/degiro/degiro_controller.py | 0.673406 | 0.229686 | degiro_controller.py | pypi |
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.decorators import check_api_key
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.portfolio.brokers.coinbase import coinbase_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_COINBASE_KEY", "API_COINBASE_SECRET", "API_COINBASE_PASS_PHRASE"])
def display_account(currency: str = "USD", export: str = "") -> None:
    """Display list of all your trading accounts. [Source: Coinbase]
    Parameters
    ----------
    currency: str
        Currency to show current value in, default 'USD'
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    accounts = coinbase_model.get_accounts(currency=currency, add_current_price=True)
    if accounts.empty:
        return
    # Keep only accounts holding a non-zero balance.
    accounts["balance"] = accounts["balance"].astype(float)
    accounts = accounts[accounts["balance"] > 0]
    # Export keeps the id column; the displayed table hides it.
    raw_data = accounts.copy()
    display_df = accounts.drop(columns=["id"])
    print_rich_table(
        display_df,
        headers=list(display_df.columns),
        show_index=False,
        title="All Trading Accounts",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "account",
        raw_data,
    )
@log_start_end(log=logger)
@check_api_key(["API_COINBASE_KEY", "API_COINBASE_SECRET", "API_COINBASE_PASS_PHRASE"])
def display_history(account: str, export: str = "", limit: int = 20) -> None:
    """Display account history. [Source: Coinbase]
    Parameters
    ----------
    account: str
        Symbol or account id
    limit: int
        For all accounts display only top n
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    history = coinbase_model.get_account_history(account)
    # Export gets the full history; the table is truncated to `limit` rows.
    raw_data = history.copy()
    if history.empty:
        return
    print_rich_table(
        history.head(limit),
        headers=list(history.columns),
        show_index=False,
        title="Account History",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "history",
        raw_data,
    )
@log_start_end(log=logger)
@check_api_key(["API_COINBASE_KEY", "API_COINBASE_SECRET", "API_COINBASE_PASS_PHRASE"])
def display_orders(
    limit: int = 20, sortby: str = "price", descend: bool = False, export: str = ""
) -> None:
    """List your current open orders [Source: Coinbase]
    Parameters
    ----------
    limit: int
        Last `limit` of trades. Maximum is 1000.
    sortby: str
        Key to sort by
    descend: bool
        Flag to sort descending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = coinbase_model.get_orders(limit, sortby, descend)
    df_data = df.copy()
    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        # Fixed typo: the title previously read "Current Open Doors".
        title="Current Open Orders",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "orders",
        df_data,
    )
@log_start_end(log=logger)
@check_api_key(["API_COINBASE_KEY", "API_COINBASE_SECRET", "API_COINBASE_PASS_PHRASE"])
def display_deposits(
    limit: int = 20,
    sortby: str = "amount",
    deposit_type: str = "deposit",
    descend: bool = False,
    export: str = "",
) -> None:
    """Display deposits into account [Source: Coinbase]
    Parameters
    ----------
    limit: int
        Last `limit` of trades. Maximum is 1000.
    sortby: str
        Key to sort by
    descend: bool
        Flag to sort descending
    deposit_type: str
        internal_deposits (transfer between portfolios) or deposit
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = coinbase_model.get_deposits(limit, sortby, deposit_type, descend)
    if df.empty:
        return
    # Copy taken before display so the export matches what was fetched.
    df_data = df.copy()
    print_rich_table(
        df, headers=list(df.columns), show_index=False, title="Account Deposits"
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "deposits",
        df_data,
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/brokers/coinbase/coinbase_view.py | 0.659844 | 0.177347 | coinbase_view.py | pypi |
__docformat__ = "numpy"
import logging
import pandas as pd
import openbb_terminal.config_terminal as cfg
from openbb_terminal.cryptocurrency.coinbase_helpers import (
CoinbaseProAuth,
_check_account_validity,
make_coinbase_request,
CoinbaseApiException,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
import openbb_terminal.cryptocurrency.due_diligence.coinbase_model as cbm
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_accounts(add_current_price: bool = True, currency: str = "USD") -> pd.DataFrame:
    """Get list of all your trading accounts. [Source: Coinbase]
    Single account information:
    .. code-block:: json
        {
            "id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
            "currency": "BTC",
            "balance": "0.0000000000000000",
            "available": "0.0000000000000000",
            "hold": "0.0000000000000000",
            "profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
        }
    .
    Parameters
    ----------
    add_current_price: bool
        Boolean to query coinbase for current price
    currency: str
        Currency to convert to, defaults to 'USD'
    Returns
    -------
    pd.DataFrame
        DataFrame with all your trading accounts.
    """
    try:
        auth = CoinbaseProAuth(
            cfg.API_COINBASE_KEY, cfg.API_COINBASE_SECRET, cfg.API_COINBASE_PASS_PHRASE
        )
        resp = make_coinbase_request("/accounts", auth=auth)
    except CoinbaseApiException as e:
        if "Invalid API Key" in str(e):
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(e)
        return pd.DataFrame()
    if not resp:
        console.print("No data found.\n")
        return pd.DataFrame()
    df = pd.DataFrame(resp)
    # Hide zero-balance accounts.
    df = df[df.balance.astype(float) > 0]
    if add_current_price:
        current_prices = []
        for index, row in df.iterrows():
            _, pairs = cbm.show_available_pairs_for_given_symbol(row.currency)
            if currency not in pairs:
                # No tradeable pair against the requested currency: drop row.
                df.drop(index, inplace=True)
                continue
            to_get = f"{row.currency}-{currency}"
            # Check pair validity. This is needed for delisted products like XRP
            try:
                cb_request = make_coinbase_request(
                    f"/products/{to_get}/stats", auth=auth
                )
            except Exception as e:
                if "Not allowed for delisted products" in str(e):
                    message = f"Coinbase product is delisted {str(e)}"
                    logger.debug(message)
                else:
                    message = (
                        f"Coinbase does not recognize this pair {to_get}: {str(e)}"
                    )
                    logger.debug(message)
                # Rows dropped here keep current_prices aligned with df.
                df.drop(index, inplace=True)
                continue
            current_prices.append(float(cb_request["last"]))
        df["current_price"] = current_prices
        df[f"BalanceValue({currency})"] = df.current_price * df.balance.astype(float)
        return df[
            [
                "id",
                "currency",
                "balance",
                "available",
                "hold",
                f"BalanceValue({currency})",
            ]
        ]
    return df[["id", "currency", "balance", "available", "hold"]]
@log_start_end(log=logger)
def get_account_history(account: str) -> pd.DataFrame:
    """Get your account history. Account activity either increases or decreases your account balance. [Source: Coinbase]
    Example api response:
    .. code-block:: json
        {
            "id": "100",
            "created_at": "2014-11-07T08:19:27.028459Z",
            "amount": "0.001",
            "balance": "239.669",
            "type": "fee",
            "details": {
                "order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
                "trade_id": "74",
                "product_id": "BTC-USD"
            }
        }
    .
    Parameters
    ----------
    account: str
        id ("71452118-efc7-4cc4-8780-a5e22d4baa53") or currency (BTC)
    Returns
    -------
    pd.DataFrame
        DataFrame with account history.
    """
    try:
        auth = CoinbaseProAuth(
            cfg.API_COINBASE_KEY, cfg.API_COINBASE_SECRET, cfg.API_COINBASE_PASS_PHRASE
        )
        account_id = _check_account_validity(account)
        # Bail out before hitting the API with an unknown account: the previous
        # version requested "/accounts/None/holds" first and only checked after.
        if not account_id:
            console.print(f"Account {account} does not exist.\n")
            return pd.DataFrame()
        resp = make_coinbase_request(f"/accounts/{account_id}/holds", auth=auth)
    except CoinbaseApiException as e:
        if "Invalid API Key" in str(e):
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(e)
        return pd.DataFrame()
    if not resp:
        console.print(
            f"Your account {account} doesn't have any funds."
            f"To check all your accounts use command account --all\n"
        )
        return pd.DataFrame()
    df = pd.json_normalize(resp)
    try:
        # Flatten the "details.*" columns produced by json_normalize.
        df.columns = [
            col.replace("details.", "") if "details" in col else col
            for col in df.columns
        ]
    except Exception as e:
        logger.exception(str(e))
        console.print(e)
    return df
@log_start_end(log=logger)
def get_orders(
    limit: int = 20, sortby: str = "price", descend: bool = False
) -> pd.DataFrame:
    """List your current open orders. Only open or un-settled orders are returned. [Source: Coinbase]
    Example response from API:
    .. code-block:: json
        {
            "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
            "price": "0.10000000",
            "size": "0.01000000",
            "product_id": "BTC-USD",
            "side": "buy",
            "stp": "dc",
            "type": "limit",
            "time_in_force": "GTC",
            "post_only": false,
            "created_at": "2016-12-08T20:02:28.53864Z",
            "fill_fees": "0.0000000000000000",
            "filled_size": "0.00000000",
            "executed_value": "0.0000000000000000",
            "status": "open",
            "settled": false
        }
    .
    Parameters
    ----------
    limit: int
        Last `limit` of trades. Maximum is 1000.
    sortby: str
        Key to sort by
    descend: bool
        Flag to sort descending
    Returns
    -------
    pd.DataFrame
        Open orders in your account
    """
    try:
        auth = CoinbaseProAuth(
            cfg.API_COINBASE_KEY, cfg.API_COINBASE_SECRET, cfg.API_COINBASE_PASS_PHRASE
        )
        resp = make_coinbase_request("/orders", auth=auth)
    except CoinbaseApiException as e:
        if "Invalid API Key" in str(e):
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(e)
        return pd.DataFrame()
    if not resp:
        console.print("No orders found for your account\n")
        # Return an empty frame with the expected columns so callers can
        # still render a (headed) table.
        return pd.DataFrame(
            columns=[
                "product_id",
                "side",
                "price",
                "size",
                "type",
                "created_at",
                "status",
            ]
        )
    df = pd.DataFrame(resp)
    if df.empty:
        return pd.DataFrame()
    df = df[["product_id", "side", "price", "size", "type", "created_at", "status"]]
    # `descend` means "sort descending", so invert it for pandas' `ascending`
    # (it was previously passed straight through, inverting the order).
    df = df.sort_values(by=sortby, ascending=not descend).head(limit)
    return df
@log_start_end(log=logger)
def get_deposits(
    limit: int = 50,
    sortby: str = "amount",
    deposit_type: str = "deposit",
    descend: bool = False,
) -> pd.DataFrame:
    """Get a list of deposits for your account. [Source: Coinbase]
    Parameters
    ----------
    limit: int
        Last `limit` of deposits shown after sorting
    sortby: str
        Key to sort by
    deposit_type: str
        internal_deposits (transfer between portfolios) or deposit
    descend: bool
        Flag to sort descending
    Returns
    -------
    pd.DataFrame
        List of deposits
    """
    try:
        auth = CoinbaseProAuth(
            cfg.API_COINBASE_KEY, cfg.API_COINBASE_SECRET, cfg.API_COINBASE_PASS_PHRASE
        )
        params = {"type": deposit_type}
        # Fall back to plain deposits for any unrecognized type.
        if deposit_type not in ["internal_deposit", "deposit"]:
            params["type"] = "deposit"
        resp = make_coinbase_request("/transfers", auth=auth, params=params)
    except CoinbaseApiException as e:
        if "Invalid API Key" in str(e):
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(e)
        return pd.DataFrame()
    if not resp:
        console.print("No deposits found for your account\n")
        return pd.DataFrame()
    if isinstance(resp, tuple):
        resp = resp[0]
    # pylint:disable=no-else-return
    if deposit_type == "deposit":
        df = pd.json_normalize(resp)
    else:
        df = pd.DataFrame(resp)[["type", "created_at", "amount", "currency"]]
    # `descend` means "sort descending"; invert for pandas' `ascending`
    # (it was previously passed straight through, inverting the order).
    df = df.sort_values(by=sortby, ascending=not descend).head(limit)
return df | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/portfolio/brokers/coinbase/coinbase_model.py | 0.575588 | 0.174744 | coinbase_model.py | pypi |
from typing import Any, Callable
from IPython.terminal.interactiveshell import TerminalInteractiveShell
from IPython.terminal.prompts import ClassicPrompts, Prompts
from pygments.token import _TokenType
from traitlets.config.loader import Config, LazyConfigValue
from . import wakatime_hook
def get_new_prompts_class(
    prompts_class: type,
    hook: Callable = wakatime_hook,
    args: tuple = (),
    kwargs: dict[str, Any] | None = None,
) -> type:
    """Get new prompts class.

    Build a ``Prompts`` subclass that renders like ``prompts_class`` but also
    fires ``hook(*args, **kwargs)`` whenever an output prompt is drawn.

    :param prompts_class:
    :type prompts_class: type
    :param hook:
    :type hook: Callable
    :param args:
    :type args: tuple
    :param kwargs:
    :type kwargs: dict[str, Any] | None
    :rtype: type
    """
    # None instead of a mutable {} default: a shared dict default would be
    # reused (and mutable) across every call of this function.
    if kwargs is None:
        kwargs = {}
    if isinstance(prompts_class, LazyConfigValue):
        prompts_class = ClassicPrompts
    shell = TerminalInteractiveShell()

    class Ps(Prompts):
        """Ps."""

        def in_prompt_tokens(self) -> list[tuple[_TokenType, str]]:
            """In prompt tokens.

            :rtype: list[tuple[_TokenType, str]]
            """
            return prompts_class(shell).in_prompt_tokens()

        def continuation_prompt_tokens(
            self, width: int | None = None
        ) -> list[tuple[_TokenType, str]]:
            """Continuation prompt tokens.

            :param width:
            :type width: int | None
            :rtype: list[tuple[_TokenType, str]]
            """
            return prompts_class(shell).continuation_prompt_tokens(width)

        def rewrite_prompt_tokens(self) -> list[tuple[_TokenType, str]]:
            """Rewrite prompt tokens.

            :rtype: list[tuple[_TokenType, str]]
            """
            return prompts_class(shell).rewrite_prompt_tokens()

        def out_prompt_tokens(self) -> list[tuple[_TokenType, str]]:
            """Out prompt tokens.

            The hook fires here, i.e. once per executed cell.

            :rtype: list[tuple[_TokenType, str]]
            """
            hook(*args, **kwargs)
            return prompts_class(shell).out_prompt_tokens()

    return Ps
def install_hook(
    c: Config,
    hook: Callable = wakatime_hook,
    args: tuple = (),
    kwargs: dict[str, Any] | None = None,
) -> Config:
    """Install hook.

    Replaces the configured prompts class with one that fires
    ``hook(*args, **kwargs)`` on every output prompt.

    :param c:
    :type c: Config
    :param hook:
    :type hook: Callable
    :param args:
    :type args: tuple
    :param kwargs:
    :type kwargs: dict[str, Any] | None
    :rtype: Config
    """
    # Default built per call to avoid a shared mutable default argument.
    if kwargs is None:
        kwargs = {"plugin": "repl-ipython-wakatime"}
    c.TerminalInteractiveShell.prompts_class = get_new_prompts_class(  # type: ignore
        c.TerminalInteractiveShell.prompts_class, hook, args, kwargs  # type: ignore
    )
return c | /repl-python-wakatime-0.0.6.tar.gz/repl-python-wakatime-0.0.6/src/repl_python_wakatime/ipython.py | 0.809653 | 0.244295 | ipython.py | pypi |
from typing import Any, Callable
from prompt_toolkit.formatted_text import AnyFormattedText
from ptpython.prompt_style import PromptStyle
from ptpython.repl import PythonRepl
from . import wakatime_hook
class Ps(PromptStyle):
    """Prompt style delegating to another style while firing a hook on output."""

    def __init__(
        self,
        prompt_style: PromptStyle,
        hook: Callable = wakatime_hook,
        args: tuple = (),
        kwargs: dict[str, Any] | None = None,
    ) -> None:
        """Init.

        :param prompt_style:
        :type prompt_style: PromptStyle
        :param hook:
        :type hook: Callable
        :param args:
        :type args: tuple
        :param kwargs:
        :type kwargs: dict[str, Any] | None
        :rtype: None
        """
        super().__init__()
        self.prompt_style = prompt_style
        self.hook = hook
        self.args = args
        # None instead of a mutable {} default: a shared dict default would
        # be reused across every instance.
        self.kwargs = {} if kwargs is None else kwargs

    def in_prompt(self) -> AnyFormattedText:
        """Return the input tokens.

        :rtype: AnyFormattedText
        """
        return self.prompt_style.in_prompt()

    def in2_prompt(self, width: int) -> AnyFormattedText:
        """Tokens for every following input line.

        :param width: The available width. This is coming from the width taken
            by `in_prompt`.
        :type width: int
        :rtype: AnyFormattedText
        """
        return self.prompt_style.in2_prompt(width)

    def out_prompt(self) -> AnyFormattedText:
        """Return the output tokens.

        The hook fires here, i.e. once per evaluated input.

        :rtype: AnyFormattedText
        """
        self.hook(*self.args, **self.kwargs)
        return self.prompt_style.out_prompt()
def install_hook(
    repl: PythonRepl,
    hook: Callable = wakatime_hook,
    args: tuple = (),
    kwargs: dict[str, Any] | None = None,
    hook_prefix: str = "ps1_",
) -> PythonRepl:
    """Install hook.

    Registers a hook-firing prompt style named "<hook_prefix><n>" (first free
    n) and makes it the active style.

    :param repl:
    :type repl: PythonRepl
    :param hook:
    :type hook: Callable
    :param args:
    :type args: tuple
    :param kwargs:
    :type kwargs: dict[str, Any] | None
    :param hook_prefix:
    :type hook_prefix: str
    :rtype: PythonRepl
    """
    # None instead of a mutable dict default (shared across calls).
    if kwargs is None:
        kwargs = {"plugin": "repl-ptpython-wakatime"}
    ps = Ps(repl.all_prompt_styles[repl.prompt_style], hook, args, kwargs)
    length = len(hook_prefix)
    # Materialize as a set: the previous `map(...)` generator was exhausted by
    # the first `in` test, so later while-loop iterations saw an empty iterator
    # and could pick an already-used number.
    taken = {
        style_name[length:]
        for style_name in repl.all_prompt_styles
        if style_name.startswith(hook_prefix)
    }
    number = 0
    while str(number) in taken:
        number += 1
    name = hook_prefix + str(number)
    repl.all_prompt_styles |= {name: ps}
    repl.prompt_style = name
return repl | /repl-python-wakatime-0.0.6.tar.gz/repl-python-wakatime-0.0.6/src/repl_python_wakatime/ptpython.py | 0.873242 | 0.188212 | ptpython.py | pypi |
import functools
from typing import Any, Callable, List, TypeVar, cast
import streamlit
TFunc = TypeVar("TFunc", bound=Callable[..., Any])
TObj = TypeVar("TObj", bound=object)
def _show_deprecated_name_warning_in_browser(
    old_name: str, new_name: str, removal_date: str
) -> None:
    """Render an st.warning telling users to migrate off a deprecated name."""
    message = (
        f"Please replace `st.{old_name}` with `st.{new_name}`.\n\n"
        f"`st.{old_name}` will be removed after {removal_date}."
    )
    streamlit.warning(message)
def deprecate_func_name(func: TFunc, old_name: str, removal_date: str) -> TFunc:
    """Wrap an `st` function whose name has changed.

    Wrapped functions will run as normal, but will also show an st.warning
    saying that the old name will be removed after removal_date.

    (We generally set `removal_date` to 3 months from the deprecation date.)

    Parameters
    ----------
    func
        The `st.` function whose name has changed.
    old_name
        The function's deprecated name within __init__.py.
    removal_date
        A date like "2020-01-01", indicating the last day we'll guarantee
        support for the deprecated name.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Run the real function first, then surface the deprecation warning.
        output = func(*args, **kwargs)
        _show_deprecated_name_warning_in_browser(old_name, func.__name__, removal_date)
        return output

    # Update the wrapped func's name & docstring so st.help does the right thing
    wrapper.__name__ = old_name
    wrapper.__doc__ = func.__doc__
    return cast(TFunc, wrapper)
def deprecate_obj_name(
    obj: TObj, old_name: str, new_name: str, removal_date: str
) -> TObj:
    """Wrap an `st` object whose name has changed.

    Wrapped objects will behave as normal, but will also show an st.warning
    saying that the old name will be removed after `removal_date`.

    (We generally set `removal_date` to 3 months from the deprecation date.)

    Parameters
    ----------
    obj
        The `st.` object whose name has changed.
    old_name
        The object's deprecated name within __init__.py.
    new_name
        The object's new name within __init__.py.
    removal_date
        A date like "2020-01-01", indicating the last day we'll guarantee
        support for the deprecated name.
    """

    def _warn() -> None:
        _show_deprecated_name_warning_in_browser(old_name, new_name, removal_date)

    return _create_deprecated_obj_wrapper(obj, _warn)
def _create_deprecated_obj_wrapper(obj: TObj, show_warning: Callable[[], Any]) -> TObj:
    """Create a wrapper for an object that has been deprecated. The first
    time one of the object's properties or functions is accessed, the
    given `show_warning` callback will be called.
    """
    has_shown_warning = False
    def maybe_show_warning() -> None:
        # Call `show_warning` if it hasn't already been called once.
        nonlocal has_shown_warning
        if not has_shown_warning:
            has_shown_warning = True
            show_warning()
    class Wrapper:
        def __init__(self):
            # Override all the Wrapped object's magic functions
            for name in Wrapper._get_magic_functions(obj.__class__):
                setattr(
                    self.__class__,
                    name,
                    property(self._make_magic_function_proxy(name)),
                )
        def __getattr__(self, attr):
            # We handle __getattr__ separately from our other magic
            # functions. The wrapped class may not actually implement it,
            # but we still need to implement it to call all its normal
            # functions.
            if attr in self.__dict__:
                return getattr(self, attr)
            maybe_show_warning()
            return getattr(obj, attr)
        @staticmethod
        def _get_magic_functions(cls) -> List[str]:
            # ignore the handful of magic functions we cannot override without
            # breaking the Wrapper.
            ignore = ("__class__", "__dict__", "__getattribute__", "__getattr__")
            return [
                name
                for name in dir(cls)
                if name not in ignore and name.startswith("__")
            ]
        @staticmethod
        def _make_magic_function_proxy(name):
            def proxy(self, *args):
                maybe_show_warning()
                # NOTE(review): *args are ignored; the proxy is installed as a
                # property getter, so it only forwards attribute reads -- confirm
                # callable dunders are not expected to receive arguments here.
                return getattr(obj, name)
            return proxy
return cast(TObj, Wrapper()) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/deprecation_util.py | 0.872646 | 0.257053 | deprecation_util.py | pypi |
import re
import threading
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple, cast
from blinker import Signal
from streamlit.logger import get_logger
from streamlit.string_util import extract_leading_emoji
from streamlit.util import calc_md5
LOGGER = get_logger(__name__)
def open_python_file(filename):
    """Open a read-only Python file taking proper care of its encoding.

    In Python 3, we would like all files to be opened with utf-8 encoding.
    However, some author like to specify PEP263 headers in their source files
    with their own encodings. In that case, we should respect the author's
    encoding.
    """
    import tokenize

    # tokenize.open (Python 3.2+) honours a PEP 263 coding header and falls
    # back to utf-8 -- exactly the behaviour we want. The runtime hasattr
    # check for pre-3.2 interpreters was dead code.
    return tokenize.open(filename)
PAGE_FILENAME_REGEX = re.compile(r"([0-9]*)[_ -]*(.*)\.py")
def page_sort_key(script_path: Path) -> Tuple[float, str]:
    """Sort key for page scripts: (numeric prefix, lowercased label)."""
    groups = re.findall(PAGE_FILENAME_REGEX, script_path.name)
    # Failing this assert should only be possible if script_path isn't a Python
    # file, which should never happen.
    assert len(groups) > 0, f"{script_path} is not a Python file"
    [(prefix, label)] = groups
    label = label.lower()
    # Pages without a numeric prefix sort after all numbered pages.
    sort_number = float("inf") if prefix == "" else float(prefix)
    return (sort_number, label)
def page_icon_and_name(script_path: Path) -> Tuple[str, str]:
    """Compute the icon and name of a page from its script path.

    This is *almost* the page name displayed in the nav UI, but it has
    underscores instead of spaces. The reason we do this is because having
    spaces in URLs both looks bad and is hard to deal with due to the need to
    URL-encode them. To solve this, we only swap the underscores for spaces
    right before we render page names.
    """
    match = PAGE_FILENAME_REGEX.search(script_path.name)
    if match is None:
        return "", ""
    # Collapse runs of underscores/spaces in the label; fall back to the
    # numeric prefix when the label is empty after collapsing.
    collapsed_label = re.sub(r"[_ ]+", "_", match.group(2)).strip()
    icon_and_name = collapsed_label or match.group(1)
    return extract_leading_emoji(icon_and_name)
_pages_cache_lock = threading.RLock()
_cached_pages: Optional[Dict[str, Dict[str, str]]] = None
_on_pages_changed = Signal(doc="Emitted when the pages directory is changed")
def invalidate_pages_cache():
    """Drop the cached pages dict and notify subscribers of the change."""
    global _cached_pages
    LOGGER.debug("Pages directory changed")
    with _pages_cache_lock:
        _cached_pages = None
        # Emit while still holding the lock so listeners never observe a
        # half-updated cache.
        _on_pages_changed.send()
def get_pages(main_script_path_str: str) -> Dict[str, Dict[str, str]]:
    """Return the app's pages keyed by page_script_hash, computing and
    caching the result on first use (invalidate via invalidate_pages_cache).
    """
    global _cached_pages
    # Avoid taking the lock if the pages cache hasn't been invalidated.
    pages = _cached_pages
    if pages is not None:
        return pages
    with _pages_cache_lock:
        # The cache may have been repopulated while we were waiting to grab
        # the lock.
        if _cached_pages is not None:
            return _cached_pages
        main_script_path = Path(main_script_path_str)
        main_page_icon, main_page_name = page_icon_and_name(main_script_path)
        main_page_script_hash = calc_md5(main_script_path_str)
        # NOTE: We include the page_script_hash in the dict even though it is
        # already used as the key because that occasionally makes things
        # easier for us when we need to iterate over pages.
        pages = {
            main_page_script_hash: {
                "page_script_hash": main_page_script_hash,
                "page_name": main_page_name,
                "icon": main_page_icon,
                "script_path": str(main_script_path.resolve()),
            }
        }
        pages_dir = main_script_path.parent / "pages"
        # Hidden files (".foo.py") are excluded; ordering follows page_sort_key.
        page_scripts = sorted(
            [f for f in pages_dir.glob("*.py") if not f.name.startswith(".")],
            key=page_sort_key,
        )
        for script_path in page_scripts:
            script_path_str = str(script_path.resolve())
            pi, pn = page_icon_and_name(script_path)
            psh = calc_md5(script_path_str)
            pages[psh] = {
                "page_script_hash": psh,
                "page_name": pn,
                "icon": pi,
                "script_path": script_path_str,
            }
        _cached_pages = pages
        return pages
def register_pages_changed_callback(
    callback: Callable[[str], None],
):
    """Subscribe `callback` to pages-directory changes and return a
    function that unsubscribes it."""
    def disconnect():
        _on_pages_changed.disconnect(callback)
    # weak=False so that we have control of when the pages changed
    # callback is deregistered.
    _on_pages_changed.connect(callback, weak=False)
return disconnect | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/source_util.py | 0.696578 | 0.234977 | source_util.py | pypi |
import re
import textwrap
from datetime import datetime
from typing import TYPE_CHECKING, Any, Tuple, cast
from streamlit.emojis import ALL_EMOJIS
from streamlit.errors import StreamlitAPIException
if TYPE_CHECKING:
from streamlit.type_util import SupportsStr
# The ESCAPED_EMOJI list is sorted in descending order to make that longer emoji appear
# first in the regex compiled below. This ensures that we grab the full emoji in a
# multi-character emoji sequence that starts with a shorter emoji (emoji are weird...).
ESCAPED_EMOJI = [re.escape(e) for e in sorted(ALL_EMOJIS, reverse=True)]
EMOJI_EXTRACTION_REGEX = re.compile(f"^({'|'.join(ESCAPED_EMOJI)})[_ -]*(.*)")
def decode_ascii(string: bytes) -> str:
    """Decode a bytes object using the ASCII codec."""
    return str(string, "ascii")
def clean_text(text: "SupportsStr") -> str:
    """Convert an object to text, dedent it, and strip whitespace."""
    dedented = textwrap.dedent(str(text))
    return dedented.strip()
def is_emoji(text: str) -> bool:
    """Check if input string is a valid emoji."""
    # Strip the optional variation-selector-16 before the membership test.
    normalized = text.replace("\U0000FE0F", "")
    return normalized in ALL_EMOJIS
def extract_leading_emoji(text: str) -> Tuple[str, str]:
    """Return a tuple containing the first emoji found in the given string and
    the rest of the string (minus an optional separator between the two).
    """
    match = EMOJI_EXTRACTION_REGEX.search(text)
    if match is None:
        # No leading emoji: everything is the "rest".
        return "", text
    return match.group(1), match.group(2)
def escape_markdown(raw_string: str) -> str:
    r"""Returns a new string which escapes all markdown metacharacters.

    Args
    ----
    raw_string : str
        A string, possibly with markdown metacharacters, e.g. "1 * 2"

    Returns
    -------
    A string with all metacharacters escaped.

    Examples
    --------
    ::
        escape_markdown("1 * 2") -> "1 \\* 2"
    """
    # A single translate() pass prefixes each metacharacter with a backslash.
    # This is equivalent to the sequential replace() chain: the backslash
    # itself is escaped exactly once either way.
    table = str.maketrans(
        {char: "\\" + char for char in ("\\", "*", "-", "=", "`", "!", "#", "|")}
    )
    return raw_string.translate(table)
TEXTCHARS = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})
def is_binary_string(inp):
    """Guess if an input bytesarray can be encoded as a string."""
    # From https://stackoverflow.com/a/7392391
    non_text_bytes = inp.translate(None, TEXTCHARS)
    return len(non_text_bytes) > 0
def clean_filename(name: str) -> str:
    """
    Taken from https://github.com/django/django/blob/196a99da5d9c4c33a78259a58d38fb114a4d2ee8/django/utils/text.py#L225-L238

    Return the given string converted to a string that can be used for a clean
    filename. Remove leading and trailing spaces; convert other spaces to
    underscores; and remove anything that is not an alphanumeric, dash,
    underscore, or dot.
    """
    candidate = str(name).strip().replace(" ", "_")
    candidate = re.sub(r"(?u)[^-\w.]", "", candidate)
    if candidate in {"", ".", ".."}:
        raise StreamlitAPIException("Could not derive file name from '%s'" % name)
    return candidate
def snake_case_to_camel_case(snake_case_string: str) -> str:
    """Transform input string from snake_case to CamelCase.

    Empty segments produced by consecutive underscores are skipped, so
    "a__b" becomes "AB".
    """
    # str.title() cannot raise for str inputs, so the previous try/except
    # fallback around it was dead code.
    return "".join(word.title() for word in snake_case_string.split("_") if word)
def append_date_time_to_string(input_string: str) -> str:
    """Append datetime string to input string.
    Returns datetime string if input is empty string.
    """
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    return f"{input_string}_{timestamp}" if input_string else timestamp
def generate_download_filename_from_title(title_string: str) -> str:
    """Generated download filename from page title string."""
    # Drop the app-name suffix, sanitize, camel-case, then add a timestamp.
    stripped = title_string.replace(" · Streamlit", "")
    camel_cased = snake_case_to_camel_case(clean_filename(stripped))
    return append_date_time_to_string(camel_cased)
def simplify_number(num: int) -> str:
    """Simplifies number into Human readable format, returns str.

    Examples: 1234 -> "1.2k", 2_500_000 -> "2.5m".
    """
    # Round to two significant figures first so e.g. 1234 renders as "1.2k".
    num_converted = float("{:.2g}".format(num))
    suffixes = ["", "k", "m", "b", "t"]
    magnitude = 0
    # Cap the magnitude at the largest available suffix ("t") so values of a
    # quadrillion or more degrade gracefully instead of raising IndexError.
    while abs(num_converted) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        num_converted /= 1000.0
    return "{}{}".format(
        "{:f}".format(num_converted).rstrip("0").rstrip("."),
        suffixes[magnitude],
    )
import socket
from typing import Optional
import requests
from typing_extensions import Final
from streamlit import util
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
# URL for checking the current machine's external IP address.
_AWS_CHECK_IP: Final = "http://checkip.amazonaws.com"
# Lazily-populated caches used by get_external_ip() / get_internal_ip().
_external_ip: Optional[str] = None
_internal_ip: Optional[str] = None
def get_external_ip() -> Optional[str]:
    """Get the *external* IP address of the current machine.

    The value is fetched once from AWS's check-ip service and cached in the
    module-level ``_external_ip`` for subsequent calls.

    Returns
    -------
    string
        The external IPv4 address of the current machine.
    """
    global _external_ip

    # Serve the cached answer when we already resolved it once.
    if _external_ip is not None:
        return _external_ip

    response = _make_blocking_http_get(_AWS_CHECK_IP, timeout=5)

    if not _looks_like_an_ip_adress(response):
        LOGGER.warning(
            # fmt: off
            "Did not auto detect external IP.\n"
            "Please go to %s for debugging hints.",
            # fmt: on
            util.HELP_DOC
        )
        _external_ip = None
    else:
        _external_ip = response

    return _external_ip
def get_internal_ip() -> Optional[str]:
    """Get the *local* IP address of the current machine.

    From: https://stackoverflow.com/a/28950776

    Returns
    -------
    string
        The local IPv4 address of the current machine.
    """
    global _internal_ip

    if _internal_ip is not None:
        return _internal_ip

    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as probe:
        try:
            # A UDP connect() sends no packets, so the destination does not
            # need to be reachable; it only makes the OS pick the outgoing
            # interface, whose address we then read back.
            probe.connect(("8.8.8.8", 1))
            _internal_ip = probe.getsockname()[0]
        except Exception:
            # No usable route (e.g. offline): fall back to loopback.
            _internal_ip = "127.0.0.1"

    return _internal_ip
def _make_blocking_http_get(url: str, timeout: float = 5) -> Optional[str]:
    """GET *url* and return the stripped response body, or None on any failure."""
    try:
        body = requests.get(url, timeout=timeout).text
        return body.strip() if isinstance(body, str) else body
    except Exception:
        return None
def _looks_like_an_ip_adress(address: Optional[str]) -> bool:
if address is None:
return False
try:
socket.inet_pton(socket.AF_INET, address)
return True # Yup, this is an IPv4 address!
except (AttributeError, OSError):
pass
try:
socket.inet_pton(socket.AF_INET6, address)
return True # Yup, this is an IPv6 address!
except (AttributeError, OSError):
pass
# Nope, this is not an IP address.
return False | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/net_util.py | 0.744285 | 0.175467 | net_util.py | pypi |
import contextlib
import errno
import fnmatch
import io
import os
from streamlit import env_util, util
from streamlit.string_util import is_binary_string
# Configuration and credentials are stored inside the ~/.streamlit folder.
# The same folder name is used for the per-project ${CWD}/.streamlit directory
# (see get_project_streamlit_file_path below).
CONFIG_FOLDER_NAME = ".streamlit"
def get_encoded_file_data(data, encoding="auto"):
    """Coerce bytes to a BytesIO or a StringIO.

    Parameters
    ----------
    data : bytes
    encoding : str

    Returns
    -------
    BytesIO or StringIO
        If the file's data is in a well-known textual format (or if the encoding
        parameter is set), return a StringIO. Otherwise, return BytesIO.
    """
    if encoding == "auto":
        # Sniff the payload: binary data gets no encoding; everything else is
        # assumed to be utf-8. It would be great if we could guess it a little
        # more smartly here, but it is what it is!
        encoding = None if is_binary_string(data) else "utf-8"

    if not encoding:
        return io.BytesIO(data)
    return io.StringIO(data.decode(encoding))
@contextlib.contextmanager
def streamlit_read(path, binary=False):
    """Opens a context to read this file relative to the streamlit path.

    For example:

        with streamlit_read('foo.txt') as foo:
            ...

    opens the file `~/.streamlit/foo.txt`

    path   - the path to read (within the streamlit directory)
    binary - set to True for binary IO

    Raises
    ------
    util.Error
        If the target file exists but is empty.
    """
    filename = get_streamlit_file_path(path)
    if os.stat(filename).st_size == 0:
        raise util.Error('Read zero byte file: "%s"' % filename)

    mode = "r"
    if binary:
        mode += "b"
    # BUG FIX: this previously opened os.path.join(CONFIG_FOLDER_NAME, path),
    # i.e. a path relative to the *current working directory*, while the
    # zero-byte check above inspected the file under the user's home
    # directory. Open the same resolved path that was just checked.
    with open(filename, mode) as handle:
        yield handle
@contextlib.contextmanager
def streamlit_write(path, binary=False):
    """Opens a file for writing within the streamlit path, and
    ensuring that the path exists. For example:

        with streamlit_write('foo/bar.txt') as bar:
            ...

    opens the file .streamlit/foo/bar.txt for writing,
    creating any necessary directories along the way.

    path   - the path to write to (within the streamlit directory)
    binary - set to True for binary IO

    Raises
    ------
    util.Error
        If the file cannot be written (the original OSError is chained).
    """
    mode = "w"
    if binary:
        mode += "b"
    path = get_streamlit_file_path(path)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    try:
        with open(path, mode) as handle:
            yield handle
    except OSError as e:
        msg = ["Unable to write file: %s" % os.path.abspath(path)]
        if e.errno == errno.EINVAL and env_util.IS_DARWIN:
            msg.append(
                "Python is limited to files below 2GB on OSX. "
                "See https://bugs.python.org/issue24658"
            )
        # Chain the original OSError so the root cause stays in the traceback.
        raise util.Error("\n".join(msg)) from e
def get_static_dir():
    """Get the folder where static HTML/JS/CSS files live."""
    module_dir = os.path.dirname(os.path.normpath(__file__))
    return os.path.normpath(os.path.join(module_dir, "static"))
def get_assets_dir():
    """Get the folder where static assets live."""
    module_dir = os.path.dirname(os.path.normpath(__file__))
    return os.path.normpath(os.path.join(module_dir, "static", "assets"))
def get_streamlit_file_path(*filepath) -> str:
    """Return the full path to a file in ~/.streamlit.

    This doesn't guarantee that the file (or its directory) exists.
    """
    home_dir = os.path.expanduser("~")  # works on macOS, Linux and Windows
    if home_dir is None:
        raise RuntimeError("No home directory.")
    return os.path.join(home_dir, CONFIG_FOLDER_NAME, *filepath)
def get_project_streamlit_file_path(*filepath) -> str:
    """Return the full path to a filepath in ${CWD}/.streamlit.

    This doesn't guarantee that the file (or its directory) exists.
    """
    return os.path.join(os.getcwd(), CONFIG_FOLDER_NAME, *filepath)
def file_is_in_folder_glob(filepath, folderpath_glob) -> bool:
    """Test whether a file is in some folder with globbing support.

    Parameters
    ----------
    filepath : str
        A file path.
    folderpath_glob: str
        A path to a folder that may include globbing.
    """
    # Ensure the glob ends with "/*" so that files inside subfolders of
    # folderpath_glob also match.
    if not folderpath_glob.endswith("*"):
        folderpath_glob += "*" if folderpath_glob.endswith("/") else "/*"

    containing_dir = os.path.dirname(filepath) + "/"
    return fnmatch.fnmatch(containing_dir, folderpath_glob)
def file_in_pythonpath(filepath) -> bool:
    """Test whether a filepath is in the same folder of a path specified in the PYTHONPATH env variable.

    Parameters
    ----------
    filepath : str
        An absolute file path.

    Returns
    -------
    boolean
        True if contained in PYTHONPATH, False otherwise. False if PYTHONPATH is not defined or empty.
    """
    pythonpath = os.environ.get("PYTHONPATH", "")
    if not pythonpath:
        return False

    # Normalize once; compare against every (absolutized) PYTHONPATH entry.
    normalized = os.path.normpath(filepath)
    return any(
        file_is_in_folder_glob(normalized, os.path.abspath(entry))
        for entry in pythonpath.split(os.pathsep)
    )
from __future__ import annotations
import re
import types
from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
TypeVar,
Union,
cast,
overload,
)
import pyarrow as pa
from pandas import MultiIndex
from pandas.api.types import infer_dtype
from typing_extensions import Final, Literal, Protocol, TypeAlias, TypeGuard, get_args
import streamlit as st
from streamlit import errors
from streamlit import logger as _logger
from streamlit import string_util
if TYPE_CHECKING:
import graphviz
import sympy
from pandas import DataFrame, Index, Series
from pandas.core.indexing import _iLocIndexer
from pandas.io.formats.style import Styler
from plotly.graph_objs import Figure
from pydeck import Deck
# Maximum number of rows to request from an unevaluated (out-of-core) dataframe
MAX_UNEVALUATED_DF_ROWS = 10000
# Module logger, obtained via streamlit's logger helper under the name "root".
_LOGGER = _logger.get_logger("root")
# The array value field names are part of the larger set of possible value
# field names. See the explanation for said set below. The message types
# associated with these fields are distinguished by storing data in a `data`
# field in their messages, meaning they need special treatment in certain
# circumstances. Hence, they need their own, dedicated, sub-type.
ArrayValueFieldName: TypeAlias = Literal[
    "double_array_value",
    "int_array_value",
    "string_array_value",
]
# A frozenset containing the allowed values of the ArrayValueFieldName type.
# Useful for membership checking.
ARRAY_VALUE_FIELD_NAMES: Final = frozenset(
    cast(
        "tuple[ArrayValueFieldName, ...]",
        # NOTE: get_args is not recursive, so this only works as long as
        # ArrayValueFieldName remains flat.
        get_args(ArrayValueFieldName),
    )
)
# These are the possible field names that can be set in the `value` oneof-field
# of the WidgetState message (schema found in .proto/WidgetStates.proto).
# We need these as a literal type to ensure correspondence with the protobuf
# schema in certain parts of the python code.
# TODO(harahu): It would be preferable if this type was automatically derived
# from the protobuf schema, rather than manually maintained. Not sure how to
# achieve that, though.
ValueFieldName: TypeAlias = Literal[
    ArrayValueFieldName,
    "arrow_value",
    "bool_value",
    "bytes_value",
    "double_value",
    "file_uploader_state_value",
    "int_value",
    "json_value",
    "string_value",
    "trigger_value",
]
# Generic type variables shared by the option-sequence helpers below.
V_co = TypeVar(
    "V_co",
    covariant=True,  # https://peps.python.org/pep-0484/#covariance-and-contravariance
)
T = TypeVar("T")
class DataFrameGenericAlias(Protocol[V_co]):
    """Technically not a GenericAlias, but serves the same purpose in
    OptionSequence below, in that it is a type which admits DataFrame,
    but is generic. This allows OptionSequence to be a fully generic type,
    significantly increasing its usefulness.

    We can't use types.GenericAlias, as it is only available from python>=3.9,
    and isn't easily back-ported.
    """

    @property
    def iloc(self) -> _iLocIndexer:
        """Positional indexer, as provided by pandas DataFrames."""
        ...
# Anything a selection widget can take its options from: an arbitrary
# iterable, or a dataframe-like object exposing `.iloc`.
OptionSequence: TypeAlias = Union[
    Iterable[V_co],
    DataFrameGenericAlias[V_co],
]
# Widget keys may be given as str or int (normalized via to_key() below).
Key: TypeAlias = Union[str, int]
LabelVisibility = Literal["visible", "hidden", "collapsed"]
# This should really be a Protocol, but can't be, due to:
# https://github.com/python/mypy/issues/12933
# https://github.com/python/mypy/issues/13081
SupportsStr: TypeAlias = object
def is_array_value_field_name(obj: object) -> TypeGuard[ArrayValueFieldName]:
    """True when *obj* is one of the array-typed WidgetState value field names."""
    return obj in ARRAY_VALUE_FIELD_NAMES
@overload
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
@overload
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
@overload
def is_type(obj: object, fqn_type_pattern: Union[str, re.Pattern[str]]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: Union[str, re.Pattern[str]]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, 'matplotlib.figure.Figure')
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
else:
return fqn_type_pattern.match(fqn_type) is not None
def get_fqn(the_type: type) -> str:
"""Get module.type_name for a given type."""
return f"{the_type.__module__}.{the_type.__qualname__}"
def get_fqn_type(obj: object) -> str:
"""Get module.type_name for a given object."""
return get_fqn(type(obj))
# Fully-qualified type names used with is_type() above, so that none of these
# heavy libraries need to be imported just to perform a type check.
_PANDAS_DF_TYPE_STR: Final = "pandas.core.frame.DataFrame"
_PANDAS_INDEX_TYPE_STR: Final = "pandas.core.indexes.base.Index"
_PANDAS_SERIES_TYPE_STR: Final = "pandas.core.series.Series"
_PANDAS_STYLER_TYPE_STR: Final = "pandas.io.formats.style.Styler"
_NUMPY_ARRAY_TYPE_STR: Final = "numpy.ndarray"
_SNOWPARK_DF_TYPE_STR: Final = "snowflake.snowpark.dataframe.DataFrame"
_SNOWPARK_DF_ROW_TYPE_STR: Final = "snowflake.snowpark.row.Row"
_SNOWPARK_TABLE_TYPE_STR: Final = "snowflake.snowpark.table.Table"
_PYSPARK_DF_TYPE_STR: Final = "pyspark.sql.dataframe.DataFrame"
_DATAFRAME_LIKE_TYPES: Final[tuple[str, ...]] = (
    _PANDAS_DF_TYPE_STR,
    _PANDAS_INDEX_TYPE_STR,
    _PANDAS_SERIES_TYPE_STR,
    _PANDAS_STYLER_TYPE_STR,
    _NUMPY_ARRAY_TYPE_STR,
)
DataFrameLike: TypeAlias = "Union[DataFrame, Index, Series, Styler]"
# Builtin types accepted by the pandas.DataFrame constructor (exact types;
# see is_dataframe_compatible below).
_DATAFRAME_COMPATIBLE_TYPES: Final[tuple[type, ...]] = (
    dict,
    list,
    type(None),
)
_DataFrameCompatible: TypeAlias = Union[dict, list, None]
DataFrameCompatible: TypeAlias = Union[_DataFrameCompatible, DataFrameLike]
_BYTES_LIKE_TYPES: Final[tuple[type, ...]] = (
    bytes,
    bytearray,
)
BytesLike: TypeAlias = Union[bytes, bytearray]
def is_dataframe(obj: object) -> TypeGuard[DataFrame]:
    """True if *obj* is a pandas DataFrame (checked without importing pandas)."""
    return is_type(obj, _PANDAS_DF_TYPE_STR)
def is_dataframe_like(obj: object) -> TypeGuard[DataFrameLike]:
    """True if *obj* is a pandas DataFrame/Index/Series/Styler or a numpy array."""
    return any(is_type(obj, t) for t in _DATAFRAME_LIKE_TYPES)
def is_snowpark_or_pyspark_data_object(obj: object) -> bool:
    """True if obj is a Snowpark DataFrame or Table, a list of Snowpark Rows,
    or a PySpark DataFrame; False otherwise.
    """
    return is_snowpark_data_object(obj) or is_pyspark_data_object(obj)
def is_snowpark_data_object(obj: object) -> bool:
    """True if obj is a Snowpark Table or DataFrame, or a non-empty list
    whose first element is a Snowpark Row; False otherwise.
    """
    if is_type(obj, _SNOWPARK_TABLE_TYPE_STR) or is_type(obj, _SNOWPARK_DF_TYPE_STR):
        return True

    # A list of Snowpark Row objects also counts; peek at the first element.
    return (
        isinstance(obj, list)
        and len(obj) > 0
        and hasattr(obj[0], "__class__")
        and is_type(obj[0], _SNOWPARK_DF_ROW_TYPE_STR)
    )
def is_pyspark_data_object(obj: object) -> bool:
    """True if obj is of type pyspark.sql.dataframe.DataFrame"""
    if not is_type(obj, _PYSPARK_DF_TYPE_STR):
        return False
    # Also require a callable toPandas() so the object can be materialized.
    return callable(getattr(obj, "toPandas", None))
def is_dataframe_compatible(obj: object) -> TypeGuard[DataFrameCompatible]:
    """True if type that can be passed to convert_anything_to_df."""
    # NOTE(review): this is an exact type() check, not isinstance(), so
    # subclasses of dict/list won't match — presumably intentional; confirm
    # before changing.
    return is_dataframe_like(obj) or type(obj) in _DATAFRAME_COMPATIBLE_TYPES
def is_bytes_like(obj: object) -> TypeGuard[BytesLike]:
    """True if the type is considered bytes-like for the purposes of
    protobuf data marshalling.
    """
    # Accepts bytes and bytearray (see _BYTES_LIKE_TYPES above).
    return isinstance(obj, _BYTES_LIKE_TYPES)
def to_bytes(obj: BytesLike) -> bytes:
    """Converts the given object to bytes.

    Only types for which `is_bytes_like` is true can be converted; anything
    else will result in an exception.
    """
    if isinstance(obj, bytes):
        return obj
    if isinstance(obj, bytearray):
        return bytes(obj)
    raise RuntimeError(f"{obj} is not convertible to bytes")
_SYMPY_RE: Final = re.compile(r"^sympy.*$")


def is_sympy_expession(obj: object) -> TypeGuard[sympy.Expr]:
    """True if input is a SymPy expression."""
    # Cheap module-name screen first, so sympy is never imported needlessly.
    if not is_type(obj, _SYMPY_RE):
        return False

    try:
        import sympy

        return isinstance(obj, sympy.Expr)
    except ImportError:
        return False
# Matches any *Chart class from any altair.vegalite API version.
_ALTAIR_RE: Final = re.compile(r"^altair\.vegalite\.v\d+\.api\.\w*Chart$")


def is_altair_chart(obj: object) -> bool:
    """True if input looks like an Altair chart."""
    return is_type(obj, _ALTAIR_RE)
def is_keras_model(obj: object) -> bool:
    """True if input looks like a Keras model."""
    # Keras models may come from standalone keras or from tensorflow.
    keras_model_fqns = (
        "keras.engine.sequential.Sequential",
        "keras.engine.training.Model",
        "tensorflow.python.keras.engine.sequential.Sequential",
        "tensorflow.python.keras.engine.training.Model",
    )
    return any(is_type(obj, fqn) for fqn in keras_model_fqns)
def is_plotly_chart(obj: object) -> TypeGuard[Union[Figure, list[Any], dict[str, Any]]]:
    """True if input looks like a Plotly chart."""
    # Accept a Figure, a list of plotly objects, or a dict shaped like a
    # figure spec (see the helpers below).
    return (
        is_type(obj, "plotly.graph_objs._figure.Figure")
        or _is_list_of_plotly_objs(obj)
        or _is_probably_plotly_dict(obj)
    )
def is_graphviz_chart(
    obj: object,
) -> TypeGuard[Union[graphviz.Graph, graphviz.Digraph]]:
    """True if input looks like a GraphViz chart (Graph or Digraph)."""
    return (
        # GraphViz < 0.18
        is_type(obj, "graphviz.dot.Graph")
        or is_type(obj, "graphviz.dot.Digraph")
        # GraphViz >= 0.18
        or is_type(obj, "graphviz.graphs.Graph")
        or is_type(obj, "graphviz.graphs.Digraph")
    )
def _is_plotly_obj(obj: object) -> bool:
"""True if input if from a type that lives in plotly.plotly_objs."""
the_type = type(obj)
return the_type.__module__.startswith("plotly.graph_objs")
def _is_list_of_plotly_objs(obj: object) -> TypeGuard[list[Any]]:
    """True for a non-empty list whose elements are all plotly objects."""
    if not isinstance(obj, list) or not obj:
        return False
    return all(_is_plotly_obj(item) for item in obj)
def _is_probably_plotly_dict(obj: object) -> TypeGuard[dict[str, Any]]:
    """Heuristic: True for a non-empty dict that uses only plotly figure keys
    and holds at least one plotly object (or list of them) as a value.
    """
    if not isinstance(obj, dict) or not obj:
        return False

    # Only the four top-level figure keys are allowed.
    if any(key not in ("config", "data", "frames", "layout") for key in obj):
        return False

    return any(
        _is_plotly_obj(value) or _is_list_of_plotly_objs(value)
        for value in obj.values()
    )
def is_function(x: object) -> TypeGuard[types.FunctionType]:
    """Return True if x is a function."""
    # Note: types.FunctionType covers plain Python functions and lambdas;
    # builtins and bound methods are different types and return False.
    return isinstance(x, types.FunctionType)
def is_namedtuple(x: object) -> TypeGuard[NamedTuple]:
t = type(x)
b = t.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(t, "_fields", None)
if not isinstance(f, tuple):
return False
return all(type(n).__name__ == "str" for n in f)
def is_pandas_styler(obj: object) -> TypeGuard[Styler]:
    """True if obj is a pandas Styler (checked without importing pandas)."""
    return is_type(obj, _PANDAS_STYLER_TYPE_STR)
def is_pydeck(obj: object) -> TypeGuard[Deck]:
    """True if input looks like a pydeck chart (pydeck.bindings.deck.Deck)."""
    return is_type(obj, "pydeck.bindings.deck.Deck")
def is_iterable(obj: object) -> TypeGuard[Iterable[Any]]:
try:
# The ignore statement here is intentional, as this is a
# perfectly fine way of checking for iterables.
iter(obj) # type: ignore[call-overload]
except TypeError:
return False
return True
def is_sequence(seq: Any) -> bool:
    """True if input looks like a sequence."""
    # Strings support len() but are deliberately treated as scalars.
    if isinstance(seq, str):
        return False
    try:
        len(seq)
        return True
    except Exception:
        return False
def convert_anything_to_df(
    df: Any, max_unevaluated_rows: int = MAX_UNEVALUATED_DF_ROWS
) -> DataFrame:
    """Try to convert different formats to a Pandas Dataframe.

    Parameters
    ----------
    df : ndarray, Iterable, dict, DataFrame, Styler, pa.Table, None, dict, list, or any

    max_unevaluated_rows: int
        If unevaluated data is detected this func will evaluate it,
        taking max_unevaluated_rows, defaults to 10k and 100 for st.table

    Returns
    -------
    pandas.DataFrame

    Raises
    ------
    errors.StreamlitAPIException
        If the input cannot be converted by the pandas.DataFrame constructor.
    """
    # This is inefficient as the data will be converted back to Arrow
    # when marshalled to protobuf, but area/bar/line charts need
    # DataFrame magic to generate the correct output.
    if isinstance(df, pa.Table):
        return df.to_pandas()

    if is_type(df, _PANDAS_DF_TYPE_STR):
        return df

    if is_pandas_styler(df):
        return df.data

    import pandas as pd

    # A zero-dimensional ndarray can't populate a frame; use an empty one.
    if is_type(df, "numpy.ndarray") and len(df.shape) == 0:
        return pd.DataFrame([])

    if (
        is_type(df, _SNOWPARK_DF_TYPE_STR)
        or is_type(df, _SNOWPARK_TABLE_TYPE_STR)
        or is_type(df, _PYSPARK_DF_TYPE_STR)
    ):
        # Out-of-core frames are evaluated up to max_unevaluated_rows.
        if is_type(df, _PYSPARK_DF_TYPE_STR):
            df = df.limit(max_unevaluated_rows).toPandas()
        else:
            df = pd.DataFrame(df.take(max_unevaluated_rows))
        if df.shape[0] == max_unevaluated_rows:
            st.caption(
                f"⚠️ Showing only {string_util.simplify_number(max_unevaluated_rows)} rows. "
                "Call `collect()` on the dataframe to show more."
            )
        return df

    # Try to convert to pandas.DataFrame. This will raise an error if df is not
    # compatible with the pandas.DataFrame constructor.
    try:
        return pd.DataFrame(df)
    except ValueError as ex:
        # Chain the original error so the conversion failure stays visible.
        raise errors.StreamlitAPIException(
            """
Unable to convert object of type `%(type)s` to `pandas.DataFrame`.
Offending object:
```py
%(object)s
```"""
            % {
                "type": type(df),
                "object": df,
            }
        ) from ex
@overload
def ensure_iterable(obj: Iterable[V_co]) -> Iterable[V_co]:
    ...
@overload
def ensure_iterable(obj: DataFrame) -> Iterable[Any]:
    ...
def ensure_iterable(obj: Union[DataFrame, Iterable[V_co]]) -> Iterable[Any]:
    """Try to convert different formats to something iterable. Most inputs
    are assumed to be iterable, but if we have a DataFrame, we can just
    select the first column to iterate over. If the input is not iterable,
    a TypeError is raised.

    Parameters
    ----------
    obj : list, tuple, numpy.ndarray, pandas.Series, pandas.DataFrame, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame or snowflake.snowpark.table.Table

    Returns
    -------
    iterable

    Raises
    ------
    TypeError
        If *obj* is not iterable and cannot be converted to something that is.
    """
    # Out-of-core frames are first materialized into a pandas DataFrame,
    # then fall through to the DataFrame branch below.
    if is_snowpark_or_pyspark_data_object(obj):
        obj = convert_anything_to_df(obj)
    if is_dataframe(obj):
        # Return first column as a pd.Series
        # The type of the elements in this column is not known up front, hence
        # the Iterable[Any] return type.
        return cast(Iterable[Any], obj.iloc[:, 0])
    if is_iterable(obj):
        return obj
    raise TypeError(
        f"Object is not an iterable and could not be converted to one. Object: {obj}"
    )
def ensure_indexable(obj: OptionSequence[V_co]) -> Sequence[V_co]:
    """Try to ensure a value is an indexable Sequence. If the collection already
    is one, it has the index method that we need. Otherwise, convert it to a list.
    """
    iterable = ensure_iterable(obj)
    # This is an imperfect check because there is no guarantee that an `index`
    # function actually does the thing we want.
    if callable(getattr(iterable, "index", None)):
        return iterable  # type: ignore[return-value]
    return list(iterable)
def is_pandas_version_less_than(v: str) -> bool:
    """Return True if the current Pandas version is less than the input version.

    Parameters
    ----------
    v : str
        Version string, e.g. "0.25.0"

    Returns
    -------
    bool
    """
    # Imported lazily so merely loading this module stays cheap.
    import pandas as pd
    from packaging import version

    current = version.parse(pd.__version__)
    return current < version.parse(v)
def pyarrow_table_to_bytes(table: pa.Table) -> bytes:
    """Serialize pyarrow.Table to bytes using Apache Arrow.

    Parameters
    ----------
    table : pyarrow.Table
        A table to convert.
    """
    sink = pa.BufferOutputStream()
    # Use the writer as a context manager so it is closed (finalizing the
    # stream) even if write_table raises; the old explicit close() leaked
    # the writer on error.
    with pa.RecordBatchStreamWriter(sink, table.schema) as writer:
        writer.write_table(table)
    return cast(bytes, sink.getvalue().to_pybytes())
def _is_colum_type_arrow_incompatible(column: Union[Series, Index]) -> bool:
"""Return True if the column type is known to cause issues during Arrow conversion."""
# Check all columns for mixed types and complex128 type
# The dtype of mixed type columns is always object, the actual type of the column
# values can be determined via the infer_dtype function:
# https://pandas.pydata.org/docs/reference/api/pandas.api.types.infer_dtype.html
return (
column.dtype == "object" and infer_dtype(column) in ["mixed", "mixed-integer"]
) or column.dtype == "complex128"
def fix_arrow_incompatible_column_types(
    df: DataFrame, selected_columns: Optional[List[str]] = None
) -> DataFrame:
    """Fix column types that are not supported by Arrow table.

    This includes mixed types (e.g. mix of integers and strings)
    as well as complex numbers (complex128 type). These types will cause
    errors during conversion of the dataframe to an Arrow table.
    It is fixed by converting all values of the column to strings
    This is sufficient for displaying the data on the frontend.

    Note: the dataframe is modified in place and also returned.

    Parameters
    ----------
    df : pandas.DataFrame
        A dataframe to fix.
    selected_columns: Optional[List[str]]
        A list of columns to fix. If None, all columns are evaluated.

    Returns
    -------
    The fixed dataframe.
    """
    # Cast every problematic column (or just the selected ones) to str.
    for column_name in selected_columns or df.columns:
        if _is_colum_type_arrow_incompatible(df[column_name]):
            df[column_name] = df[column_name].astype(str)

    if not selected_columns:
        # The index can also contain mixed types causing Arrow issues during
        # conversion. Multi-indices are skipped since they won't return the
        # correct value from infer_dtype.
        if not isinstance(df.index, MultiIndex) and _is_colum_type_arrow_incompatible(
            df.index
        ):
            df.index = df.index.astype(str)
    return df
def data_frame_to_bytes(df: DataFrame) -> bytes:
    """Serialize pandas.DataFrame to bytes using Apache Arrow.

    Parameters
    ----------
    df : pandas.DataFrame
        A dataframe to convert.
    """
    try:
        table = pa.Table.from_pandas(df)
    except (pa.ArrowTypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
        # Arrow rejected the frame (e.g. mixed-type columns): coerce the
        # problematic columns to str and retry once.
        _LOGGER.info(
            "Applying automatic fixes for column types to make the dataframe Arrow-compatible."
        )
        df = fix_arrow_incompatible_column_types(df)
        table = pa.Table.from_pandas(df)
    return pyarrow_table_to_bytes(table)
def bytes_to_data_frame(source: bytes) -> DataFrame:
    """Convert bytes to pandas.DataFrame.

    Parameters
    ----------
    source : bytes
        A bytes object to convert.

    Returns
    -------
    pandas.DataFrame
        The dataframe deserialized from the Arrow IPC stream.
    """
    reader = pa.RecordBatchStreamReader(source)
    return reader.read_pandas()
@overload
def to_key(key: None) -> None:
    ...


@overload
def to_key(key: Key) -> str:
    ...


def to_key(key: Optional[Key]) -> Optional[str]:
    """Normalize a widget key: None passes through, anything else becomes str."""
    return None if key is None else str(key)
def maybe_raise_label_warnings(label: Optional[str], label_visibility: Optional[str]):
    """Log a warning for empty widget labels and raise for invalid
    label_visibility values.

    Raises
    ------
    errors.StreamlitAPIException
        If label_visibility is not one of "visible", "hidden", "collapsed".
    """
    # Empty/None labels are allowed for now but discouraged (a11y).
    if not label:
        _LOGGER.warning(
            "`label` got an empty value. This is discouraged for accessibility "
            "reasons and may be disallowed in the future by raising an exception. "
            "Please provide a non-empty label and hide it with label_visibility "
            "if needed."
        )
    if label_visibility not in ("visible", "hidden", "collapsed"):
        raise errors.StreamlitAPIException(
            f"Unsupported label_visibility option '{label_visibility}'. "
            f"Valid values are 'visible', 'hidden' or 'collapsed'."
        )
from typing import Any, List, Optional, Tuple
from streamlit import util
from streamlit.runtime.scriptrunner import get_script_run_ctx
def make_delta_path(
    root_container: int, parent_path: Tuple[int, ...], index: int
) -> List[int]:
    """Build a full delta path: [root_container, *parent_path, index]."""
    return [root_container, *parent_path, index]
def get_container_cursor(
    root_container: Optional[int],
) -> Optional["RunningCursor"]:
    """Return the top-level RunningCursor for the given container.

    This is the cursor that is used when user code calls something like
    `st.foo` (which uses the main container) or `st.sidebar.foo` (which uses
    the sidebar container).
    """
    ctx = None if root_container is None else get_script_run_ctx()
    if ctx is None:
        return None

    # Lazily create and cache one cursor per root container on the
    # script-run context.
    if root_container not in ctx.cursors:
        ctx.cursors[root_container] = RunningCursor(root_container=root_container)
    return ctx.cursors[root_container]
class Cursor:
    """A pointer to a delta location in the app.
    When adding an element to the app, you should always call
    get_locked_cursor() on that element's respective Cursor.
    """
    def __repr__(self) -> str:
        return util.repr_(self)
    @property
    def root_container(self) -> int:
        """The top-level container this cursor lives within - either
        RootContainer.MAIN or RootContainer.SIDEBAR.
        """
        raise NotImplementedError()
    @property
    def parent_path(self) -> Tuple[int, ...]:
        """The cursor's parent's path within its container."""
        raise NotImplementedError()
    @property
    def index(self) -> int:
        """The index of the Delta within its parent block."""
        raise NotImplementedError()
    @property
    def delta_path(self) -> List[int]:
        """The complete path of the delta pointed to by this cursor - its
        container, parent path, and index.
        """
        return make_delta_path(self.root_container, self.parent_path, self.index)
    @property
    def is_locked(self) -> bool:
        """True if this cursor always points at the same location (LockedCursor)."""
        raise NotImplementedError()
    def get_locked_cursor(self, **props) -> "LockedCursor":
        """Return a LockedCursor pinned at this cursor's current position."""
        raise NotImplementedError()
    @property
    def props(self) -> Any:
        """Other data in this cursor. This is a temporary measure that will go
        away when we implement improved return values for elements.
        This is only implemented in LockedCursor.
        """
        raise NotImplementedError()
class RunningCursor(Cursor):
    def __init__(self, root_container: int, parent_path: Tuple[int, ...] = ()):
        """A moving pointer to a delta location in the app.

        RunningCursors auto-increment to the next available location when you
        call get_locked_cursor() on them.

        Parameters
        ----------
        root_container: int
            The root container this cursor lives in.
        parent_path: tuple of ints
            The full path of this cursor, consisting of the IDs of all ancestors.
            The 0th item is the topmost ancestor.
        """
        self._root_container = root_container
        self._parent_path = parent_path
        self._index = 0

    @property
    def root_container(self) -> int:
        return self._root_container

    @property
    def parent_path(self) -> Tuple[int, ...]:
        return self._parent_path

    @property
    def index(self) -> int:
        return self._index

    @property
    def is_locked(self) -> bool:
        # A running cursor is, by definition, never locked.
        return False

    def get_locked_cursor(self, **props) -> "LockedCursor":
        """Pin the current position into a LockedCursor and advance this
        cursor to the next slot.
        """
        pinned = LockedCursor(
            root_container=self._root_container,
            parent_path=self._parent_path,
            index=self._index,
            **props,
        )
        self._index += 1
        return pinned
class LockedCursor(Cursor):
    def __init__(
        self,
        root_container: int,
        parent_path: Tuple[int, ...] = (),
        index: int = 0,
        **props,
    ):
        """A locked pointer to a location in the app.

        LockedCursors always point to the same location, even when you call
        get_locked_cursor() on them.

        Parameters
        ----------
        root_container: int
            The root container this cursor lives in.
        parent_path: tuple of ints
            The full path of this cursor, consisting of the IDs of all ancestors. The
            0th item is the topmost ancestor.
        index: int
            The index of the Delta within its parent block.
        **props: any
            Anything else you want to store in this cursor. This is a temporary
            measure that will go away when we implement improved return values
            for elements.
        """
        self._root_container = root_container
        self._parent_path = parent_path
        self._index = index
        self._props = props

    @property
    def root_container(self) -> int:
        return self._root_container

    @property
    def parent_path(self) -> Tuple[int, ...]:
        return self._parent_path

    @property
    def index(self) -> int:
        return self._index

    @property
    def is_locked(self) -> bool:
        # This cursor never moves, hence "locked".
        return True

    def get_locked_cursor(self, **props) -> "LockedCursor":
        """Replace the stored props and return self (the position never moves)."""
        self._props = props
        return self

    @property
    def props(self) -> Any:
        return self._props
"""Streamlit version utilities."""
import random
import packaging.version
import requests
from importlib_metadata import version as _version
from typing_extensions import Final
import streamlit.logger as logger
_LOGGER = logger.get_logger(__name__)
# JSON metadata endpoint for the streamlit package on PyPI.
PYPI_STREAMLIT_URL = "https://pypi.org/pypi/streamlit/json"
# Probability that we'll make a network call to PyPI to check
# the latest version of streamlit. This is used each time
# should_show_new_version_notice() is called.
CHECK_PYPI_PROBABILITY = 0.10
# The installed streamlit version, as reported by importlib metadata.
STREAMLIT_VERSION_STRING: Final[str] = _version("streamlit")
def _version_str_to_obj(version_str) -> packaging.version.Version:
    """Parse a version string into a comparable packaging Version object."""
    return packaging.version.Version(version_str)
def _get_installed_streamlit_version() -> packaging.version.Version:
    """Return the currently-installed streamlit version.

    Returns
    -------
    packaging.version.Version
        The parsed version taken from STREAMLIT_VERSION_STRING (the version
        importlib metadata reports for the "streamlit" distribution).
    """
    return _version_str_to_obj(STREAMLIT_VERSION_STRING)
def _get_latest_streamlit_version(timeout=None):
    """Request the latest streamlit version from PyPI.

    NB: this involves a network call, so it could raise an error
    or take a long time.

    Parameters
    ----------
    timeout : float or None
        The request timeout.

    Returns
    -------
    packaging.version.Version
        The parsed version of the latest streamlit release on PyPI.
        (The previous docstring claimed a str return, which was wrong.)

    Raises
    ------
    RuntimeError
        If the PyPI response cannot be parsed (original error is chained).
    """
    rsp = requests.get(PYPI_STREAMLIT_URL, timeout=timeout)
    try:
        version_str = rsp.json()["info"]["version"]
    except Exception as e:
        # Chain the original exception so the failing parse step stays in
        # the traceback instead of being swallowed.
        raise RuntimeError("Got unexpected response from PyPI", e) from e
    return _version_str_to_obj(version_str)
def should_show_new_version_notice() -> bool:
    """True if streamlit should show a 'new version!' notice to the user.

    We need to make a network call to PyPI to determine the latest streamlit
    version. Since we don't want to do this every time streamlit is run,
    we only perform the check with probability CHECK_PYPI_PROBABILITY
    (~10% of the time; the old docstring said 5%, which disagreed with the
    constant).

    If we do make the request to PyPI and there's any sort of error,
    we log it and return False.

    Returns
    -------
    bool
        True if we should tell the user that their streamlit is out of date.
    """
    if random.random() >= CHECK_PYPI_PROBABILITY:
        # We don't check PyPI every time this function is called.
        _LOGGER.debug("Skipping PyPI version check")
        return False

    try:
        installed_version = _get_installed_streamlit_version()
        latest_version = _get_latest_streamlit_version(timeout=1)
    except Exception as ex:
        # Log this as a debug. We don't care if the user sees it.
        _LOGGER.debug("Failed PyPI version check.", exc_info=ex)
        return False

    # Fixed: the original return line had dataset-export residue fused onto it.
    return latest_version > installed_version
from urllib.error import URLError
import pandas as pd
import pydeck as pdk
import streamlit as st
from streamlit.hello.utils import show_code
def mapping_demo():
    """Render the mapping demo: selectable pydeck layers over San Francisco."""

    # st.cache memoizes the download so each JSON file is fetched only once.
    @st.cache
    def from_data_file(filename):
        # Fixed: fetch over HTTPS. The previous "http://" URL relied on a
        # server-side redirect and sent the initial request in plaintext.
        url = (
            "https://raw.githubusercontent.com/streamlit/"
            "example-data/master/hello/v1/%s" % filename
        )
        return pd.read_json(url)

    try:
        ALL_LAYERS = {
            "Bike Rentals": pdk.Layer(
                "HexagonLayer",
                data=from_data_file("bike_rental_stats.json"),
                get_position=["lon", "lat"],
                radius=200,
                elevation_scale=4,
                elevation_range=[0, 1000],
                extruded=True,
            ),
            "Bart Stop Exits": pdk.Layer(
                "ScatterplotLayer",
                data=from_data_file("bart_stop_stats.json"),
                get_position=["lon", "lat"],
                get_color=[200, 30, 0, 160],
                get_radius="[exits]",
                radius_scale=0.05,
            ),
            "Bart Stop Names": pdk.Layer(
                "TextLayer",
                data=from_data_file("bart_stop_stats.json"),
                get_position=["lon", "lat"],
                get_text="name",
                get_color=[0, 0, 0, 200],
                get_size=15,
                get_alignment_baseline="'bottom'",
            ),
            "Outbound Flow": pdk.Layer(
                "ArcLayer",
                data=from_data_file("bart_path_stats.json"),
                get_source_position=["lon", "lat"],
                get_target_position=["lon2", "lat2"],
                get_source_color=[200, 30, 0, 160],
                get_target_color=[200, 30, 0, 160],
                auto_highlight=True,
                width_scale=0.0001,
                get_width="outbound",
                width_min_pixels=3,
                width_max_pixels=30,
            ),
        }
        st.sidebar.markdown("### Map Layers")
        # Keep only the layers whose sidebar checkbox is ticked.
        selected_layers = [
            layer
            for layer_name, layer in ALL_LAYERS.items()
            if st.sidebar.checkbox(layer_name, True)
        ]
        if selected_layers:
            st.pydeck_chart(
                pdk.Deck(
                    map_style=None,
                    initial_view_state={
                        "latitude": 37.76,
                        "longitude": -122.4,
                        "zoom": 11,
                        "pitch": 50,
                    },
                    layers=selected_layers,
                )
            )
        else:
            st.error("Please choose at least one layer above.")
    except URLError as e:
        st.error(
            """
            **This demo requires internet access.**
            Connection error: %s
        """
            % e.reason
        )
# Page setup and demo entry point.
st.set_page_config(page_title="Mapping Demo", page_icon="🌍")
st.markdown("# Mapping Demo")
st.sidebar.header("Mapping Demo")
st.write(
    """This demo shows how to use
[`st.pydeck_chart`](https://docs.streamlit.io/library/api-reference/charts/st.pydeck_chart)
to display geospatial data."""
)

# Run the demo, then show its source code below it.
# Fixed: the original final line had dataset-export residue fused onto it.
mapping_demo()
show_code(mapping_demo)
import copy
import json
from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Union, cast
import pandas as pd
from typing_extensions import Final, TypeAlias
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
from streamlit import type_util
from streamlit.errors import StreamlitAPIException
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from pandas.io.formats.style import Styler
from streamlit.delta_generator import DeltaGenerator
# Accepted input types for st.map data.
Data: TypeAlias = Union[
    pd.DataFrame,
    "Styler",
    Iterable[Any],
    Dict[Any, Any],
    None,
]

# Map used as the basis for st.map.
_DEFAULT_MAP: Final[Dict[str, Any]] = dict(deck_gl_json_chart.EMPTY_MAP)

# Other default parameters for st.map.
_DEFAULT_COLOR: Final = [200, 30, 0, 160]
_DEFAULT_ZOOM_LEVEL: Final = 12

# Degrees of longitude that fit in the viewport at each zoom level; index i
# corresponds to zoom level i. See
# https://wiki.openstreetmap.org/wiki/Zoom_levels.
_ZOOM_LEVELS: Final = [
    360,
    180,
    90,
    45,
    22.5,
    11.25,
    5.625,
    2.813,
    1.406,
    0.703,
    0.352,
    0.176,
    0.088,
    0.044,
    0.022,
    0.011,
    0.005,
    0.003,
    0.001,
    0.0005,
    0.00025,
]
class MapMixin:
    @gather_metrics("map")
    def map(
        self,
        data: Data = None,
        zoom: Optional[int] = None,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display a map with points on it.

        This is a wrapper around st.pydeck_chart that quickly builds a
        scatterplot chart on top of a map, with auto-centering and auto-zoom.

        When using this command, we advise all users to use a personal Mapbox
        token (via the mapbox.token config option) so the map tiles used in
        this chart are more robust. Tokens are free for moderate usage levels
        at https://mapbox.com. For more info on how to set config options, see
        https://docs.streamlit.io/library/advanced-features/configuration#set-configuration-options

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, snowflake.snowpark.table.Table, Iterable, dict, or None
            The data to be plotted. Must have two columns:

            - latitude called 'lat', 'latitude', 'LAT', 'LATITUDE'
            - longitude called 'lon', 'longitude', 'LON', 'LONGITUDE'.

        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        use_container_width: bool

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://doc-map.streamlitapp.com/
           height: 650px
        """
        # Build the deck.gl spec from the data, then ship it in a proto.
        proto = DeckGlJsonChartProto()
        proto.json = to_deckgl_json(data, zoom)
        proto.use_container_width = use_container_width
        return self.dg._enqueue("deck_gl_json_chart", proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def _get_zoom_level(distance: float) -> int:
    """Return the map zoom level for a span of ``distance`` degrees of longitude.

    See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.

    Parameters
    ----------
    distance : float
        How many degrees of longitude should fit in the map.

    Returns
    -------
    int
        The zoom level, from 0 to 20.
    """
    # Each consecutive pair of table entries (upper, lower) brackets the span
    # of longitudes shown at that zoom level; pick the first matching bracket.
    for zoom_level, (upper, lower) in enumerate(zip(_ZOOM_LEVELS, _ZOOM_LEVELS[1:])):
        if lower < distance <= upper:
            return zoom_level

    # Spans outside every bracket (tiny clusters of points, or spans wider
    # than 360 degrees) fall back to the default zoom level.
    return _DEFAULT_ZOOM_LEVEL
def to_deckgl_json(data: Data, zoom: Optional[int]) -> str:
    """Convert point data into the deck.gl JSON spec used by st.map.

    Parameters
    ----------
    data : Data
        The points to plot. Must contain a recognized latitude column and a
        recognized longitude column (see the raised messages below).
    zoom : int or None
        The zoom level. If None, one is derived from the data's bounding box.

    Returns
    -------
    str
        A JSON-encoded deck.gl chart spec.

    Raises
    ------
    StreamlitAPIException
        If the data lacks a recognized lat/lon column, or the coordinate
        columns contain null values.
    """
    if data is None:
        return json.dumps(_DEFAULT_MAP)

    # TODO(harahu): iterables don't have the empty attribute. This is either
    # a bug, or the documented data type is too broad. One or the other
    # should be addressed
    if hasattr(data, "empty") and data.empty:
        return json.dumps(_DEFAULT_MAP)

    data = type_util.convert_anything_to_df(data)

    # Fixed typo: this local was previously named "formmated_column_names".
    formatted_column_names = ", ".join(map(repr, list(data.columns)))

    allowed_lat_columns = {"lat", "latitude", "LAT", "LATITUDE"}
    lat = next((d for d in allowed_lat_columns if d in data), None)
    if not lat:
        formatted_allowed_column_name = ", ".join(
            map(repr, sorted(allowed_lat_columns))
        )
        raise StreamlitAPIException(
            f"Map data must contain a latitude column named: {formatted_allowed_column_name}. "
            f"Existing columns: {formatted_column_names}"
        )

    allowed_lon_columns = {"lon", "longitude", "LON", "LONGITUDE"}
    lon = next((d for d in allowed_lon_columns if d in data), None)
    if not lon:
        formatted_allowed_column_name = ", ".join(
            map(repr, sorted(allowed_lon_columns))
        )
        raise StreamlitAPIException(
            f"Map data must contain a longitude column named: {formatted_allowed_column_name}. "
            f"Existing columns: {formatted_column_names}"
        )

    # NOTE(review): this check fires on null coordinates, but the message
    # talks about numeric data — consider clarifying the message text.
    if data[lon].isnull().values.any() or data[lat].isnull().values.any():
        raise StreamlitAPIException("Latitude and longitude data must be numeric.")

    # Center the view on the data's bounding box.
    min_lat = data[lat].min()
    max_lat = data[lat].max()
    min_lon = data[lon].min()
    max_lon = data[lon].max()
    center_lat = (max_lat + min_lat) / 2.0
    center_lon = (max_lon + min_lon) / 2.0
    range_lon = abs(max_lon - min_lon)
    range_lat = abs(max_lat - min_lat)

    if zoom is None:
        # Derive the zoom level from the larger side of the bounding box.
        if range_lon > range_lat:
            longitude_distance = range_lon
        else:
            longitude_distance = range_lat
        zoom = _get_zoom_level(longitude_distance)

    # "+1" because itertuples includes the row index.
    lon_col_index = data.columns.get_loc(lon) + 1
    lat_col_index = data.columns.get_loc(lat) + 1
    final_data = []
    for row in data.itertuples():
        final_data.append(
            {"lon": float(row[lon_col_index]), "lat": float(row[lat_col_index])}
        )

    # Deep-copy the template so repeated calls don't share mutable state.
    default = copy.deepcopy(_DEFAULT_MAP)
    default["initialViewState"]["latitude"] = center_lat
    default["initialViewState"]["longitude"] = center_lon
    default["initialViewState"]["zoom"] = zoom
    default["layers"] = [
        {
            "@@type": "ScatterplotLayer",
            "getPosition": "@@=[lon, lat]",
            "getRadius": 10,
            "radiusScale": 10,
            "radiusMinPixels": 3,
            "getFillColor": _DEFAULT_COLOR,
            "data": final_data,
        }
    ]
    # Fixed: the original return line had dataset-export residue fused onto it.
    return json.dumps(default)
import hashlib
from typing import TYPE_CHECKING, Union, cast
from typing_extensions import Final, TypeAlias
from streamlit import type_util
from streamlit.errors import StreamlitAPIException
from streamlit.logger import get_logger
from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
import graphviz
from streamlit.delta_generator import DeltaGenerator
LOGGER: Final = get_logger(__name__)

# A graphviz graph/digraph object, or a raw DOT-language string.
FigureOrDot: TypeAlias = Union["graphviz.Graph", "graphviz.Digraph", str]
class GraphvizMixin:
    @gather_metrics("graphviz_chart")
    def graphviz_chart(
        self,
        figure_or_dot: FigureOrDot,
        use_container_width: bool = False,
    ) -> "DeltaGenerator":
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------
        >>> import streamlit as st
        >>> import graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        .. output::
           https://doc-graphviz-chart.streamlitapp.com/
           height: 600px

        """
        # Generate element ID from delta path
        delta_path = self.dg._get_delta_path_str()
        # MD5 is used only to derive a stable, short element ID — not for
        # any security purpose.
        element_id = hashlib.md5(delta_path.encode()).hexdigest()

        graphviz_chart_proto = GraphVizChartProto()
        marshall(graphviz_chart_proto, figure_or_dot, use_container_width, element_id)
        return self.dg._enqueue("graphviz_chart", graphviz_chart_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def marshall(
    proto: GraphVizChartProto,
    figure_or_dot: FigureOrDot,
    use_container_width: bool,
    element_id: str,
) -> None:
    """Construct a GraphViz chart object.

    See DeltaGenerator.graphviz_chart for docs.
    """
    # Accept either a raw DOT string or a graphviz graph object; anything
    # else is an API misuse.
    if isinstance(figure_or_dot, str):
        dot = figure_or_dot
    elif type_util.is_graphviz_chart(figure_or_dot):
        dot = figure_or_dot.source
    else:
        raise StreamlitAPIException(
            "Unhandled type for graphviz chart: %s" % type(figure_or_dot)
        )

    proto.spec = dot
    proto.use_container_width = use_container_width
    proto.element_id = element_id
from datetime import date
from typing import TYPE_CHECKING, Hashable, cast
import altair as alt
import pandas as pd
import pyarrow as pa
import streamlit.elements.legacy_vega_lite as vega_lite
from streamlit import errors, type_util
from streamlit.elements.utils import last_index_for_melted_dataframes
from streamlit.proto.VegaLiteChart_pb2 import VegaLiteChart as VegaLiteChartProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from altair.vegalite.v4.api import Chart
from streamlit.delta_generator import DeltaGenerator
from streamlit.elements.arrow import Data
class LegacyAltairMixin:
    """Legacy (pre-Arrow) chart commands backed by Altair/Vega-Lite."""

    @gather_metrics("_legacy_line_chart")
    def _legacy_line_chart(
        self,
        data: "Data" = None,
        width: int = 0,
        height: int = 0,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display a line chart.

        This is syntax-sugar around st._legacy_altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        If st._legacy_line_chart does not guess the data specification
        correctly, try specifying your desired chart using st._legacy_altair_chart.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> import streamlit as st
        >>> import numpy as np
        >>> import pandas as pd
        >>>
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st._legacy_line_chart(chart_data)

        .. output::
           https://static.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        vega_lite_chart_proto = VegaLiteChartProto()
        chart = generate_chart("line", data, width, height)
        marshall(vega_lite_chart_proto, chart, use_container_width)
        # last_index lets st.add_rows keep appending at the right position.
        last_index = last_index_for_melted_dataframes(data)

        return self.dg._enqueue(
            "line_chart", vega_lite_chart_proto, last_index=last_index
        )

    @gather_metrics("_legacy_area_chart")
    def _legacy_area_chart(
        self,
        data: "Data" = None,
        width: int = 0,
        height: int = 0,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display an area chart.

        This is just syntax-sugar around st._legacy_altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        If st._legacy_area_chart does not guess the data specification
        correctly, try specifying your desired chart using st._legacy_altair_chart.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> import streamlit as st
        >>> import numpy as np
        >>> import pandas as pd
        >>>
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st._legacy_area_chart(chart_data)

        .. output::
           https://static.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        vega_lite_chart_proto = VegaLiteChartProto()
        chart = generate_chart("area", data, width, height)
        marshall(vega_lite_chart_proto, chart, use_container_width)
        # last_index lets st.add_rows keep appending at the right position.
        last_index = last_index_for_melted_dataframes(data)

        return self.dg._enqueue(
            "area_chart", vega_lite_chart_proto, last_index=last_index
        )

    @gather_metrics("_legacy_bar_chart")
    def _legacy_bar_chart(
        self,
        data: "Data" = None,
        width: int = 0,
        height: int = 0,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display a bar chart.

        This is just syntax-sugar around st._legacy_altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        If st._legacy_bar_chart does not guess the data specification
        correctly, try specifying your desired chart using st._legacy_altair_chart.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> import streamlit as st
        >>> import numpy as np
        >>> import pandas as pd
        >>>
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st._legacy_bar_chart(chart_data)

        .. output::
           https://static.streamlit.io/0.66.0-2BLtg/index.html?id=GaYDn6vxskvBUkBwsGVEaL
           height: 220px

        """
        vega_lite_chart_proto = VegaLiteChartProto()
        chart = generate_chart("bar", data, width, height)
        marshall(vega_lite_chart_proto, chart, use_container_width)
        # last_index lets st.add_rows keep appending at the right position.
        last_index = last_index_for_melted_dataframes(data)

        return self.dg._enqueue(
            "bar_chart", vega_lite_chart_proto, last_index=last_index
        )

    @gather_metrics("_legacy_altair_chart")
    def _legacy_altair_chart(
        self, altair_chart: "Chart", use_container_width: bool = False
    ) -> "DeltaGenerator":
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v4.api.Chart
            The Altair chart object to display.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st._legacy_altair_chart(c, use_container_width=True)

        .. output::
           https://static.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        vega_lite_chart_proto = VegaLiteChartProto()

        marshall(
            vega_lite_chart_proto,
            altair_chart,
            use_container_width=use_container_width,
        )
        return self.dg._enqueue("vega_lite_chart", vega_lite_chart_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def _is_date_column(df: pd.DataFrame, name: Hashable) -> bool:
"""True if the column with the given name stores datetime.date values.
This function just checks the first value in the given column, so
it's meaningful only for columns whose values all share the same type.
Parameters
----------
df : pd.DataFrame
name : hashable
The column name
Returns
-------
bool
"""
column = df[name]
if column.size == 0:
return False
return isinstance(column[0], date)
def generate_chart(chart_type, data, width: int = 0, height: int = 0):
    """Build an interactive Altair chart ("line", "area", or "bar") from data.

    The data is melted to long form (index, variable, value) so each original
    column becomes one colored series.
    """
    if data is None:
        # Use an empty-ish dict because if we use None the x axis labels rotate
        # 90 degrees. No idea why. Need to debug.
        data = {"": []}

    if isinstance(data, pa.Table):
        raise errors.StreamlitAPIException(
            """
pyarrow tables are not supported  by Streamlit's legacy DataFrame serialization (i.e. with `config.dataFrameSerialization = "legacy"`).
To be able to use pyarrow tables, please enable pyarrow by changing the config setting,
`config.dataFrameSerialization = "arrow"`
"""
        )

    if not isinstance(data, pd.DataFrame):
        data = type_util.convert_anything_to_df(data)

    index_name = data.index.name
    if index_name is None:
        index_name = "index"

    # Long-form reshape: one row per (index, column) pair.
    data = pd.melt(data.reset_index(), id_vars=[index_name])

    if chart_type == "area":
        opacity = {"value": 0.7}
    else:
        opacity = {"value": 1.0}

    # Set the X and Y axes' scale to "utc" if they contain date values.
    # This causes time data to be displayed in UTC, rather the user's local
    # time zone. (By default, vega-lite displays time data in the browser's
    # local time zone, regardless of which time zone the data specifies:
    # https://vega.github.io/vega-lite/docs/timeunit.html#output).
    x_scale = (
        alt.Scale(type="utc") if _is_date_column(data, index_name) else alt.Undefined
    )
    y_scale = alt.Scale(type="utc") if _is_date_column(data, "value") else alt.Undefined

    x_type = alt.Undefined
    # Bar charts should have a discrete (ordinal) x-axis, UNLESS type is date/time
    # https://github.com/streamlit/streamlit/pull/2097#issuecomment-714802475
    if chart_type == "bar" and not _is_date_column(data, index_name):
        x_type = "ordinal"

    chart = (
        getattr(alt.Chart(data, width=width, height=height), "mark_" + chart_type)()
        .encode(
            alt.X(index_name, title="", scale=x_scale, type=x_type),
            alt.Y("value", title="", scale=y_scale),
            alt.Color("variable", title="", type="nominal"),
            alt.Tooltip([index_name, "value", "variable"]),
            opacity=opacity,
        )
        .interactive()
    )
    return chart
def marshall(
    vega_lite_chart: VegaLiteChartProto,
    altair_chart,
    use_container_width: bool = False,
    **kwargs,
) -> None:
    """Serialize an Altair chart into a VegaLiteChart proto.

    Parameters
    ----------
    vega_lite_chart : VegaLiteChartProto
        The proto to fill in.
    altair_chart : altair chart object
        The chart to serialize.
    use_container_width : bool
        If True, set the chart width to the column width.
    **kwargs
        Forwarded to vega_lite.marshall.
    """
    import altair as alt

    # Normally altair_chart.to_dict() would transform the dataframe used by the
    # chart into an array of dictionaries. To avoid that, we install a
    # transformer that replaces datasets with a reference by the object id of
    # the dataframe. We then fill in the dataset manually later on.

    datasets = {}

    def id_transform(data):
        """Altair data transformer that returns a fake named dataset with the
        object id.
        """
        datasets[id(data)] = data
        return {"name": str(id(data))}

    alt.data_transformers.register("id", id_transform)

    with alt.data_transformers.enable("id"):
        chart_dict = altair_chart.to_dict()

        # Put datasets back into the chart dict but note how they weren't
        # transformed.
        chart_dict["datasets"] = datasets

        # Fixed: the original final call had dataset-export residue fused
        # onto its closing line.
        vega_lite.marshall(
            vega_lite_chart,
            chart_dict,
            use_container_width=use_container_width,
            **kwargs,
        )
from typing import TYPE_CHECKING, Optional, Union, cast
from streamlit.proto.Markdown_pb2 import Markdown as MarkdownProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text
from streamlit.type_util import SupportsStr, is_sympy_expession
if TYPE_CHECKING:
import sympy
from streamlit.delta_generator import DeltaGenerator
class MarkdownMixin:
    @gather_metrics("markdown")
    def markdown(
        self, body: SupportsStr, unsafe_allow_html: bool = False
    ) -> "DeltaGenerator":
        # Raw docstring: the example below contains a LaTeX backslash
        # (previously an invalid "\s" escape in a non-raw string).
        r"""Display string formatted as Markdown.

        Parameters
        ----------
        body : str
            The string to display as Github-flavored Markdown. Syntax
            information can be found at: https://github.github.com/gfm.

            This also supports:

            * Emoji shortcodes, such as ``:+1:``  and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

        unsafe_allow_html : bool
            By default, any HTML tags found in the body will be escaped and
            therefore treated as pure text. This behavior may be turned off by
            setting this argument to True.

            That said, we *strongly advise against it*. It is hard to write
            secure HTML, so by using this argument you may be compromising your
            users' security. For more information, see:

            https://github.com/streamlit/streamlit/issues/152

        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> st.markdown('Streamlit is **_really_ cool**.')
        >>> st.markdown("This text is :red[colored red], and this is **:blue[colored]** and bold.")
        >>> st.markdown(":green[$\sqrt{x^2+y^2}=1$] is a Pythagorean identity. :pencil:")

        """
        markdown_proto = MarkdownProto()

        markdown_proto.body = clean_text(body)
        markdown_proto.allow_html = unsafe_allow_html

        return self.dg._enqueue("markdown", markdown_proto)

    @gather_metrics("code")
    def code(
        self, body: SupportsStr, language: Optional[str] = "python"
    ) -> "DeltaGenerator":
        """Display a code block with optional syntax highlighting.

        (This is a convenience wrapper around `st.markdown()`)

        Parameters
        ----------
        body : str
            The string to display as code.

        language : str or None
            The language that the code is written in, for syntax highlighting.
            If ``None``, the code will be unstyled. Defaults to ``"python"``.

            For a list of available ``language`` values, see:

            https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_PRISM.MD

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> code = '''def hello():
        ...     print("Hello, Streamlit!")'''
        >>> st.code(code, language='python')

        """
        code_proto = MarkdownProto()
        # Wrap the body in a fenced markdown code block.
        markdown = f'```{language or ""}\n{body}\n```'
        code_proto.body = clean_text(markdown)
        return self.dg._enqueue("markdown", code_proto)

    @gather_metrics("caption")
    def caption(
        self, body: SupportsStr, unsafe_allow_html: bool = False
    ) -> "DeltaGenerator":
        """Display text in small font.

        This should be used for captions, asides, footnotes, sidenotes, and
        other explanatory text.

        Parameters
        ----------
        body : str
            The text to display as Github-flavored Markdown. Syntax
            information can be found at: https://github.github.com/gfm.

            This also supports:

            * Emoji shortcodes, such as ``:+1:``  and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

        unsafe_allow_html : bool
            By default, any HTML tags found in strings will be escaped and
            therefore treated as pure text. This behavior may be turned off by
            setting this argument to True.

            That said, *we strongly advise against it*. It is hard to write secure
            HTML, so by using this argument you may be compromising your users'
            security. For more information, see:

            https://github.com/streamlit/streamlit/issues/152

        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> st.caption('This is a string that explains something above.')
        >>> st.caption('A caption with _italics_ :blue[colors] and emojis :sunglasses:')

        """
        caption_proto = MarkdownProto()
        caption_proto.body = clean_text(body)
        caption_proto.allow_html = unsafe_allow_html
        caption_proto.is_caption = True
        return self.dg._enqueue("markdown", caption_proto)

    @gather_metrics("latex")
    def latex(self, body: Union[SupportsStr, "sympy.Expr"]) -> "DeltaGenerator":
        # This docstring needs to be "raw" because of the backslashes in the
        # example below.
        r"""Display mathematical expressions formatted as LaTeX.

        Supported LaTeX functions are listed at
        https://katex.org/docs/supported.html.

        Parameters
        ----------
        body : str or SymPy expression
            The string or SymPy expression to display as LaTeX. If str, it's
            a good idea to use raw Python strings since LaTeX uses backslashes
            a lot.


        Example
        -------
        >>> import streamlit as st
        >>>
        >>> st.latex(r'''
        ...     a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} =
        ...     \sum_{k=0}^{n-1} ar^k =
        ...     a \left(\frac{1-r^{n}}{1-r}\right)
        ...     ''')

        """
        if is_sympy_expession(body):
            import sympy

            body = sympy.latex(body)

        latex_proto = MarkdownProto()
        latex_proto.body = "$$\n%s\n$$" % clean_text(body)
        return self.dg._enqueue("markdown", latex_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        # Fixed: the original return line had dataset-export residue fused
        # onto it; also fixed the curly quotes in the markdown() example.
        return cast("DeltaGenerator", self)
import textwrap
from typing import NamedTuple, Optional, cast
from typing_extensions import Literal
import streamlit
from streamlit import runtime
from streamlit.errors import StreamlitAPIException
from streamlit.proto import Block_pb2
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
class FormData(NamedTuple):
    """Form data stored on a DeltaGenerator."""

    # The form's unique ID.
    form_id: str
def _current_form(
    this_dg: "streamlit.delta_generator.DeltaGenerator",
) -> Optional[FormData]:
    """Find the FormData for the given DeltaGenerator.

    Forms are blocks, and can have other blocks nested inside them.
    To find the current form, we walk up the dg_stack until we find
    a DeltaGenerator that has FormData.

    Returns None when not inside any form (or outside a running app).
    """
    # Outside a Streamlit runtime (e.g. plain script) there can be no form.
    if not runtime.exists():
        return None

    if this_dg._form_data is not None:
        return this_dg._form_data

    if this_dg == this_dg._main_dg:
        # We were created via an `st.foo` call.
        # Walk up the dg_stack to see if we're nested inside a `with st.form` statement.
        ctx = get_script_run_ctx()
        if ctx is None or len(ctx.dg_stack) == 0:
            return None

        # Innermost enclosing form wins, hence the reversed walk.
        for dg in reversed(ctx.dg_stack):
            if dg._form_data is not None:
                return dg._form_data
    else:
        # We were created via an `dg.foo` call.
        # Take a look at our parent's form data to see if we're nested inside a form.
        parent = this_dg._parent
        if parent is not None and parent._form_data is not None:
            return parent._form_data

    return None
def current_form_id(dg: "streamlit.delta_generator.DeltaGenerator") -> str:
    """Return the form_id for the current form, or the empty string if we're
    not inside an `st.form` block.

    (We return the empty string, instead of None, because this value is
    assigned to protobuf message fields, and None is not valid.)
    """
    form_data = _current_form(dg)
    return "" if form_data is None else form_data.form_id
def is_in_form(dg: "streamlit.delta_generator.DeltaGenerator") -> bool:
    """True if the DeltaGenerator is inside an st.form block."""
    # current_form_id returns "" outside a form, so truthiness suffices.
    return bool(current_form_id(dg))
def _build_duplicate_form_message(user_key: Optional[str] = None) -> str:
if user_key is not None:
message = textwrap.dedent(
f"""
There are multiple identical forms with `key='{user_key}'`.
To fix this, please make sure that the `key` argument is unique for
each `st.form` you create.
"""
)
else:
message = textwrap.dedent(
"""
There are multiple identical forms with the same generated key.
When a form is created, it's assigned an internal key based on
its structure. Multiple forms with an identical structure will
result in the same internal key, which causes this error.
To fix this error, please pass a unique `key` argument to
`st.form`.
"""
)
return message.strip("\n")
class FormMixin:
    @gather_metrics("form")
    def form(self, key: str, clear_on_submit: bool = False):
        """Create a form that batches elements together with a "Submit" button.

        A form is a container that visually groups other elements and
        widgets together, and contains a Submit button. When the form's
        Submit button is pressed, all widget values inside the form will be
        sent to Streamlit in a batch.

        To add elements to a form object, you can use "with" notation
        (preferred) or just call methods directly on the form. See
        examples below.

        Forms have a few constraints:

        * Every form must contain a ``st.form_submit_button``.
        * ``st.button`` and ``st.download_button`` cannot be added to a form.
        * Forms can appear anywhere in your app (sidebar, columns, etc),
          but they cannot be embedded inside other forms.

        For more information about forms, check out our
        `blog post <https://blog.streamlit.io/introducing-submit-button-and-forms/>`_.

        Parameters
        ----------
        key : str
            A string that identifies the form. Each form must have its own
            key. (This key is not displayed to the user in the interface.)
        clear_on_submit : bool
            If True, all widgets inside the form will be reset to their default
            values after the user presses the Submit button. Defaults to False.
            (Note that Custom Components are unaffected by this flag, and
            will not be reset to their defaults on form submission.)

        Examples
        --------
        Inserting elements using "with" notation:

        >>> import streamlit as st
        >>>
        >>> with st.form("my_form"):
        ...    st.write("Inside the form")
        ...    slider_val = st.slider("Form slider")
        ...    checkbox_val = st.checkbox("Form checkbox")
        ...
        ...    # Every form must have a submit button.
        ...    submitted = st.form_submit_button("Submit")
        ...    if submitted:
        ...        st.write("slider", slider_val, "checkbox", checkbox_val)
        ...
        >>> st.write("Outside the form")

        Inserting elements out of order:

        >>> import streamlit as st
        >>>
        >>> form = st.form("my_form")
        >>> form.slider("Inside the form")
        >>> st.slider("Outside the form")
        >>>
        >>> # Now add a submit button to the form:
        >>> form.form_submit_button("Submit")
        """
        # Import this here to avoid circular imports.
        from streamlit.elements.utils import check_session_state_rules

        if is_in_form(self.dg):
            raise StreamlitAPIException("Forms cannot be nested in other forms.")

        # Forms are containers, not value-bearing widgets: session state may
        # not pre-populate or write to a form's key.
        check_session_state_rules(default_value=None, key=key, writes_allowed=False)

        # A form is uniquely identified by its key.
        form_id = key

        ctx = get_script_run_ctx()
        if ctx is not None:
            # Reject a second form with the same key in the same script run.
            new_form_id = form_id not in ctx.form_ids_this_run
            if new_form_id:
                ctx.form_ids_this_run.add(form_id)
            else:
                raise StreamlitAPIException(_build_duplicate_form_message(key))

        block_proto = Block_pb2.Block()
        block_proto.form.form_id = form_id
        block_proto.form.clear_on_submit = clear_on_submit
        block_dg = self.dg._block(block_proto)

        # Attach the form's data to the newly-created block's DeltaGenerator,
        # so elements added to the block know which form owns them.
        block_dg._form_data = FormData(form_id)
        return block_dg

    @gather_metrics("form_submit_button")
    def form_submit_button(
        self,
        label: str = "Submit",
        help: Optional[str] = None,
        on_click=None,
        args=None,
        kwargs=None,
        *,  # keyword-only arguments:
        type: Literal["primary", "secondary"] = "secondary",
        disabled: bool = False,
    ) -> bool:
        """Display a form submit button.

        When this button is clicked, all widget values inside the form will be
        sent to Streamlit in a batch.

        Every form must have a form_submit_button. A form_submit_button
        cannot exist outside a form.

        For more information about forms, check out our
        `blog post <https://blog.streamlit.io/introducing-submit-button-and-forms/>`_.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
            Defaults to "Submit".
        help : str or None
            A tooltip that gets displayed when the button is hovered over.
            Defaults to None.
        on_click : callable
            An optional callback invoked when this button is clicked.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        type : "secondary" or "primary"
            An optional string that specifies the button type. Can be "primary" for a
            button with additional emphasis or "secondary" for a normal button. This
            argument can only be supplied by keyword. Defaults to "secondary".
        disabled : bool
            An optional boolean, which disables the button if set to True. The
            default is False. This argument can only be supplied by keyword.

        Returns
        -------
        bool
            True if the button was clicked.
        """
        ctx = get_script_run_ctx()

        # Checks whether the entered button type is one of the allowed
        # options - either "primary" or "secondary".
        if type not in ["primary", "secondary"]:
            raise StreamlitAPIException(
                'The type argument to st.form_submit_button must be "primary" or "secondary". \n'
                f'The argument passed was "{type}".'
            )

        return self._form_submit_button(
            label=label,
            help=help,
            on_click=on_click,
            args=args,
            kwargs=kwargs,
            type=type,
            disabled=disabled,
            ctx=ctx,
        )

    def _form_submit_button(
        self,
        label: str = "Submit",
        help: Optional[str] = None,
        on_click=None,
        args=None,
        kwargs=None,
        *,  # keyword-only arguments:
        type: Literal["primary", "secondary"] = "secondary",
        disabled: bool = False,
        ctx: Optional[ScriptRunContext] = None,
    ) -> bool:
        """Implementation of form_submit_button.

        The button's widget key is derived from the enclosing form's id plus
        the label, so differently-labeled submit buttons in one form do not
        collide.
        """
        form_id = current_form_id(self.dg)
        submit_button_key = f"FormSubmitter:{form_id}-{label}"
        return self.dg._button(
            label=label,
            key=submit_button_key,
            help=help,
            is_form_submitter=True,
            on_click=on_click,
            args=args,
            kwargs=kwargs,
            type=type,
            disabled=disabled,
            ctx=ctx,
        )

    @property
    def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("streamlit.delta_generator.DeltaGenerator", self)
from dataclasses import dataclass
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generic,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from typing_extensions import TypeGuard
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
ensure_indexable,
maybe_raise_label_warnings,
to_key,
)
from streamlit.util import index_
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
def _is_range_value(value: Union[T, Sequence[T]]) -> TypeGuard[Sequence[T]]:
    # A list/tuple value means the caller wants a (lower, upper) range slider;
    # any other type is treated as a single scalar selection.
    return isinstance(value, (list, tuple))
@dataclass
class SelectSliderSerde(Generic[T]):
    """Translates between user-facing option values and the list-of-indices
    representation exchanged with the slider frontend."""

    options: Sequence[T]
    value: List[int]
    is_range_value: bool

    def serialize(self, v: object) -> List[int]:
        return self._as_index_list(v)

    def deserialize(
        self,
        ui_value: Optional[List[int]],
        widget_id: str = "",
    ) -> Union[T, Tuple[T, T]]:
        # A widget that hasn't been interacted with reports no value; fall
        # back to the stored default indices.
        indices = ui_value if ui_value else self.value
        # The widget always returns floats, so coerce to int before indexing.
        resolved = cast(
            Tuple[T, T],
            tuple(self.options[int(i)] for i in indices),
        )
        # Ranges deserialize to a tuple, scalars to a single option value.
        return resolved if self.is_range_value else resolved[0]

    def _as_index_list(self, v: object) -> List[int]:
        if not _is_range_value(v):
            return [index_(self.options, v)]
        indices = [index_(self.options, item) for item in v]
        start, end = indices
        # Present the range low-to-high regardless of the input order.
        return [end, start] if start > end else indices
class SelectSliderMixin:
    @gather_metrics("select_slider")
    def select_slider(
        self,
        label: str,
        options: OptionSequence[T] = (),
        value: object = None,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> Union[T, Tuple[T, T]]:
        """
        Display a slider widget to select items from a list.

        This also allows you to render a range slider by passing a two-element
        tuple or list as the `value`.

        The difference between `st.select_slider` and `st.slider` is that
        `select_slider` accepts any datatype and takes an iterable set of
        options, while `slider` only accepts numerical or date/time data and
        takes a range as input.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this slider is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        options : Sequence, numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index
            Labels for the slider options. All options will be cast to str
            internally by default. For pandas.DataFrame, the first column is
            selected.
        value : a supported type or a tuple/list of supported types or None
            The value of the slider when it first renders. If a tuple/list
            of two values is passed here, then a range slider with those lower
            and upper bounds is rendered. For example, if set to `(1, 10)` the
            slider will have a selectable range between 1 and 10.
            Defaults to first option.
        format_func : function
            Function to modify the display of the labels from the options.
            argument. It receives the option as an argument and its output
            will be cast to str.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the select slider.
        on_change : callable
            An optional callback invoked when this select_slider's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the select slider if set to True.
            The default is False. This argument can only be supplied by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.

        Returns
        -------
        any value or tuple of any value
            The current value of the slider widget. The return type will match
            the data type of the value parameter.

        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> color = st.select_slider(
        ...     'Select a color of the rainbow',
        ...     options=['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet'])
        >>> st.write('My favorite color is', color)

        And here's an example of a range select slider:

        >>> import streamlit as st
        >>>
        >>> start_color, end_color = st.select_slider(
        ...     'Select a range of color wavelength',
        ...     options=['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet'],
        ...     value=('red', 'blue'))
        >>> st.write('You selected wavelengths between', start_color, 'and', end_color)

        .. output::
           https://doc-select-slider.streamlitapp.com/
           height: 450px
        """
        # Thin public wrapper: capture the active script-run context and
        # delegate all real work to _select_slider.
        ctx = get_script_run_ctx()
        return self._select_slider(
            label=label,
            options=options,
            value=value,
            format_func=format_func,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )
    def _select_slider(
        self,
        label: str,
        options: OptionSequence[T] = (),
        value: object = None,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> Union[T, Tuple[T, T]]:
        """Implementation behind select_slider.

        Validates the inputs, maps user-facing option values onto integer
        indices (the wire format the slider frontend understands), registers
        the widget, and returns the current selection mapped back to option
        values.
        """
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        check_session_state_rules(default_value=value, key=key)
        maybe_raise_label_warnings(label, label_visibility)
        opt = ensure_indexable(options)
        if len(opt) == 0:
            raise StreamlitAPIException("The `options` argument needs to be non-empty")
        def as_index_list(v: object) -> List[int]:
            # Map option value(s) to their indices in `opt`; a two-element
            # range is normalized so the smaller index comes first.
            if _is_range_value(v):
                slider_value = [index_(opt, val) for val in v]
                start, end = slider_value
                if start > end:
                    slider_value = [end, start]
                return slider_value
            else:
                # Simplify future logic by always making value a list
                try:
                    return [index_(opt, v)]
                except ValueError:
                    if value is not None:
                        raise
                    # No explicit default was supplied: fall back to the
                    # first option.
                    return [0]
        # Convert element to index of the elements
        slider_value = as_index_list(value)
        slider_proto = SliderProto()
        slider_proto.label = label
        slider_proto.format = "%s"
        slider_proto.default[:] = slider_value
        slider_proto.min = 0
        slider_proto.max = len(opt) - 1
        slider_proto.step = 1  # default for index changes
        slider_proto.data_type = SliderProto.INT
        slider_proto.options[:] = [str(format_func(option)) for option in opt]
        slider_proto.form_id = current_form_id(self.dg)
        if help is not None:
            slider_proto.help = dedent(help)
        serde = SelectSliderSerde(opt, slider_value, _is_range_value(value))
        widget_state = register_widget(
            "slider",
            slider_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        slider_proto.disabled = disabled
        slider_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if widget_state.value_changed:
            # The value came from session state / a callback rather than the
            # frontend: echo it back so the UI stays in sync.
            slider_proto.value[:] = serde.serialize(widget_state.value)
            slider_proto.set_value = True
        self.dg._enqueue("slider", slider_proto)
        return widget_state.value
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/select_slider.py | 0.938787 | 0.308744 | select_slider.py | pypi |
import json
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, cast
from typing_extensions import Final
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as PydeckProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from pydeck import Deck
from streamlit.delta_generator import DeltaGenerator
# Spec used when st.pydeck_chart is called without a Deck object: an empty
# world map centered on latitude/longitude (0, 0).
EMPTY_MAP: Final[Mapping[str, Any]] = {
    "initialViewState": {"latitude": 0, "longitude": 0, "pitch": 0, "zoom": 1},
}
class PydeckMixin:
    @gather_metrics("pydeck_chart")
    def pydeck_chart(
        self,
        pydeck_obj: Optional["Deck"] = None,
        use_container_width: bool = False,
    ) -> "DeltaGenerator":
        """Draw a chart using the PyDeck library.

        This supports 3D maps, point clouds, and more! More info about PyDeck
        at https://deckgl.readthedocs.io/en/latest/.

        These docs are also quite useful:

        - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
        - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more
        robust. You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels). For more info
        on how to set config options, see
        https://docs.streamlit.io/library/advanced-features/configuration#set-configuration-options

        Parameters
        ----------
        pydeck_obj: pydeck.Deck or None
            Object specifying the PyDeck chart to draw. If None, an empty
            map centered on (0, 0) is rendered instead.
        use_container_width: bool
            If True, set the chart width to the column width. Defaults to
            False.

        Example
        -------
        Here's a chart using a HexagonLayer and a ScatterplotLayer. It uses either the
        light or dark map style, based on which Streamlit theme is currently active:

        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>> import pydeck as pdk
        >>>
        >>> chart_data = pd.DataFrame(
        ...    np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...    columns=['lat', 'lon'])
        >>>
        >>> st.pydeck_chart(pdk.Deck(
        ...     map_style=None,
        ...     initial_view_state=pdk.ViewState(
        ...         latitude=37.76,
        ...         longitude=-122.4,
        ...         zoom=11,
        ...         pitch=50,
        ...     ),
        ...     layers=[
        ...         pdk.Layer(
        ...            'HexagonLayer',
        ...            data=chart_data,
        ...            get_position='[lon, lat]',
        ...            radius=200,
        ...            elevation_scale=4,
        ...            elevation_range=[0, 1000],
        ...            pickable=True,
        ...            extruded=True,
        ...         ),
        ...         pdk.Layer(
        ...             'ScatterplotLayer',
        ...             data=chart_data,
        ...             get_position='[lon, lat]',
        ...             get_color='[200, 30, 0, 160]',
        ...             get_radius=200,
        ...         ),
        ...     ],
        ... ))

        .. output::
           https://doc-pydeck-chart.streamlitapp.com/
           height: 530px

        .. note::
           To make the PyDeck chart's style consistent with Streamlit's theme,
           you can set ``map_style=None`` in the ``pydeck.Deck`` object.
        """
        pydeck_proto = PydeckProto()
        # Serialize the Deck (or the EMPTY_MAP fallback) into the proto, then
        # enqueue it for the frontend to render.
        marshall(pydeck_proto, pydeck_obj, use_container_width)
        return self.dg._enqueue("deck_gl_json_chart", pydeck_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def _get_pydeck_tooltip(pydeck_obj: Optional["Deck"]) -> Optional[Dict[str, str]]:
    """Extract the tooltip dict from a Deck, if one is configured.

    Handles the two attribute layouts used across pydeck versions; returns
    None when no tooltip is available.
    """
    if pydeck_obj is None:
        return None

    # pydeck < 0.8.1, or pydeck >= 0.8.1 with the jupyter extra installed,
    # exposes the tooltip on a deck_widget attribute.
    widget = getattr(pydeck_obj, "deck_widget", None)
    if widget is not None and isinstance(widget.tooltip, dict):
        return widget.tooltip

    # pydeck >= 0.8.1 without the jupyter extra keeps it on `_tooltip`.
    # For details, see: https://github.com/visgl/deck.gl/pull/7125/files
    tooltip = getattr(pydeck_obj, "_tooltip", None)
    return tooltip if isinstance(tooltip, dict) else None
def marshall(
    pydeck_proto: PydeckProto,
    pydeck_obj: Optional["Deck"],
    use_container_width: bool,
) -> None:
    """Fill in a DeckGlJsonChart proto from an (optional) pydeck Deck."""
    # With no Deck supplied, fall back to the empty-map spec.
    if pydeck_obj is None:
        pydeck_proto.json = json.dumps(EMPTY_MAP)
    else:
        pydeck_proto.json = pydeck_obj.to_json()
    pydeck_proto.use_container_width = use_container_width

    tooltip = _get_pydeck_tooltip(pydeck_obj)
    if tooltip:
        pydeck_proto.tooltip = json.dumps(tooltip)
import sys
from typing import Any
import streamlit as st
from streamlit import code_util, string_util
from streamlit.runtime.metrics_util import gather_metrics
@gather_metrics("experimental_show")
def show(*args: Any) -> None:
    """Write arguments and *argument names* to your app for debugging purposes.

    Show() has similar properties to write():

    1. You can pass in multiple arguments, all of which will be debugged.
    2. It returns None, so its "slot" in the app cannot be reused.

    Note: This is an experimental feature. See
    https://docs.streamlit.io/library/advanced-features/prerelease#experimental for more information.

    Parameters
    ----------
    *args : any
        One or many objects to debug in the App.

    Example
    -------
    >>> import streamlit as st
    >>> import pandas as pd
    >>>
    >>> dataframe = pd.DataFrame({
    ...     'first column': [1, 2, 3, 4],
    ...     'second column': [10, 20, 30, 40],
    ... })
    >>> st.experimental_show(dataframe)

    Notes
    -----
    This is an experimental feature with usage limitations:

    - The method must be called with the name `show`.
    - Must be called in one line of code, and only once per line.
    - When passing multiple arguments the inclusion of `,` or `)` in a string
      argument may cause an error.
    """
    if not args:
        return

    try:
        import inspect

        # Get the calling line of code so we can recover the argument
        # expressions exactly as the user wrote them.
        current_frame = inspect.currentframe()
        if current_frame is None:
            st.warning("`show` not enabled in the shell")
            return

        # Use two f_back because of telemetry decorator
        if current_frame.f_back is not None and current_frame.f_back.f_back is not None:
            lines = inspect.getframeinfo(current_frame.f_back.f_back)[3]
        else:
            lines = None

        if not lines:
            st.warning("`show` not enabled in the shell")
            return

        # Parse arguments from the line
        line = lines[0].split("show", 1)[1]
        inputs = code_util.get_method_args_from_code(args, line)

        # Write each argument expression as an escaped markdown heading,
        # followed by the argument's value.
        for idx, arg_text in enumerate(inputs):
            escaped = string_util.escape_markdown(arg_text)
            st.markdown(f"**{escaped}**")
            st.write(args[idx])

    except Exception as raised_exc:
        exc = sys.exc_info()[1]
        if exc is None:
            # Presumably, exc should never be None, but it is typed as
            # Optional, and I don't know the internals of sys.exc_info() well
            # enough to just use a cast here. Hence, the runtime check.
            raise RuntimeError(
                "Unexpected state: exc was None. If you see this message, "
                "please create an issue at "
                "https://github.com/streamlit/streamlit/issues"
            ) from raised_exc
        st.exception(exc)
import hashlib
import json
from typing import TYPE_CHECKING, cast
from typing_extensions import Final
from streamlit.errors import StreamlitAPIException
from streamlit.proto.BokehChart_pb2 import BokehChart as BokehChartProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from bokeh.plotting.figure import Figure
from streamlit.delta_generator import DeltaGenerator
# The exact Bokeh version supported by the Streamlit frontend; bokeh_chart
# raises unless the installed bokeh matches (see the check in BokehMixin).
ST_BOKEH_VERSION: Final = "2.4.3"
class BokehMixin:
    @gather_metrics("bokeh_chart")
    def bokeh_chart(
        self,
        figure: "Figure",
        use_container_width: bool = False,
    ) -> "DeltaGenerator":
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this function
        closely follow the ones for Bokeh's `show` function. You can find
        more about Bokeh at https://bokeh.pydata.org.

        To show Bokeh charts in Streamlit, call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend_label='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        .. output::
           https://doc-bokeh-chart.streamlitapp.com/
           height: 700px
        """
        # Imported lazily so bokeh stays an optional dependency.
        import bokeh

        # The installed bokeh must match the BokehJS version bundled by the
        # frontend exactly, otherwise charts fail to render; fail fast with
        # install instructions.
        if bokeh.__version__ != ST_BOKEH_VERSION:
            raise StreamlitAPIException(
                f"Streamlit only supports Bokeh version {ST_BOKEH_VERSION}, "
                f"but you have version {bokeh.__version__} installed. Please "
                f"run `pip install --force-reinstall --no-deps bokeh=="
                f"{ST_BOKEH_VERSION}` to install the correct version."
            )

        # Generate element ID from delta path. md5 is used here only as a
        # stable, non-cryptographic hash of the element's position in the app.
        delta_path = self.dg._get_delta_path_str()
        element_id = hashlib.md5(delta_path.encode()).hexdigest()

        bokeh_chart_proto = BokehChartProto()
        marshall(bokeh_chart_proto, figure, use_container_width, element_id)
        return self.dg._enqueue("bokeh_chart", bokeh_chart_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def marshall(
    proto: BokehChartProto,
    figure: "Figure",
    use_container_width: bool,
    element_id: str,
) -> None:
    """Serialize a Bokeh figure into a BokehChart proto.

    See DeltaGenerator.bokeh_chart for docs.
    """
    from bokeh.embed import json_item

    # json_item produces the JSON-serializable "item" form that BokehJS
    # embeds on the frontend.
    proto.figure = json.dumps(json_item(figure))
    proto.use_container_width = use_container_width
    proto.element_id = element_id
from typing import TYPE_CHECKING, List, Optional, Sequence, Union, cast
from streamlit.deprecation_util import deprecate_func_name
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Block_pb2 import Block as BlockProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
# Column spec accepted by st.columns: an int gives that many equal-width
# columns; a sequence of positive numbers gives one column per entry, with
# widths proportional to the values.
SpecType = Union[int, Sequence[Union[int, float]]]
class LayoutsMixin:
    @gather_metrics("container")
    def container(self) -> "DeltaGenerator":
        """Insert a multi-element container.

        Inserts an invisible container into your app that can be used to hold
        multiple elements. This allows you to, for example, insert multiple
        elements into your app out of order.

        To add elements to the returned container, you can use "with" notation
        (preferred) or just call methods directly on the returned object. See
        examples below.

        Examples
        --------
        Inserting elements using "with" notation:

        >>> import streamlit as st
        >>>
        >>> with st.container():
        ...    st.write("This is inside the container")
        ...
        ...    # You can call any Streamlit command, including custom components:
        ...    st.bar_chart(np.random.randn(50, 3))
        ...
        >>> st.write("This is outside the container")

        .. output ::
            https://doc-container1.streamlitapp.com/
            height: 520px

        Inserting elements out of order:

        >>> import streamlit as st
        >>>
        >>> container = st.container()
        >>> container.write("This is inside the container")
        >>> st.write("This is outside the container")
        >>>
        >>> # Now insert some more in the container
        >>> container.write("This is inside too")

        .. output ::
            https://doc-container2.streamlitapp.com/
            height: 480px
        """
        # A block with no customized proto is a plain, undecorated container.
        return self.dg._block()
# TODO: Enforce that columns are not nested or in Sidebar
@gather_metrics("columns")
def columns(
self, spec: SpecType, *, gap: Optional[str] = "small"
) -> List["DeltaGenerator"]:
"""Insert containers laid out as side-by-side columns.
Inserts a number of multi-element containers laid out side-by-side and
returns a list of container objects.
To add elements to the returned containers, you can use "with" notation
(preferred) or just call methods directly on the returned object. See
examples below.
.. warning::
Currently, you may not put columns inside another column.
Parameters
----------
spec : int or list of numbers
If an int
Specifies the number of columns to insert, and all columns
have equal width.
If a list of numbers
Creates a column for each number, and each
column's width is proportional to the number provided. Numbers can
be ints or floats, but they must be positive.
For example, `st.columns([3, 1, 2])` creates 3 columns where
the first column is 3 times the width of the second, and the last
column is 2 times that width.
gap : string ("small", "medium", or "large")
An optional string, which indicates the size of the gap between each column.
The default is a small gap between columns. This argument can only be supplied by
keyword.
Returns
-------
list of containers
A list of container objects.
Examples
--------
You can use `with` notation to insert any element into a column:
>>> import streamlit as st
>>>
>>> col1, col2, col3 = st.columns(3)
>>>
>>> with col1:
... st.header("A cat")
... st.image("https://static.streamlit.io/examples/cat.jpg")
...
>>> with col2:
... st.header("A dog")
... st.image("https://static.streamlit.io/examples/dog.jpg")
...
>>> with col3:
... st.header("An owl")
... st.image("https://static.streamlit.io/examples/owl.jpg")
.. output ::
https://doc-columns1.streamlitapp.com/
height: 620px
Or you can just call methods directly in the returned objects:
>>> import streamlit as st
>>> import numpy as np
>>>
>>> col1, col2 = st.columns([3, 1])
>>> data = np.random.randn(10, 1)
>>>
>>> col1.subheader("A wide column with a chart")
>>> col1.line_chart(data)
>>>
>>> col2.subheader("A narrow column with the data")
>>> col2.write(data)
.. output ::
https://doc-columns2.streamlitapp.com/
height: 550px
"""
weights = spec
weights_exception = StreamlitAPIException(
"The input argument to st.columns must be either a "
+ "positive integer or a list of positive numeric weights. "
+ "See [documentation](https://docs.streamlit.io/library/api-reference/layout/st.columns) "
+ "for more information."
)
if isinstance(weights, int):
# If the user provided a single number, expand into equal weights.
# E.g. (1,) * 3 => (1, 1, 1)
# NOTE: A negative/zero spec will expand into an empty tuple.
weights = (1,) * weights
if len(weights) == 0 or any(weight <= 0 for weight in weights):
raise weights_exception
def column_gap(gap):
if type(gap) == str:
gap_size = gap.lower()
valid_sizes = ["small", "medium", "large"]
if gap_size in valid_sizes:
return gap_size
raise StreamlitAPIException(
'The gap argument to st.columns must be "small", "medium", or "large". \n'
f"The argument passed was {gap}."
)
gap_size = column_gap(gap)
def column_proto(normalized_weight: float) -> BlockProto:
col_proto = BlockProto()
col_proto.column.weight = normalized_weight
col_proto.column.gap = gap_size
col_proto.allow_empty = True
return col_proto
block_proto = BlockProto()
block_proto.horizontal.gap = gap_size
row = self.dg._block(block_proto)
total_weight = sum(weights)
return [row._block(column_proto(w / total_weight)) for w in weights]
@gather_metrics("tabs")
def tabs(self, tabs: Sequence[str]) -> Sequence["DeltaGenerator"]:
"""Insert containers separated into tabs.
Inserts a number of multi-element containers as tabs.
Tabs are a navigational element that allows users to easily
move between groups of related content.
To add elements to the returned containers, you can use "with" notation
(preferred) or just call methods directly on the returned object. See
examples below.
.. warning::
All the content of every tab is always sent to and rendered on the frontend.
Conditional rendering is currently not supported.
Parameters
----------
tabs : list of strings
Creates a tab for each string in the list. The first tab is selected by default.
The string is used as the name of the tab and can optionally contain Markdown,
supporting the following elements: Bold, Italics, Strikethroughs, Inline Code,
Emojis, and Links.
Returns
-------
list of containers
A list of container objects.
Examples
--------
You can use `with` notation to insert any element into a tab:
>>> import streamlit as st
>>>
>>> tab1, tab2, tab3 = st.tabs(["Cat", "Dog", "Owl"])
>>>
>>> with tab1:
... st.header("A cat")
... st.image("https://static.streamlit.io/examples/cat.jpg", width=200)
...
>>> with tab2:
... st.header("A dog")
... st.image("https://static.streamlit.io/examples/dog.jpg", width=200)
...
>>> with tab3:
... st.header("An owl")
... st.image("https://static.streamlit.io/examples/owl.jpg", width=200)
.. output ::
https://doc-tabs1.streamlitapp.com/
height: 620px
Or you can just call methods directly in the returned objects:
>>> import streamlit as st
>>> import numpy as np
>>>
>>> tab1, tab2 = st.tabs(["📈 Chart", "🗃 Data"])
>>> data = np.random.randn(10, 1)
>>>
>>> tab1.subheader("A tab with a chart")
>>> tab1.line_chart(data)
>>>
>>> tab2.subheader("A tab with the data")
>>> tab2.write(data)
.. output ::
https://doc-tabs2.streamlitapp.com/
height: 700px
"""
if not tabs:
raise StreamlitAPIException(
"The input argument to st.tabs must contain at least one tab label."
)
if any(isinstance(tab, str) == False for tab in tabs):
raise StreamlitAPIException(
"The tabs input list to st.tabs is only allowed to contain strings."
)
def tab_proto(label: str) -> BlockProto:
tab_proto = BlockProto()
tab_proto.tab.label = label
tab_proto.allow_empty = True
return tab_proto
block_proto = BlockProto()
block_proto.tab_container.SetInParent()
tab_container = self.dg._block(block_proto)
return tuple(tab_container._block(tab_proto(tab_label)) for tab_label in tabs)
@gather_metrics("expander")
def expander(self, label: str, expanded: bool = False) -> "DeltaGenerator":
r"""Insert a multi-element container that can be expanded/collapsed.
Inserts a container into your app that can be used to hold multiple elements
and can be expanded or collapsed by the user. When collapsed, all that is
visible is the provided label.
To add elements to the returned container, you can use "with" notation
(preferred) or just call methods directly on the returned object. See
examples below.
.. warning::
Currently, you may not put expanders inside another expander.
Parameters
----------
label : str
A string to use as the header for the expander. The label can optionally
contain Markdown and supports the following elements: Bold, Italics,
Strikethroughs, Inline Code, Emojis, and Links.
expanded : bool
If True, initializes the expander in "expanded" state. Defaults to
False (collapsed).
Examples
--------
You can use `with` notation to insert any element into an expander
>>> import streamlit as st
>>>
>>> st.bar_chart({"data": [1, 5, 2, 6, 2, 1]})
>>>
>>> with st.expander("See explanation"):
... st.write(\"\"\"
... The chart above shows some numbers I picked for you.
... I rolled actual dice for these, so they're *guaranteed* to
... be random.
... \"\"\")
... st.image("https://static.streamlit.io/examples/dice.jpg")
.. output ::
https://doc-expander.streamlitapp.com/
height: 750px
Or you can just call methods directly in the returned objects:
>>> import streamlit as st
>>>
>>> st.bar_chart({"data": [1, 5, 2, 6, 2, 1]})
>>>
>>> expander = st.expander("See explanation")
>>> expander.write(\"\"\"
... The chart above shows some numbers I picked for you.
... I rolled actual dice for these, so they're *guaranteed* to
... be random.
... \"\"\")
>>> expander.image("https://static.streamlit.io/examples/dice.jpg")
.. output ::
https://doc-expander.streamlitapp.com/
height: 750px
"""
if label is None:
raise StreamlitAPIException("A label is required for an expander")
expandable_proto = BlockProto.Expandable()
expandable_proto.expanded = expanded
expandable_proto.label = label
block_proto = BlockProto()
block_proto.allow_empty = True
block_proto.expandable.CopyFrom(expandable_proto)
return self.dg._block(block_proto=block_proto)
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
# Deprecated beta_ functions
beta_container = deprecate_func_name(container, "beta_container", "2021-11-02")
beta_expander = deprecate_func_name(expander, "beta_expander", "2021-11-02")
beta_columns = deprecate_func_name(columns, "beta_columns", "2021-11-02") | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/layouts.py | 0.914184 | 0.527621 | layouts.py | pypi |
import json
from typing import TYPE_CHECKING, Any, Dict, Optional, Union, cast
from typing_extensions import Final, Literal
import streamlit.elements.lib.dicttools as dicttools
from streamlit.elements import arrow
from streamlit.elements.arrow import Data
from streamlit.errors import StreamlitAPIException
from streamlit.logger import get_logger
from streamlit.proto.ArrowVegaLiteChart_pb2 import (
ArrowVegaLiteChart as ArrowVegaLiteChartProto,
)
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
LOGGER: Final = get_logger(__name__)
class ArrowVegaLiteMixin:
    @gather_metrics("_arrow_vega_lite_chart")
    def _arrow_vega_lite_chart(
        self,
        data: Data = None,
        spec: Optional[Dict[str, Any]] = None,
        use_container_width: bool = False,
        theme: Union[None, Literal["streamlit"]] = "streamlit",
        **kwargs: Any,
    ) -> "DeltaGenerator":
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.DataFrame, Iterable, dict, or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).
        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.
        theme : "streamlit" or None
            The theme of the chart. Currently, we only support "streamlit" for
            the Streamlit defined design or None to fallback to the default
            behavior of the library.
        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st._arrow_vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.
        """
        # `is not None` (identity) is the correct way to compare against None;
        # `!= None` relies on the object's __ne__ and is flagged by PEP 8.
        if theme != "streamlit" and theme is not None:
            raise StreamlitAPIException(
                f'You set theme="{theme}" while Streamlit charts only support theme=”streamlit” or theme=None to fallback to the default library theme.'
            )
        proto = ArrowVegaLiteChartProto()
        marshall(
            proto,
            data,
            spec,
            use_container_width=use_container_width,
            theme=theme,
            **kwargs,
        )
        return self.dg._enqueue("arrow_vega_lite_chart", proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def marshall(
    proto: ArrowVegaLiteChartProto,
    data: Data = None,
    spec: Optional[Dict[str, Any]] = None,
    use_container_width: bool = False,
    theme: Union[None, Literal["streamlit"]] = "streamlit",
    **kwargs,
):
    """Construct a Vega-Lite chart object.

    See DeltaGenerator.vega_lite_chart for docs.
    """
    # The first positional argument doubles as the spec when it is a dict
    # (data then lives inside spec['datasets'] / spec['data'] and gets pulled
    # out of the spec further down).
    if isinstance(data, dict) and spec is None:
        spec, data = data, None

    # Work on a private copy, since the spec may be mutated below. A missing
    # spec starts out empty and may be filled entirely from kwargs, e.g.:
    #   marshall(proto, baz='boz')
    spec = dict(spec) if spec is not None else {}

    if kwargs:
        # kwargs take precedence over the spec. Unflattening only handles
        # string keys, which is fine because kwarg names are always strings.
        # Example: marshall(proto, {foo: 'bar'}, baz='boz')
        spec = dict(spec, **dicttools.unflatten(kwargs, _CHANNELS))

    if not spec:
        raise ValueError("Vega-Lite charts require a non-empty spec dict.")

    if "autosize" not in spec:
        spec["autosize"] = {"type": "fit", "contains": "padding"}

    # Named datasets: marshall(proto, {datasets: {foo: df1, bar: df2}, ...}).
    # Each one is marshalled separately and removed from the JSON spec.
    if "datasets" in spec:
        for dataset_name, dataset_frame in spec["datasets"].items():
            dataset = proto.datasets.add()
            dataset.name = str(dataset_name)
            dataset.has_name = True
            arrow.marshall(dataset.data, dataset_frame)
        del spec["datasets"]

    # Top-level data, in any of these shapes:
    #   marshall(proto, {data: df})
    #   marshall(proto, {data: {values: df, ...}})
    #   marshall(proto, {data: {url: 'url'}})
    #   marshall(proto, {data: {name: 'foo'}})
    if "data" in spec:
        data_spec = spec["data"]
        if isinstance(data_spec, dict):
            # Only the 'values' form carries inline data; 'url'/'name' forms
            # stay in the spec untouched.
            if "values" in data_spec:
                data = data_spec["values"]
                del spec["data"]
        else:
            data = data_spec
            del spec["data"]

    proto.spec = json.dumps(spec)
    proto.use_container_width = use_container_width
    proto.theme = theme or ""

    if data is not None:
        arrow.marshall(proto.data, data)
# See https://vega.github.io/vega-lite/docs/encoding.html
_CHANNELS = {
"x",
"y",
"x2",
"y2",
"xError",
"yError2",
"xError",
"yError2",
"longitude",
"latitude",
"color",
"opacity",
"fillOpacity",
"strokeOpacity",
"strokeWidth",
"size",
"shape",
"text",
"tooltip",
"href",
"key",
"order",
"detail",
"facet",
"row",
"column",
} | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/arrow_vega_lite.py | 0.848046 | 0.251912 | arrow_vega_lite.py | pypi |
from datetime import date
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import altair as alt
import pandas as pd
from altair.vegalite.v4.api import Chart
from pandas.api.types import infer_dtype, is_integer_dtype
from typing_extensions import Literal
import streamlit.elements.arrow_vega_lite as arrow_vega_lite
from streamlit import type_util
from streamlit.elements.arrow import Data
from streamlit.elements.utils import last_index_for_melted_dataframes
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ArrowVegaLiteChart_pb2 import (
ArrowVegaLiteChart as ArrowVegaLiteChartProto,
)
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
# Altair theme payload: asks the frontend embed to render charts with the
# Streamlit-branded Vega-Lite theme (carried through via embedOptions).
STREAMLIT_THEME = {"embedOptions": {"theme": "streamlit"}}
# Register the theme so callers can opt in with alt.themes.enable("streamlit").
alt.themes.register("streamlit", lambda: {"usermeta": STREAMLIT_THEME})
# No theme is applied globally on the Python side; the mixin methods below pass
# a `theme` value per chart instead.
alt.themes.enable("none")
class ChartType(Enum):
    """Chart flavors supported by the ``_arrow_*_chart`` helpers.

    Each value is an Altair mark name: ``_generate_chart`` calls
    ``mark_<value>()`` on the Chart object.
    """

    AREA = "area"
    BAR = "bar"
    LINE = "line"
class ArrowAltairMixin:
    @gather_metrics("_arrow_line_chart")
    def _arrow_line_chart(
        self,
        data: Data = None,
        *,
        x: Union[str, None] = None,
        y: Union[str, Sequence[str], None] = None,
        width: int = 0,
        height: int = 0,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display a line chart.

        This is syntax-sugar around st._arrow_altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        If st._arrow_line_chart does not guess the data specification
        correctly, try specifying your desired chart using st._arrow_altair_chart.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, dict or None
            Data to be plotted.
        x : str or None
            Column name to use for the x-axis. If None, uses the data index.
            This argument can only be supplied by keyword.
        y : str, sequence of str, or None
            Column name(s) to use for the y-axis. If a sequence of strings,
            draws several series on the same chart by melting your wide-format
            table into a long-format table behind the scenes. If None, draws
            the data of all remaining columns as data series.
            This argument can only be supplied by keyword.
        width : int
            The chart width in pixels. If 0, selects the width automatically.
            This argument can only be supplied by keyword.
        height : int
            The chart height in pixels. If 0, selects the height automatically.
            This argument can only be supplied by keyword.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.
            This argument can only be supplied by keyword.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st._arrow_line_chart(chart_data)
        """
        proto = ArrowVegaLiteChartProto()
        chart = _generate_chart(ChartType.LINE, data, x, y, width, height)
        marshall(proto, chart, use_container_width, theme="streamlit")
        # last_index lets add_rows continue a melted chart where it left off.
        last_index = last_index_for_melted_dataframes(data)
        return self.dg._enqueue("arrow_line_chart", proto, last_index=last_index)

    @gather_metrics("_arrow_area_chart")
    def _arrow_area_chart(
        self,
        data: Data = None,
        *,
        x: Union[str, None] = None,
        y: Union[str, Sequence[str], None] = None,
        width: int = 0,
        height: int = 0,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display an area chart.

        This is just syntax-sugar around st._arrow_altair_chart. The main
        difference is this command uses the data's own column and indices to
        figure out the chart's spec. As a result this is easier to use for many
        "just plot this" scenarios, while being less customizable.

        If st._arrow_area_chart does not guess the data specification
        correctly, try specifying your desired chart using st._arrow_altair_chart.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, or dict
            Data to be plotted.
        x : str or None
            Column name to use for the x-axis. If None, uses the data index.
            This argument can only be supplied by keyword.
        y : str, sequence of str, or None
            Column name(s) to use for the y-axis. If a sequence of strings,
            draws several series on the same chart by melting your wide-format
            table into a long-format table behind the scenes. If None, draws
            the data of all remaining columns as data series.
            This argument can only be supplied by keyword.
        width : int
            The chart width in pixels. If 0, selects the width automatically.
            This argument can only be supplied by keyword.
        height : int
            The chart height in pixels. If 0, selects the height automatically.
            This argument can only be supplied by keyword.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st._arrow_area_chart(chart_data)
        """
        proto = ArrowVegaLiteChartProto()
        chart = _generate_chart(ChartType.AREA, data, x, y, width, height)
        marshall(proto, chart, use_container_width, theme="streamlit")
        last_index = last_index_for_melted_dataframes(data)
        return self.dg._enqueue("arrow_area_chart", proto, last_index=last_index)

    @gather_metrics("_arrow_bar_chart")
    def _arrow_bar_chart(
        self,
        data: Data = None,
        *,
        x: Union[str, None] = None,
        y: Union[str, Sequence[str], None] = None,
        width: int = 0,
        height: int = 0,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display a bar chart.

        This is just syntax-sugar around st._arrow_altair_chart. The main
        difference is this command uses the data's own column and indices to
        figure out the chart's spec. As a result this is easier to use for many
        "just plot this" scenarios, while being less customizable.

        If st._arrow_bar_chart does not guess the data specification
        correctly, try specifying your desired chart using st._arrow_altair_chart.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, or dict
            Data to be plotted.
        x : str or None
            Column name to use for the x-axis. If None, uses the data index.
            This argument can only be supplied by keyword.
        y : str, sequence of str, or None
            Column name(s) to use for the y-axis. If a sequence of strings,
            draws several series on the same chart by melting your wide-format
            table into a long-format table behind the scenes. If None, draws
            the data of all remaining columns as data series.
            This argument can only be supplied by keyword.
        width : int
            The chart width in pixels. If 0, selects the width automatically.
            This argument can only be supplied by keyword.
        height : int
            The chart height in pixels. If 0, selects the height automatically.
            This argument can only be supplied by keyword.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.
            This argument can only be supplied by keyword.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st._arrow_bar_chart(chart_data)
        """
        proto = ArrowVegaLiteChartProto()
        chart = _generate_chart(ChartType.BAR, data, x, y, width, height)
        marshall(proto, chart, use_container_width, theme="streamlit")
        last_index = last_index_for_melted_dataframes(data)
        return self.dg._enqueue("arrow_bar_chart", proto, last_index=last_index)

    @gather_metrics("_arrow_altair_chart")
    def _arrow_altair_chart(
        self,
        altair_chart: Chart,
        use_container_width: bool = False,
        theme: Union[None, Literal["streamlit"]] = "streamlit",
    ) -> "DeltaGenerator":
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.
        theme : "streamlit" or None
            The theme of the chart. Currently, we only support "streamlit" for
            the Streamlit defined design or None to fallback to the default
            behavior of the library.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st._arrow_altair_chart(c, use_container_width=True)

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.
        """
        # `is not None` (identity) is the correct way to compare against None;
        # `!= None` relies on the object's __ne__ and is flagged by PEP 8.
        if theme != "streamlit" and theme is not None:
            raise StreamlitAPIException(
                f'You set theme="{theme}" while Streamlit charts only support theme=”streamlit” or theme=None to fallback to the default library theme.'
            )
        proto = ArrowVegaLiteChartProto()
        marshall(
            proto,
            altair_chart,
            use_container_width=use_container_width,
            theme=theme,
        )
        return self.dg._enqueue("arrow_vega_lite_chart", proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def _is_date_column(df: pd.DataFrame, name: str) -> bool:
"""True if the column with the given name stores datetime.date values.
This function just checks the first value in the given column, so
it's meaningful only for columns whose values all share the same type.
Parameters
----------
df : pd.DataFrame
name : str
The column name
Returns
-------
bool
"""
column = df[name]
if column.size == 0:
return False
return isinstance(column.iloc[0], date)
def _melt_data(
    data_df: pd.DataFrame,
    x_column: str,
    y_column: str,
    color_column: str,
    value_columns: Optional[List[str]] = None,
) -> pd.DataFrame:
    """Converts a wide-format dataframe to a long-format dataframe."""
    melted = pd.melt(
        data_df,
        id_vars=[x_column],
        value_vars=value_columns,
        var_name=color_column,
        value_name=y_column,
    )

    # Melting columns of different dtypes can produce an object column with
    # mixed value types; bail out when that gets out of hand.
    melted_values = melted[y_column]
    has_many_mixed_values = (
        melted_values.dtype == "object"
        and "mixed" in infer_dtype(melted_values)
        and len(melted_values.unique()) > 100
    )
    if has_many_mixed_values:
        raise StreamlitAPIException(
            "The columns used for rendering the chart contain too many values with mixed types. Please select the columns manually via the y parameter."
        )

    # Arrow has problems with object types after melting two different dtypes
    # pyarrow.lib.ArrowTypeError: "Expected a <TYPE> object, got a object"
    return type_util.fix_arrow_incompatible_column_types(
        melted, selected_columns=[x_column, color_column, y_column]
    )
def _maybe_melt(
    data_df: pd.DataFrame,
    x: Union[str, None] = None,
    y: Union[str, Sequence[str], None] = None,
) -> Tuple[pd.DataFrame, str, str, str, str, Optional[str], Optional[str]]:
    """Determines based on the selected x & y parameter, if the data needs to
    be converted to a long-format dataframe. If so, it returns the melted dataframe
    and the x, y, and color columns used for rendering the chart.

    Returns
    -------
    tuple
        (data_df, x_column, x_title, y_column, y_title, color_column,
        color_title); color_column/color_title are None when a single y
        column is used and no melting happens.

    Raises
    ------
    StreamlitAPIException
        If a referenced x/y column does not exist, or the melt would collide
        with an existing column name.
    """
    color_column: Optional[str]
    # This has to contain an empty space, otherwise the
    # full y-axis disappears (maybe a bug in vega-lite)?
    color_title: Optional[str] = " "

    y_column = "value"
    y_title = ""

    if x and isinstance(x, str):
        # x is a single string -> use for x-axis
        x_column = x
        x_title = x
        if x_column not in data_df.columns:
            # NOTE: fixed a stray typographic quote (keys”.) in this message.
            raise StreamlitAPIException(
                f"{x_column} (x parameter) was not found in the data columns or keys."
            )
    else:
        # use index for x-axis
        x_column = data_df.index.name or "index"
        x_title = ""
        data_df = data_df.reset_index()

    if y and type_util.is_sequence(y) and len(y) == 1:
        # Sequence is only a single element
        y = str(y[0])

    if y and isinstance(y, str):
        # y is a single string -> use for y-axis
        y_column = y
        y_title = y
        if y_column not in data_df.columns:
            raise StreamlitAPIException(
                f"{y_column} (y parameter) was not found in the data columns or keys."
            )
        # Set var name to None since it should not be used
        color_column = None
    elif y and type_util.is_sequence(y):
        color_column = "variable"
        # y is a list -> melt dataframe into value vars provided in y
        value_columns: List[str] = []
        for col in y:
            if str(col) not in data_df.columns:
                raise StreamlitAPIException(
                    f"{str(col)} in y parameter was not found in the data columns or keys."
                )
            value_columns.append(str(col))

        if x_column in [y_column, color_column]:
            raise StreamlitAPIException(
                f"Unable to melt the table. Please rename the columns used for x ({x_column}) or y ({y_column})."
            )
        data_df = _melt_data(data_df, x_column, y_column, color_column, value_columns)
    else:
        color_column = "variable"
        # -> data will be melted into the value prop for y
        data_df = _melt_data(data_df, x_column, y_column, color_column)

    # Only select the relevant columns required for the chart;
    # other columns can be ignored.
    relevant_columns = []
    if x_column and x_column not in relevant_columns:
        relevant_columns.append(x_column)
    if color_column and color_column not in relevant_columns:
        relevant_columns.append(color_column)
    if y_column and y_column not in relevant_columns:
        relevant_columns.append(y_column)
    data_df = data_df[relevant_columns]

    return data_df, x_column, x_title, y_column, y_title, color_column, color_title
def _generate_chart(
    chart_type: ChartType,
    data: Data,
    x: Union[str, None] = None,
    y: Union[str, Sequence[str], None] = None,
    width: int = 0,
    height: int = 0,
) -> Chart:
    """Build an Altair chart spec from the chart type and the data's columns/indices."""
    if data is None:
        # Use an empty-ish dict because if we use None the x axis labels rotate
        # 90 degrees. No idea why. Need to debug.
        data = {"": []}

    if not isinstance(data, pd.DataFrame):
        data = type_util.convert_anything_to_df(data)

    (
        data,
        x_column,
        x_title,
        y_column,
        y_title,
        color_column,
        color_title,
    ) = _maybe_melt(data, x, y)

    # Area charts with several series get slightly transparent fills.
    opacity = (
        {y_column: 0.7} if chart_type == ChartType.AREA and color_column else None
    )

    # Set the X and Y axes' scale to "utc" if they contain date values.
    # This causes time data to be displayed in UTC, rather the user's local
    # time zone. (By default, vega-lite displays time data in the browser's
    # local time zone, regardless of which time zone the data specifies:
    # https://vega.github.io/vega-lite/docs/timeunit.html#output).
    x_scale = (
        alt.Scale(type="utc") if _is_date_column(data, x_column) else alt.Undefined
    )
    y_scale = (
        alt.Scale(type="utc") if _is_date_column(data, y_column) else alt.Undefined
    )

    # Bar charts should have a discrete (ordinal) x-axis, UNLESS type is date/time
    # https://github.com/streamlit/streamlit/pull/2097#issuecomment-714802475
    if chart_type == ChartType.BAR and not _is_date_column(data, x_column):
        x_type = "ordinal"
    else:
        x_type = alt.Undefined

    # Use a max tick size of 1 for integer columns (prevents zoom into float
    # numbers) and deactivate grid lines for the x-axis.
    x_axis_config = alt.Axis(
        tickMinStep=1 if is_integer_dtype(data[x_column]) else alt.Undefined,
        grid=False,
    )
    y_axis_config = alt.Axis(
        tickMinStep=1 if is_integer_dtype(data[y_column]) else alt.Undefined
    )

    tooltips = [
        alt.Tooltip(x_column, title=x_column),
        alt.Tooltip(y_column, title=y_column),
    ]

    color = None
    if color_column:
        color = alt.Color(
            color_column,
            title=color_title,
            type="nominal",
            legend=alt.Legend(titlePadding=0, offset=10, orient="bottom"),
        )
        tooltips.append(alt.Tooltip(color_column, title="label"))

    # Dispatch to mark_area / mark_bar / mark_line via the enum value.
    base_chart = alt.Chart(data, width=width, height=height)
    marked = getattr(base_chart, "mark_" + chart_type.value)()
    chart = marked.encode(
        x=alt.X(
            x_column,
            title=x_title,
            scale=x_scale,
            type=x_type,
            axis=x_axis_config,
        ),
        y=alt.Y(y_column, title=y_title, scale=y_scale, axis=y_axis_config),
        tooltip=tooltips,
    )

    if color:
        chart = chart.encode(color=color)
    if opacity:
        chart = chart.encode(opacity=opacity)

    return chart.interactive()
def marshall(
    vega_lite_chart: ArrowVegaLiteChartProto,
    altair_chart: Chart,
    use_container_width: bool = False,
    theme: Union[None, Literal["streamlit"]] = "streamlit",
    **kwargs: Any,
) -> None:
    """Marshall chart's data into proto."""
    import altair as alt

    # altair_chart.to_dict() would normally inline each dataframe as an array
    # of dictionaries. To avoid that, install a transformer that replaces each
    # dataset with a reference named after the dataframe's object id, then
    # re-attach the real data afterwards.
    captured_datasets = {}

    def id_transform(data) -> Dict[str, str]:
        """Altair data transformer that returns a fake named dataset with the
        object id.
        """
        captured_datasets[id(data)] = data
        return {"name": str(id(data))}

    alt.data_transformers.register("id", id_transform)

    with alt.data_transformers.enable("id"):
        chart_dict = altair_chart.to_dict()

        # Put the untransformed dataframes back under their id-based names.
        chart_dict["datasets"] = captured_datasets

        arrow_vega_lite.marshall(
            vega_lite_chart,
            chart_dict,
            use_container_width=use_container_width,
            theme=theme,
            **kwargs,
        )
from typing import TYPE_CHECKING, Union, cast
from typing_extensions import TypeAlias
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Progress_pb2 import Progress as ProgressProto
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
# Currently, equates to just float, but we can't use `numbers.Real` due to
# https://github.com/python/mypy/issues/3186
FloatOrInt: TypeAlias = Union[int, float]
class ProgressMixin:
    def progress(self, value: FloatOrInt) -> "DeltaGenerator":
        """Display a progress bar.

        Parameters
        ----------
        value : int or float
            0 <= value <= 100 for int
            0.0 <= value <= 1.0 for float

        Example
        -------
        Here is an example of a progress bar increasing over time:

        >>> import streamlit as st
        >>> import time
        >>>
        >>> my_bar = st.progress(0)
        >>>
        >>> for percent_complete in range(100):
        ...     time.sleep(0.1)
        ...     my_bar.progress(percent_complete + 1)
        """
        # TODO: standardize numerical type checking across st.* functions.
        progress_proto = ProgressProto()
        if isinstance(value, int):
            # ints are interpreted as a percentage in [0, 100].
            if not 0 <= value <= 100:
                raise StreamlitAPIException(
                    "Progress Value has invalid value [0, 100]: %d" % value
                )
            progress_proto.value = value
        elif isinstance(value, float):
            # floats are interpreted as a fraction in [0.0, 1.0].
            if not 0.0 <= value <= 1.0:
                raise StreamlitAPIException(
                    "Progress Value has invalid value [0.0, 1.0]: %f" % value
                )
            progress_proto.value = int(value * 100)
        else:
            raise StreamlitAPIException(
                "Progress Value has invalid type: %s" % type(value).__name__
            )
        return self.dg._enqueue("progress", progress_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
from dataclasses import dataclass
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generic,
List,
Optional,
Sequence,
Union,
cast,
overload,
)
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
ensure_indexable,
is_iterable,
is_type,
maybe_raise_label_warnings,
to_key,
)
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
@overload
def _check_and_convert_to_indices(  # type: ignore[misc]
    opt: Sequence[Any], default_values: None
) -> Optional[List[int]]:
    ...


@overload
def _check_and_convert_to_indices(
    opt: Sequence[Any], default_values: Union[Sequence[Any], Any]
) -> List[int]:
    ...


def _check_and_convert_to_indices(
    opt: Sequence[Any], default_values: Union[Sequence[Any], Any, None]
) -> Optional[List[int]]:
    """Perform validation checks and return indices based on the default values."""
    if default_values is None and None not in opt:
        return None

    if not isinstance(default_values, list):
        # The array-like check runs first: evaluating `not x` on a
        # pd.Series or np.ndarray raises ValueError.
        if is_type(default_values, "numpy.ndarray") or is_type(
            default_values, "pandas.core.series.Series"
        ):
            default_values = list(cast(Sequence[Any], default_values))
        elif not default_values or default_values in opt:
            # A falsy scalar (including None when it is a valid option) or a
            # scalar that is itself an option is wrapped in a singleton list.
            default_values = [default_values]
        else:
            default_values = list(default_values)

    if any(value not in opt for value in default_values):
        raise StreamlitAPIException(
            "Every Multiselect default value must exist in options"
        )
    return [opt.index(value) for value in default_values]
def _get_default_count(default: Union[Sequence[Any], Any, None]) -> int:
    """Return how many default selections *default* represents."""
    if default is None:
        return 0
    if is_iterable(default):
        return len(cast(Sequence[Any], default))
    # A single scalar default counts as one selection.
    return 1
def _get_over_max_options_message(current_selections: int, max_selections: int):
curr_selections_noun = "option" if current_selections == 1 else "options"
max_selections_noun = "option" if max_selections == 1 else "options"
return f"""
Multiselect has {current_selections} {curr_selections_noun} selected but `max_selections`
is set to {max_selections}. This happened because you either gave too many options to `default`
or you manipulated the widget's state through `st.session_state`. Note that
the latter can happen before the line indicated in the traceback.
Please select at most {max_selections} {max_selections_noun}.
"""
@dataclass
class MultiSelectSerde(Generic[T]):
    """Serializes between option values and the index lists the frontend uses."""

    # The full option sequence shown in the widget.
    options: Sequence[T]
    # Indices into `options` used when the frontend has sent no value yet.
    default_value: List[int]

    def serialize(self, value: List[T]) -> List[int]:
        # Validates the values against `options` and maps each to its index.
        return _check_and_convert_to_indices(self.options, value)

    def deserialize(
        self,
        ui_value: Optional[List[int]],
        widget_id: str = "",
    ) -> List[T]:
        # Fall back to the default indices when the frontend sent nothing.
        selected_indices = self.default_value if ui_value is None else ui_value
        return [self.options[index] for index in selected_indices]
class MultiSelectMixin:
    """Mixin that provides the ``st.multiselect`` widget."""

    @gather_metrics("multiselect")
    def multiselect(
        self,
        label: str,
        options: OptionSequence[T],
        default: Optional[Any] = None,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        max_selections: Optional[int] = None,
    ) -> List[T]:
        """Display a multiselect widget.

        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        options : Sequence[V], numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index
            Labels for the select options. This will be cast to str internally
            by default. For pandas.DataFrame, the first column is selected.
        default: [V], V, or None
            List of default values. Can also be a single value.
        format_func : function
            Function to modify the display of selectbox options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the multiselect.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the multiselect.
        on_change : callable
            An optional callback invoked when this multiselect's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the multiselect widget if set
            to True. The default is False. This argument can only be supplied
            by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.
        max_selections : int
            The max selections that can be selected at a time.
            This argument can only be supplied by keyword.

        Returns
        -------
        list
            A list with the selected options

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. output::
           https://doc-multiselect.streamlitapp.com/
           height: 420px

        """
        # Capture the script-run context here so the private implementation
        # can also be driven with an explicit ctx (e.g. from tests).
        ctx = get_script_run_ctx()
        return self._multiselect(
            label=label,
            options=options,
            default=default,
            format_func=format_func,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
            max_selections=max_selections,
        )

    def _multiselect(
        self,
        label: str,
        options: OptionSequence[T],
        default: Union[Sequence[Any], Any, None] = None,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
        max_selections: Optional[int] = None,
    ) -> List[T]:
        """Internal implementation of ``multiselect``; see its docstring."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        # A default and a session_state value must not both be set for this key.
        check_session_state_rules(default_value=default, key=key)
        opt = ensure_indexable(options)
        maybe_raise_label_warnings(label, label_visibility)
        # Raises StreamlitAPIException if any default is not in the options.
        indices = _check_and_convert_to_indices(opt, default)
        multiselect_proto = MultiSelectProto()
        multiselect_proto.label = label
        default_value: List[int] = [] if indices is None else indices
        multiselect_proto.default[:] = default_value
        # Options are displayed through format_func but always stored as str.
        multiselect_proto.options[:] = [str(format_func(option)) for option in opt]
        multiselect_proto.form_id = current_form_id(self.dg)
        # 0 signals "no limit" on the frontend.
        multiselect_proto.max_selections = max_selections or 0
        if help is not None:
            multiselect_proto.help = dedent(help)
        serde = MultiSelectSerde(opt, default_value)
        widget_state = register_widget(
            "multiselect",
            multiselect_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # widget_state.value may come from `default` or from st.session_state,
        # so the max_selections check has to happen after registration.
        default_count = _get_default_count(widget_state.value)
        if max_selections and default_count > max_selections:
            raise StreamlitAPIException(
                _get_over_max_options_message(default_count, max_selections)
            )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        multiselect_proto.disabled = disabled
        multiselect_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if widget_state.value_changed:
            multiselect_proto.value[:] = serde.serialize(widget_state.value)
            multiselect_proto.set_value = True
        self.dg._enqueue("multiselect", multiselect_proto)
        return widget_state.value

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
from typing import TYPE_CHECKING, Optional, cast
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Alert_pb2 import Alert as AlertProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text, is_emoji
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
from streamlit.type_util import SupportsStr
def validate_emoji(maybe_emoji: Optional[str]) -> str:
    """Validate an alert icon string.

    Returns the emoji unchanged, or "" when no icon was given; raises
    StreamlitAPIException for anything that is not a single emoji character.
    """
    if maybe_emoji is None:
        return ""
    if not is_emoji(maybe_emoji):
        raise StreamlitAPIException(
            f'The value "{maybe_emoji}" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.'
        )
    return maybe_emoji
class AlertMixin:
    """Mixin providing the st.error / st.warning / st.info / st.success alerts."""

    def _enqueue_alert(
        self, body: "SupportsStr", icon: Optional[str], alert_format: int
    ) -> "DeltaGenerator":
        # Shared marshalling path for all four alert flavors.
        alert_proto = AlertProto()
        alert_proto.body = clean_text(body)
        alert_proto.icon = validate_emoji(icon)
        alert_proto.format = alert_format
        return self.dg._enqueue("alert", alert_proto)

    @gather_metrics("error")
    def error(
        self,
        body: "SupportsStr",
        *,  # keyword-only args:
        icon: Optional[str] = None,
    ) -> "DeltaGenerator":
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.
        icon : str or None
            An optional, keyword-only argument that specifies an emoji to use as
            the icon for the alert. Shortcodes are not allowed, please use a
            single character instead. E.g. "🚨", "🔥", "🤖", etc.
            Defaults to None, which means no icon is displayed.

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> st.error('This is an error', icon="🚨")

        """
        return self._enqueue_alert(body, icon, AlertProto.ERROR)

    @gather_metrics("warning")
    def warning(
        self,
        body: "SupportsStr",
        *,  # keyword-only args:
        icon: Optional[str] = None,
    ) -> "DeltaGenerator":
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.
        icon : str or None
            An optional, keyword-only argument that specifies an emoji to use as
            the icon for the alert. Shortcodes are not allowed, please use a
            single character instead. E.g. "🚨", "🔥", "🤖", etc.
            Defaults to None, which means no icon is displayed.

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> st.warning('This is a warning', icon="⚠️")

        """
        return self._enqueue_alert(body, icon, AlertProto.WARNING)

    @gather_metrics("info")
    def info(
        self,
        body: "SupportsStr",
        *,  # keyword-only args:
        icon: Optional[str] = None,
    ) -> "DeltaGenerator":
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.
        icon : str or None
            An optional, keyword-only argument that specifies an emoji to use as
            the icon for the alert. Shortcodes are not allowed, please use a
            single character instead. E.g. "🚨", "🔥", "🤖", etc.
            Defaults to None, which means no icon is displayed.

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> st.info('This is a purely informational message', icon="ℹ️")

        """
        return self._enqueue_alert(body, icon, AlertProto.INFO)

    @gather_metrics("success")
    def success(
        self,
        body: "SupportsStr",
        *,  # keyword-only args:
        icon: Optional[str] = None,
    ) -> "DeltaGenerator":
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.
        icon : str or None
            An optional, keyword-only argument that specifies an emoji to use as
            the icon for the alert. Shortcodes are not allowed, please use a
            single character instead. E.g. "🚨", "🔥", "🤖", etc.
            Defaults to None, which means no icon is displayed.

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> st.success('This is a success message!', icon="✅")

        """
        return self._enqueue_alert(body, icon, AlertProto.SUCCESS)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
from typing import TYPE_CHECKING, Optional, cast
from streamlit.proto.Heading_pb2 import Heading as HeadingProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text
from streamlit.type_util import SupportsStr
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
class HeadingMixin:
    """Mixin providing the st.title / st.header / st.subheader elements."""

    def _enqueue_heading(
        self, tag: str, body: SupportsStr, anchor: Optional[str]
    ) -> "DeltaGenerator":
        # Shared marshalling path for all heading levels (h1/h2/h3).
        heading_proto = HeadingProto()
        heading_proto.tag = tag
        if anchor is not None:
            heading_proto.anchor = anchor
        heading_proto.body = clean_text(body)
        return self.dg._enqueue("heading", heading_proto)

    @gather_metrics("header")
    def header(
        self, body: SupportsStr, anchor: Optional[str] = None
    ) -> "DeltaGenerator":
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display as Github-flavored Markdown. Syntax
            information can be found at: https://github.github.com/gfm.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.
        anchor : str
            The anchor name of the header that can be accessed with #anchor
            in the URL. If omitted, it generates an anchor using the body.

        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> st.header('This is a header')
        >>> st.header('A header with _italics_ :blue[colors] and emojis :sunglasses:')

        """
        return self._enqueue_heading("h2", body, anchor)

    @gather_metrics("subheader")
    def subheader(
        self, body: SupportsStr, anchor: Optional[str] = None
    ) -> "DeltaGenerator":
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display as Github-flavored Markdown. Syntax
            information can be found at: https://github.github.com/gfm.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.
        anchor : str
            The anchor name of the header that can be accessed with #anchor
            in the URL. If omitted, it generates an anchor using the body.

        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> st.subheader('This is a subheader')
        >>> st.subheader('A subheader with _italics_ :blue[colors] and emojis :sunglasses:')

        """
        return self._enqueue_heading("h3", body, anchor)

    @gather_metrics("title")
    def title(
        self, body: SupportsStr, anchor: Optional[str] = None
    ) -> "DeltaGenerator":
        """Display text in title formatting.

        Each document should have a single `st.title()`, although this is not
        enforced.

        Parameters
        ----------
        body : str
            The text to display as Github-flavored Markdown. Syntax
            information can be found at: https://github.github.com/gfm.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.
        anchor : str
            The anchor name of the header that can be accessed with #anchor
            in the URL. If omitted, it generates an anchor using the body.

        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> st.title('This is a title')
        >>> st.title('A title with _italics_ :blue[colors] and emojis :sunglasses:')

        """
        return self._enqueue_heading("h1", body, anchor)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
import re
from dataclasses import dataclass
from textwrap import dedent
from typing import Optional, cast
import streamlit
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ColorPicker_pb2 import ColorPicker as ColorPickerProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
@dataclass
class ColorPickerSerde:
    """Round-trips the color picker's hex-string value for the frontend."""

    value: str

    def serialize(self, v: str) -> str:
        return str(v)

    def deserialize(self, ui_value: Optional[str], widget_id: str = "") -> str:
        # Fall back to the widget's default until the frontend reports a value.
        chosen = self.value if ui_value is None else ui_value
        return str(chosen)
class ColorPickerMixin:
    """Mixin that provides the ``st.color_picker`` widget."""

    @gather_metrics("color_picker")
    def color_picker(
        self,
        label: str,
        value: Optional[str] = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> str:
        """Display a color picker widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        value : str
            The hex value of this widget when it first renders. If None,
            defaults to black.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the color picker.
        on_change : callable
            An optional callback invoked when this color_picker's value
            changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the color picker if set to
            True. The default is False. This argument can only be supplied by
            keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn’t show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.

        Returns
        -------
        str
            The selected color as a hex string.

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> color = st.color_picker('Pick A Color', '#00f900')
        >>> st.write('The current color is', color)

        .. output::
           https://doc-color-picker.streamlitapp.com/
           height: 335px

        """
        # Capture the script-run context here so the private implementation
        # can also be driven with an explicit ctx (e.g. from tests).
        ctx = get_script_run_ctx()
        return self._color_picker(
            label=label,
            value=value,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )

    def _color_picker(
        self,
        label: str,
        value: Optional[str] = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> str:
        """Internal implementation of ``color_picker``; see its docstring."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        # A default and a session_state value must not both be set for this key.
        check_session_state_rules(default_value=value, key=key)
        maybe_raise_label_warnings(label, label_visibility)
        # set value default
        if value is None:
            value = "#000000"
        # make sure the value is a string
        if not isinstance(value, str):
            raise StreamlitAPIException(
                """
                Color Picker Value has invalid type: %s. Expects a hex string
                like '#00FFAA' or '#000'.
                """
                % type(value).__name__
            )
        # validate the value and expects a hex string
        match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value)
        if not match:
            raise StreamlitAPIException(
                """
                '%s' is not a valid hex code for colors. Valid ones are like
                '#00FFAA' or '#000'.
                """
                % value
            )
        color_picker_proto = ColorPickerProto()
        color_picker_proto.label = label
        color_picker_proto.default = str(value)
        color_picker_proto.form_id = current_form_id(self.dg)
        if help is not None:
            color_picker_proto.help = dedent(help)
        serde = ColorPickerSerde(value)
        widget_state = register_widget(
            "color_picker",
            color_picker_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        color_picker_proto.disabled = disabled
        color_picker_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if widget_state.value_changed:
            color_picker_proto.value = widget_state.value
            color_picker_proto.set_value = True
        self.dg._enqueue("color_picker", color_picker_proto)
        return widget_state.value

    @property
    def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("streamlit.delta_generator.DeltaGenerator", self)
from dataclasses import dataclass
from textwrap import dedent
from typing import Optional, cast
import streamlit
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.TextArea_pb2 import TextArea as TextAreaProto
from streamlit.proto.TextInput_pb2 import TextInput as TextInputProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import (
Key,
LabelVisibility,
SupportsStr,
maybe_raise_label_warnings,
to_key,
)
@dataclass
class TextInputSerde:
    """Round-trips a text_input's string value for the frontend."""

    value: SupportsStr

    def deserialize(self, ui_value: Optional[str], widget_id: str = "") -> str:
        # Fall back to the declared default until the frontend reports a value.
        current = self.value if ui_value is None else ui_value
        return str(current)

    def serialize(self, v: str) -> str:
        return v
@dataclass
class TextAreaSerde:
    """Round-trips a text_area's string value for the frontend."""

    value: SupportsStr

    def deserialize(self, ui_value: Optional[str], widget_id: str = "") -> str:
        # Fall back to the declared default until the frontend reports a value.
        current = self.value if ui_value is None else ui_value
        return str(current)

    def serialize(self, v: str) -> str:
        return v
class TextWidgetsMixin:
@gather_metrics("text_input")
def text_input(
self,
label: str,
value: SupportsStr = "",
max_chars: Optional[int] = None,
key: Optional[Key] = None,
type: str = "default",
help: Optional[str] = None,
autocomplete: Optional[str] = None,
on_change: Optional[WidgetCallback] = None,
args: Optional[WidgetArgs] = None,
kwargs: Optional[WidgetKwargs] = None,
*, # keyword-only arguments:
placeholder: Optional[str] = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
) -> str:
"""Display a single-line text input widget.
Parameters
----------
label : str
A short label explaining to the user what this input is for.
The label can optionally contain Markdown and supports the following
elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.
This also supports:
* Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
For a list of all supported codes,
see https://share.streamlit.io/streamlit/emoji-shortcodes.
* LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
must be on their own lines). Supported LaTeX functions are listed
at https://katex.org/docs/supported.html.
* Colored text, using the syntax ``:color[text to be colored]``,
where ``color`` needs to be replaced with any of the following
supported colors: blue, green, orange, red, violet.
For accessibility reasons, you should never set an empty label (label="")
but hide it with label_visibility if needed. In the future, we may disallow
empty labels by raising an exception.
value : object
The text value of this widget when it first renders. This will be
cast to str internally.
max_chars : int or None
Max number of characters allowed in text input.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. Multiple widgets of the same type may
not share the same key.
type : str
The type of the text input. This can be either "default" (for
a regular text input), or "password" (for a text input that
masks the user's typed value). Defaults to "default".
help : str
An optional tooltip that gets displayed next to the input.
autocomplete : str
An optional value that will be passed to the <input> element's
autocomplete property. If unspecified, this value will be set to
"new-password" for "password" inputs, and the empty string for
"default" inputs. For more details, see https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete
on_change : callable
An optional callback invoked when this text_input's value changes.
args : tuple
An optional tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
placeholder : str or None
An optional string displayed when the text input is empty. If None,
no text is displayed. This argument can only be supplied by keyword.
disabled : bool
An optional boolean, which disables the text input if set to True.
The default is False. This argument can only be supplied by keyword.
label_visibility : "visible" or "hidden" or "collapsed"
The visibility of the label. If "hidden", the label doesn't show but there
is still empty space for it above the widget (equivalent to label="").
If "collapsed", both the label and the space are removed. Default is
"visible". This argument can only be supplied by keyword.
Returns
-------
str
The current value of the text input widget.
Example
-------
>>> import streamlit as st
>>>
>>> title = st.text_input('Movie title', 'Life of Brian')
>>> st.write('The current movie title is', title)
.. output::
https://doc-text-input.streamlitapp.com/
height: 260px
"""
ctx = get_script_run_ctx()
return self._text_input(
label=label,
value=value,
max_chars=max_chars,
key=key,
type=type,
help=help,
autocomplete=autocomplete,
on_change=on_change,
args=args,
kwargs=kwargs,
placeholder=placeholder,
disabled=disabled,
label_visibility=label_visibility,
ctx=ctx,
)
def _text_input(
self,
label: str,
value: SupportsStr = "",
max_chars: Optional[int] = None,
key: Optional[Key] = None,
type: str = "default",
help: Optional[str] = None,
autocomplete: Optional[str] = None,
on_change: Optional[WidgetCallback] = None,
args: Optional[WidgetArgs] = None,
kwargs: Optional[WidgetKwargs] = None,
*, # keyword-only arguments:
placeholder: Optional[str] = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
ctx: Optional[ScriptRunContext] = None,
) -> str:
key = to_key(key)
check_callback_rules(self.dg, on_change)
check_session_state_rules(default_value=None if value == "" else value, key=key)
maybe_raise_label_warnings(label, label_visibility)
text_input_proto = TextInputProto()
text_input_proto.label = label
text_input_proto.default = str(value)
text_input_proto.form_id = current_form_id(self.dg)
if help is not None:
text_input_proto.help = dedent(help)
if max_chars is not None:
text_input_proto.max_chars = max_chars
if placeholder is not None:
text_input_proto.placeholder = str(placeholder)
if type == "default":
text_input_proto.type = TextInputProto.DEFAULT
elif type == "password":
text_input_proto.type = TextInputProto.PASSWORD
else:
raise StreamlitAPIException(
"'%s' is not a valid text_input type. Valid types are 'default' and 'password'."
% type
)
# Marshall the autocomplete param. If unspecified, this will be
# set to "new-password" for password inputs.
if autocomplete is None:
autocomplete = "new-password" if type == "password" else ""
text_input_proto.autocomplete = autocomplete
serde = TextInputSerde(value)
widget_state = register_widget(
"text_input",
text_input_proto,
user_key=key,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
)
# This needs to be done after register_widget because we don't want
# the following proto fields to affect a widget's ID.
text_input_proto.disabled = disabled
text_input_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
if widget_state.value_changed:
text_input_proto.value = widget_state.value
text_input_proto.set_value = True
self.dg._enqueue("text_input", text_input_proto)
return widget_state.value
@gather_metrics("text_area")
def text_area(
self,
label: str,
value: SupportsStr = "",
height: Optional[int] = None,
max_chars: Optional[int] = None,
key: Optional[Key] = None,
help: Optional[str] = None,
on_change: Optional[WidgetCallback] = None,
args: Optional[WidgetArgs] = None,
kwargs: Optional[WidgetKwargs] = None,
*, # keyword-only arguments:
placeholder: Optional[str] = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
) -> str:
"""Display a multi-line text input widget.
Parameters
----------
label : str
A short label explaining to the user what this input is for.
The label can optionally contain Markdown and supports the following
elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.
This also supports:
* Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
For a list of all supported codes,
see https://share.streamlit.io/streamlit/emoji-shortcodes.
* LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
must be on their own lines). Supported LaTeX functions are listed
at https://katex.org/docs/supported.html.
* Colored text, using the syntax ``:color[text to be colored]``,
where ``color`` needs to be replaced with any of the following
supported colors: blue, green, orange, red, violet.
For accessibility reasons, you should never set an empty label (label="")
but hide it with label_visibility if needed. In the future, we may disallow
empty labels by raising an exception.
value : object
The text value of this widget when it first renders. This will be
cast to str internally.
height : int or None
Desired height of the UI element expressed in pixels. If None, a
default height is used.
max_chars : int or None
Maximum number of characters allowed in text area.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. Multiple widgets of the same type may
not share the same key.
help : str
An optional tooltip that gets displayed next to the textarea.
on_change : callable
An optional callback invoked when this text_area's value changes.
args : tuple
An optional tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
placeholder : str or None
An optional string displayed when the text area is empty. If None,
no text is displayed. This argument can only be supplied by keyword.
disabled : bool
An optional boolean, which disables the text area if set to True.
The default is False. This argument can only be supplied by keyword.
label_visibility : "visible" or "hidden" or "collapsed"
The visibility of the label. If "hidden", the label doesn't show but there
is still empty space for it above the widget (equivalent to label="").
If "collapsed", both the label and the space are removed. Default is
"visible". This argument can only be supplied by keyword.
Returns
-------
str
The current value of the text input widget.
Example
-------
>>> import streamlit as st
>>>
>>> txt = st.text_area('Text to analyze', '''
... It was the best of times, it was the worst of times, it was
... the age of wisdom, it was the age of foolishness, it was
... the epoch of belief, it was the epoch of incredulity, it
... was the season of Light, it was the season of Darkness, it
... was the spring of hope, it was the winter of despair, (...)
... ''')
>>> st.write('Sentiment:', run_sentiment_analysis(txt))
"""
ctx = get_script_run_ctx()
return self._text_area(
label=label,
value=value,
height=height,
max_chars=max_chars,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
placeholder=placeholder,
disabled=disabled,
label_visibility=label_visibility,
ctx=ctx,
)
    def _text_area(
        self,
        label: str,
        value: SupportsStr = "",
        height: Optional[int] = None,
        max_chars: Optional[int] = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        placeholder: Optional[str] = None,
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> str:
        """Internal implementation behind ``st.text_area``.

        Builds the TextArea proto, registers the widget with the runtime,
        enqueues it, and returns the widget's current string value. See the
        public ``text_area`` docstring for parameter semantics.
        """
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        # Only a non-empty default counts as a "default value" for the
        # session-state double-set warning.
        check_session_state_rules(default_value=None if value == "" else value, key=key)
        maybe_raise_label_warnings(label, label_visibility)
        text_area_proto = TextAreaProto()
        text_area_proto.label = label
        # `value` may be any object supporting str(); the proto stores a string.
        text_area_proto.default = str(value)
        text_area_proto.form_id = current_form_id(self.dg)
        if help is not None:
            text_area_proto.help = dedent(help)
        if height is not None:
            text_area_proto.height = height
        if max_chars is not None:
            text_area_proto.max_chars = max_chars
        if placeholder is not None:
            text_area_proto.placeholder = str(placeholder)
        serde = TextAreaSerde(value)
        widget_state = register_widget(
            "text_area",
            text_area_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        text_area_proto.disabled = disabled
        text_area_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if widget_state.value_changed:
            # Session state changed the value; echo it back to the frontend.
            text_area_proto.value = widget_state.value
            text_area_proto.set_value = True
        self.dg._enqueue("text_area", text_area_proto)
        return widget_state.value
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/text_widgets.py | 0.94023 | 0.242183 | text_widgets.py | pypi |
import io
from typing import TYPE_CHECKING, Any, Optional, cast
from typing_extensions import Final
import streamlit.elements.image as image_utils
from streamlit import config
from streamlit.errors import StreamlitDeprecationWarning
from streamlit.logger import get_logger
from streamlit.proto.Image_pb2 import ImageList as ImageListProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from matplotlib.figure import Figure
from streamlit.delta_generator import DeltaGenerator
LOGGER: Final = get_logger(__name__)
class PyplotMixin:
    @gather_metrics("pyplot")
    def pyplot(
        self,
        fig: Optional["Figure"] = None,
        clear_figure: Optional[bool] = None,
        **kwargs: Any,
    ) -> "DeltaGenerator":
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to render. Calling without a figure renders the
            global pyplot figure, which is deprecated (a deprecation
            warning is shown unless ``deprecation.showPyplotGlobalUse``
            is disabled): the global figure object is not thread-safe,
            so always pass an explicit figure.
        clear_figure : bool
            Whether to clear the figure after rendering. When left
            unspecified, defaults to False if ``fig`` is given and to
            True otherwise (mirroring Jupyter's behavior).
        **kwargs : any
            Extra keyword arguments forwarded to Matplotlib's
            ``savefig`` function.

        Example
        -------
        >>> import streamlit as st
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> fig, ax = plt.subplots()
        >>> ax.hist(arr, bins=20)
        >>>
        >>> st.pyplot(fig)

        Notes
        -----
        If Matplotlib errors under Streamlit, try switching its backend
        to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        See https://matplotlib.org/faq/usage_faq.html for details.
        """
        # Surface the deprecation warning for global-figure usage.
        if not fig and config.get_option("deprecation.showPyplotGlobalUse"):
            self.dg.exception(PyplotGlobalUseWarning())
        proto = ImageListProto()
        marshall(self.dg._get_delta_path_str(), proto, fig, clear_figure, **kwargs)
        return self.dg._enqueue("imgs", proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """The DeltaGenerator this mixin is bound to."""
        return cast("DeltaGenerator", self)
def marshall(
    coordinates: str,
    image_list_proto: ImageListProto,
    fig: Optional["Figure"] = None,
    clear_figure: Optional[bool] = True,
    **kwargs: Any,
) -> None:
    """Render *fig* (or the global pyplot figure) into *image_list_proto*.

    Parameters
    ----------
    coordinates : str
        Delta path identifying the element on the frontend.
    image_list_proto : ImageListProto
        Proto the rendered PNG is marshalled into.
    fig : Matplotlib Figure or None
        The figure to render. When falsy, the global pyplot figure is
        rendered instead (deprecated) and ``clear_figure`` defaults to True.
    clear_figure : bool or None
        Whether to clear the figure after rendering.
    **kwargs : any
        Extra arguments forwarded to Matplotlib's ``savefig``.

    Raises
    ------
    ImportError
        If matplotlib is not installed.
    """
    try:
        # Importing pyplot also imports the top-level matplotlib package,
        # so a separate bare `import matplotlib` is redundant.
        import matplotlib.pyplot as plt

        plt.ioff()
    except ImportError as exc:
        # Chain the original error so the real cause stays visible.
        raise ImportError("pyplot() command requires matplotlib") from exc

    # You can call .savefig() on a Figure object or directly on the pyplot
    # module, in which case you're doing it to the latest Figure.
    if not fig:
        if clear_figure is None:
            clear_figure = True

        fig = plt

    # Normally, dpi is set to 'figure', and the figure's dpi is set to 100.
    # So here we pick double of that to make things look good in a high
    # DPI display.
    options = {"bbox_inches": "tight", "dpi": 200, "format": "png"}

    # If some options are passed in from kwargs then replace the values in
    # options with the ones from kwargs
    options = {a: kwargs.get(a, b) for a, b in options.items()}

    # Merge options back into kwargs.
    kwargs.update(options)

    image = io.BytesIO()
    fig.savefig(image, **kwargs)
    image_utils.marshall_images(
        coordinates=coordinates,
        image=image,
        caption=None,
        width=-2,
        proto_imgs=image_list_proto,
        clamp=False,
        channels="RGB",
        output_format="PNG",
    )

    # Clear the figure after rendering it. This means that subsequent
    # plt calls will be starting fresh.
    if clear_figure:
        fig.clf()
class PyplotGlobalUseWarning(StreamlitDeprecationWarning):
    """Deprecation warning emitted when st.pyplot() is called with no figure."""

    def __init__(self) -> None:
        super().__init__(
            msg=self._get_message(), config_option="deprecation.showPyplotGlobalUse"
        )

    def _get_message(self) -> str:
        """Return the user-facing deprecation message."""
        return """
You are calling `st.pyplot()` without any arguments. After December 1st, 2020,
we will remove the ability to do this as it requires the use of Matplotlib's global
figure object, which is not thread-safe.
To future-proof this code, you should pass in a figure as shown below:
```python
>>> fig, ax = plt.subplots()
>>> ax.scatter([1, 2, 3], [1, 2, 3])
>>> ... other plotting actions ...
>>> st.pyplot(fig)
```
"""
from typing import TYPE_CHECKING, Any, Hashable, Optional, Union, cast
import streamlit
from streamlit import runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
from streamlit.type_util import DataFrameCompatible
def last_index_for_melted_dataframes(
    data: Union["DataFrameCompatible", Any]
) -> Optional[Hashable]:
    """Return the last index label of *data*, or None.

    None is returned when *data* is not dataframe-compatible or when the
    converted frame has an empty index.
    """
    if not type_util.is_dataframe_compatible(data):
        return None
    frame = type_util.convert_anything_to_df(data)
    if frame.index.size == 0:
        return None
    return cast(Hashable, frame.index[-1])
def check_callback_rules(
    dg: "DeltaGenerator", on_change: Optional[WidgetCallback]
) -> None:
    """Raise when a non-submit widget inside a form declares a callback."""
    if on_change is None or not runtime.exists():
        return
    if is_in_form(dg):
        raise StreamlitAPIException(
            "With forms, callbacks can only be defined on the `st.form_submit_button`."
            " Defining callbacks on other widgets inside a form is not allowed."
        )
_shown_default_value_warning: bool = False
def check_session_state_rules(
    default_value: Any, key: Optional[str], writes_allowed: bool = True
) -> None:
    """Validate how a widget's default interacts with st.session_state.

    Warns (at most once per process) when a widget both declares a default
    value and has its value injected through the Session State API, and
    raises when session-state writes are not allowed for this widget type.
    """
    global _shown_default_value_warning
    # Nothing to check without a key or a running Streamlit runtime.
    if key is None or not runtime.exists():
        return
    # Only values freshly injected via the Session State API are relevant.
    if not get_session_state().is_new_state_value(key):
        return
    if not writes_allowed:
        raise StreamlitAPIException(
            "Values for st.button, st.download_button, st.file_uploader, and "
            "st.form cannot be set using st.session_state."
        )
    if default_value is None or _shown_default_value_warning:
        return
    streamlit.warning(
        f'The widget with key "{key}" was created with a default value but'
        " also had its value set via the Session State API."
    )
    _shown_default_value_warning = True
def get_label_visibility_proto_value(
    label_visibility_string: type_util.LabelVisibility,
) -> "LabelVisibilityMessage.LabelVisibilityOptions.ValueType":
    """Map a label-visibility string to its LabelVisibilityMessage enum constant."""
    options = LabelVisibilityMessage.LabelVisibilityOptions
    if label_visibility_string == "visible":
        return options.VISIBLE
    if label_visibility_string == "hidden":
        return options.HIDDEN
    if label_visibility_string == "collapsed":
        return options.COLLAPSED
    raise ValueError(f"Unknown label visibility value: {label_visibility_string}")
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Sequence, cast
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Radio_pb2 import Radio as RadioProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
ensure_indexable,
maybe_raise_label_warnings,
to_key,
)
from streamlit.util import index_
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
@dataclass
class RadioSerde(Generic[T]):
    """Converts between a radio widget's option value and its proto index."""

    # The selectable options, in display order.
    options: Sequence[T]
    # Index of the preselected default option.
    index: int

    def serialize(self, v: object) -> int:
        """Return the position of option *v*; an empty option list maps to 0."""
        if not self.options:
            return 0
        return index_(self.options, v)

    def deserialize(
        self,
        ui_value: Optional[int],
        widget_id: str = "",
    ) -> Optional[T]:
        """Resolve the frontend's index (or the preset default) to an option."""
        # Fall back to the configured default when the widget is untouched.
        chosen = self.index if ui_value is None else ui_value
        if not self.options:
            return None
        return self.options[chosen]
class RadioMixin:
    @gather_metrics("radio")
    def radio(
        self,
        label: str,
        options: OptionSequence[T],
        index: int = 0,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only args:
        disabled: bool = False,
        horizontal: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> Optional[T]:
        r"""Display a radio button widget.
        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.
            This also supports:
            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.
            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.
            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.
            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        options : Sequence, numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index
            Labels for the radio options. This will be cast to str internally
            by default. For pandas.DataFrame, the first column is selected.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the radio.
        on_change : callable
            An optional callback invoked when this radio's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the radio button if set to
            True. The default is False. This argument can only be supplied by
            keyword.
        horizontal : bool
            An optional boolean, which orients the radio group horizontally.
            The default is false (vertical buttons). This argument can only
            be supplied by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.
        Returns
        -------
        any
            The selected option.
        Example
        -------
        >>> import streamlit as st
        >>>
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
        ...     st.write("You didn\'t select comedy.")
        .. output::
           https://doc-radio.streamlitapp.com/
           height: 260px
        """
        # Thin public wrapper: capture the script-run context and delegate.
        ctx = get_script_run_ctx()
        return self._radio(
            label=label,
            options=options,
            index=index,
            format_func=format_func,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            horizontal=horizontal,
            ctx=ctx,
            label_visibility=label_visibility,
        )
    def _radio(
        self,
        label: str,
        options: OptionSequence[T],
        index: int = 0,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only args:
        disabled: bool = False,
        horizontal: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext],
    ) -> Optional[T]:
        """Internal implementation of ``radio``: validates arguments, builds
        the proto, registers the widget, and returns the selected option."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        # Only a non-default index counts as a "default value" for the
        # session-state double-set warning.
        check_session_state_rules(default_value=None if index == 0 else index, key=key)
        maybe_raise_label_warnings(label, label_visibility)
        # Normalize options into an indexable sequence.
        opt = ensure_indexable(options)
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Radio Value has invalid type: %s" % type(index).__name__
            )
        # Note: index is intentionally not range-checked when opt is empty.
        if len(opt) > 0 and not 0 <= index < len(opt):
            raise StreamlitAPIException(
                "Radio index must be between 0 and length of options"
            )
        radio_proto = RadioProto()
        radio_proto.label = label
        radio_proto.default = index
        # Display labels are always stringified via format_func.
        radio_proto.options[:] = [str(format_func(option)) for option in opt]
        radio_proto.form_id = current_form_id(self.dg)
        radio_proto.horizontal = horizontal
        if help is not None:
            radio_proto.help = dedent(help)
        serde = RadioSerde(opt, index)
        widget_state = register_widget(
            "radio",
            radio_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        radio_proto.disabled = disabled
        radio_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if widget_state.value_changed:
            # Session state changed the value; echo it back to the frontend.
            radio_proto.value = serde.serialize(widget_state.value)
            radio_proto.set_value = True
        self.dg._enqueue("radio", radio_proto)
        return widget_state.value
    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, List, Optional, cast
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.proto.CameraInput_pb2 import CameraInput as CameraInputProto
from streamlit.proto.Common_pb2 import FileUploaderState as FileUploaderStateProto
from streamlit.proto.Common_pb2 import UploadedFileInfo as UploadedFileInfoProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.uploaded_file_manager import UploadedFile, UploadedFileRec
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
SomeUploadedSnapshotFile = Optional[UploadedFile]
def _get_file_recs_for_camera_input_widget(
    widget_id: str, widget_value: Optional[FileUploaderStateProto]
) -> List[UploadedFileRec]:
    """Fetch the uploaded-file records referenced by a camera_input widget's state.

    Returns an empty list when there is no widget state, no script-run
    context, or no uploaded files in the state.
    """
    if widget_value is None:
        return []
    ctx = get_script_run_ctx()
    if ctx is None:
        return []
    infos = widget_value.uploaded_file_info
    if len(infos) == 0:
        return []
    # Only files referenced by the widget's current state are still active.
    return ctx.uploaded_file_mgr.get_files(
        session_id=ctx.session_id,
        widget_id=widget_id,
        file_ids=[info.id for info in infos],
    )
@dataclass
class CameraInputSerde:
    """Converts a camera snapshot to/from the FileUploaderState proto."""

    def serialize(
        self,
        snapshot: SomeUploadedSnapshotFile,
    ) -> FileUploaderStateProto:
        """Encode *snapshot* (or its absence) into a FileUploaderState proto."""
        state_proto = FileUploaderStateProto()
        ctx = get_script_run_ctx()
        if ctx is None:
            return state_proto
        # _file_id_counter holds the id for the *next* upload, so the
        # current highest file id is one less than the counter.
        state_proto.max_file_id = ctx.uploaded_file_mgr._file_id_counter - 1
        if snapshot:
            file_info: UploadedFileInfoProto = state_proto.uploaded_file_info.add()
            file_info.id = snapshot.id
            file_info.name = snapshot.name
            file_info.size = snapshot.size
        return state_proto

    def deserialize(
        self, ui_value: Optional[FileUploaderStateProto], widget_id: str
    ) -> SomeUploadedSnapshotFile:
        """Resolve the proto back into an UploadedFile, or None when absent."""
        recs = _get_file_recs_for_camera_input_widget(widget_id, ui_value)
        return UploadedFile(recs[0]) if recs else None
class CameraInputMixin:
    @gather_metrics("camera_input")
    def camera_input(
        self,
        label: str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> SomeUploadedSnapshotFile:
        """Display a widget that returns pictures from the user's webcam.
        Parameters
        ----------
        label : str
            A short label explaining to the user what this widget is used for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.
            This also supports:
            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.
            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.
            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.
            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            A tooltip that gets displayed next to the camera input.
        on_change : callable
            An optional callback invoked when this camera_input's value
            changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the camera input if set to
            True. The default is False. This argument can only be supplied by
            keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.
        Returns
        -------
        None or UploadedFile
            The UploadedFile class is a subclass of BytesIO, and therefore
            it is "file-like". This means you can pass them anywhere where
            a file is expected.
        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> picture = st.camera_input("Take a picture")
        >>>
        >>> if picture:
        ...     st.image(picture)
        """
        # Thin public wrapper: capture the script-run context and delegate.
        ctx = get_script_run_ctx()
        return self._camera_input(
            label=label,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )
    def _camera_input(
        self,
        label: str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> SomeUploadedSnapshotFile:
        """Internal implementation of ``camera_input``: builds the proto,
        registers the widget, prunes orphaned uploads, and returns the
        current snapshot (or None)."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        # camera_input values may not be injected via st.session_state.
        check_session_state_rules(default_value=None, key=key, writes_allowed=False)
        maybe_raise_label_warnings(label, label_visibility)
        camera_input_proto = CameraInputProto()
        camera_input_proto.label = label
        camera_input_proto.form_id = current_form_id(self.dg)
        if help is not None:
            camera_input_proto.help = dedent(help)
        serde = CameraInputSerde()
        camera_input_state = register_widget(
            "camera_input",
            camera_input_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        camera_input_proto.disabled = disabled
        camera_input_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        # NOTE(review): ctx is re-fetched here even though it was passed in as
        # a parameter — presumably equivalent; confirm before relying on the
        # parameter value below this point.
        ctx = get_script_run_ctx()
        camera_image_input_state = serde.serialize(camera_input_state.value)
        # NOTE(review): "shapshot" is a typo for "snapshot" (local name only).
        uploaded_shapshot_info = camera_image_input_state.uploaded_file_info
        if ctx is not None and len(uploaded_shapshot_info) != 0:
            newest_file_id = camera_image_input_state.max_file_id
            active_file_ids = [f.id for f in uploaded_shapshot_info]
            # Drop files from older snapshots that are no longer referenced.
            ctx.uploaded_file_mgr.remove_orphaned_files(
                session_id=ctx.session_id,
                widget_id=camera_input_proto.id,
                newest_file_id=newest_file_id,
                active_file_ids=active_file_ids,
            )
        self.dg._enqueue("camera_input", camera_input_proto)
        return camera_input_state.value
    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
from typing import TYPE_CHECKING, Optional, cast
from streamlit.proto.IFrame_pb2 import IFrame as IFrameProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
class IframeMixin:
    @gather_metrics("_iframe")
    def _iframe(
        self,
        src: str,
        width: Optional[int] = None,
        height: Optional[int] = None,
        scrolling: bool = False,
    ) -> "DeltaGenerator":
        """Load a remote URL in an iframe.

        Parameters
        ----------
        src : str
            The URL of the page to embed.
        width : int
            Frame width in CSS pixels; defaults to the app's default
            element width.
        height : int
            Frame height in CSS pixels; defaults to 150.
        scrolling : bool
            Whether to show a scrollbar when the content overflows the
            frame. Defaults to False.
        """
        proto = IFrameProto()
        marshall(proto, src=src, width=width, height=height, scrolling=scrolling)
        return self.dg._enqueue("iframe", proto)

    @gather_metrics("_html")
    def _html(
        self,
        html: str,
        width: Optional[int] = None,
        height: Optional[int] = None,
        scrolling: bool = False,
    ) -> "DeltaGenerator":
        """Display an HTML string in an iframe.

        Parameters
        ----------
        html : str
            The HTML string to embed in the iframe.
        width : int
            Frame width in CSS pixels; defaults to the app's default
            element width.
        height : int
            Frame height in CSS pixels; defaults to 150.
        scrolling : bool
            Whether to show a scrollbar when the content overflows the
            frame. Defaults to False.
        """
        proto = IFrameProto()
        marshall(proto, srcdoc=html, width=width, height=height, scrolling=scrolling)
        return self.dg._enqueue("iframe", proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """The DeltaGenerator this mixin is bound to."""
        return cast("DeltaGenerator", self)
def marshall(
    proto: IFrameProto,
    src: Optional[str] = None,
    srcdoc: Optional[str] = None,
    width: Optional[int] = None,
    height: Optional[int] = None,
    scrolling: bool = False,
) -> None:
    """Populate an IFrame proto from <iframe>-style attributes.

    The parameters correspond directly to <iframe> attributes; see
    https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe.

    Parameters
    ----------
    proto : IFrame protobuf
        The protobuf object to fill in.
    src : str or None
        The URL of the page to embed.
    srcdoc : str or None
        Inline HTML to embed. Overrides src.
    width : int or None
        Frame width in CSS pixels; when omitted the app's default element
        width applies (``has_width`` stays unset).
    height : int or None
        Frame height in CSS pixels; defaults to 150.
    scrolling : bool
        Whether to show a scrollbar when the content overflows the frame.
    """
    if src is not None:
        proto.src = src
    if srcdoc is not None:
        proto.srcdoc = srcdoc
    if width is not None:
        proto.width = width
        proto.has_width = True
    # The frontend needs an explicit height, so fall back to 150px.
    proto.height = height if height is not None else 150
    proto.scrolling = scrolling
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from typing_extensions import Final, TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.js_number import JSNumber, JSNumberBoundsException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
SliderScalarT = TypeVar("SliderScalarT", int, float, date, time, datetime)
Step: TypeAlias = Union[int, float, timedelta]
SliderScalar: TypeAlias = Union[int, float, date, time, datetime]
SliderValueGeneric: TypeAlias = Union[
SliderScalarT,
Sequence[SliderScalarT],
]
SliderValue: TypeAlias = Union[
SliderValueGeneric[int],
SliderValueGeneric[float],
SliderValueGeneric[date],
SliderValueGeneric[time],
SliderValueGeneric[datetime],
]
SliderReturnGeneric: TypeAlias = Union[
SliderScalarT,
Tuple[SliderScalarT],
Tuple[SliderScalarT, SliderScalarT],
]
SliderReturn: TypeAlias = Union[
SliderReturnGeneric[int],
SliderReturnGeneric[float],
SliderReturnGeneric[date],
SliderReturnGeneric[time],
SliderReturnGeneric[datetime],
]
SECONDS_TO_MICROS: Final = 1000 * 1000
DAYS_TO_MICROS: Final = 24 * 60 * 60 * SECONDS_TO_MICROS
UTC_EPOCH: Final = datetime(1970, 1, 1, tzinfo=timezone.utc)
def _time_to_datetime(time_: time) -> datetime:
# Note, here we pick an arbitrary date well after Unix epoch.
# This prevents pre-epoch timezone issues (https://bugs.python.org/issue36759)
# We're dropping the date from datetime later, anyway.
return datetime.combine(date(2000, 1, 1), time_)
def _date_to_datetime(date_: date) -> datetime:
return datetime.combine(date_, time())
def _delta_to_micros(delta: timedelta) -> int:
return (
delta.microseconds
+ delta.seconds * SECONDS_TO_MICROS
+ delta.days * DAYS_TO_MICROS
)
def _datetime_to_micros(dt: datetime) -> int:
# The frontend is not aware of timezones and only expects a UTC-based
# timestamp (in microseconds). Since we want to show the date/time exactly
# as it is in the given datetime object, we just set the tzinfo to UTC and
# do not do any timezone conversions. Only the backend knows about
# original timezone and will replace the UTC timestamp in the deserialization.
utc_dt = dt.replace(tzinfo=timezone.utc)
return _delta_to_micros(utc_dt - UTC_EPOCH)
def _micros_to_datetime(micros: int, orig_tz: Optional[tzinfo]) -> datetime:
"""Restore times/datetimes to original timezone (dates are always naive)"""
utc_dt = UTC_EPOCH + timedelta(microseconds=micros)
# Add the original timezone. No conversion is required here,
# since in the serialization, we also just replace the timestamp with UTC.
return utc_dt.replace(tzinfo=orig_tz)
@dataclass
class SliderSerde:
value: List[float]
data_type: int
single_value: bool
orig_tz: Optional[tzinfo]
def deserialize(self, ui_value: Optional[List[float]], widget_id: str = ""):
if ui_value is not None:
val: Any = ui_value
else:
# Widget has not been used; fallback to the original value,
val = self.value
# The widget always returns a float array, so fix the return type if necessary
if self.data_type == SliderProto.INT:
val = [int(v) for v in val]
if self.data_type == SliderProto.DATETIME:
val = [_micros_to_datetime(int(v), self.orig_tz) for v in val]
if self.data_type == SliderProto.DATE:
val = [_micros_to_datetime(int(v), self.orig_tz).date() for v in val]
if self.data_type == SliderProto.TIME:
val = [
_micros_to_datetime(int(v), self.orig_tz)
.time()
.replace(tzinfo=self.orig_tz)
for v in val
]
return val[0] if self.single_value else tuple(val)
def serialize(self, v: Any) -> List[Any]:
range_value = isinstance(v, (list, tuple))
value = list(v) if range_value else [v]
if self.data_type == SliderProto.DATE:
value = [_datetime_to_micros(_date_to_datetime(v)) for v in value]
if self.data_type == SliderProto.TIME:
value = [_datetime_to_micros(_time_to_datetime(v)) for v in value]
if self.data_type == SliderProto.DATETIME:
value = [_datetime_to_micros(v) for v in value]
return value
class SliderMixin:
    """Mixin providing the ``st.slider`` widget."""

    @gather_metrics("slider")
    def slider(
        self,
        label: str,
        min_value: Optional[SliderScalar] = None,
        max_value: Optional[SliderScalar] = None,
        value: Optional[SliderValue] = None,
        step: Optional[Step] = None,
        format: Optional[str] = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        # TODO(harahu): Add overload definitions. The return type is
        # `SliderReturn`, in reality, but the return type is left as `Any`
        # until we have proper overload definitions in place. Otherwise the
        # user would have to cast the return value more often than not, which
        # can be annoying.
    ) -> Any:
        """Display a slider widget.

        This supports int, float, date, time, and datetime types.

        This also allows you to render a range slider by passing a two-element
        tuple or list as the `value`.

        The difference between `st.slider` and `st.select_slider` is that
        `slider` only accepts numerical or date/time data and takes a range as
        input, while `select_slider` accepts any datatype and takes an iterable
        set of options.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this slider is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.
            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.
            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        min_value : a supported type or None
            The minimum permitted value.
            Defaults to 0 if the value is an int, 0.0 if a float,
            value - timedelta(days=14) if a date/datetime, time.min if a time
        max_value : a supported type or None
            The maximum permitted value.
            Defaults to 100 if the value is an int, 1.0 if a float,
            value + timedelta(days=14) if a date/datetime, time.max if a time
        value : a supported type or a tuple/list of supported types or None
            The value of the slider when it first renders. If a tuple/list
            of two values is passed here, then a range slider with those lower
            and upper bounds is rendered. For example, if set to `(1, 10)` the
            slider will have a selectable range between 1 and 10.
            Defaults to min_value.
        step : int/float/timedelta or None
            The stepping interval.
            Defaults to 1 if the value is an int, 0.01 if a float,
            timedelta(days=1) if a date/datetime, timedelta(minutes=15) if a time
            (or if max_value - min_value < 1 day)
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. This does not impact the return value.
            Formatter for int/float supports: %d %e %f %g %i
            Formatter for date/time/datetime uses Moment.js notation:
            https://momentjs.com/docs/#/displaying/format/
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the slider.
        on_change : callable
            An optional callback invoked when this slider's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the slider if set to True. The
            default is False. This argument can only be supplied by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.

        Returns
        -------
        int/float/date/time/datetime or tuple of int/float/date/time/datetime
            The current value of the slider widget. The return type will match
            the data type of the value parameter.

        Examples
        --------
        >>> import streamlit as st
        >>>
        >>> age = st.slider('How old are you?', 0, 130, 25)
        >>> st.write("I'm ", age, 'years old')

        And here's an example of a range slider:

        >>> import streamlit as st
        >>>
        >>> values = st.slider(
        ...     'Select a range of values',
        ...     0.0, 100.0, (25.0, 75.0))
        >>> st.write('Values:', values)

        This is a range time slider:

        >>> import streamlit as st
        >>> from datetime import time
        >>>
        >>> appointment = st.slider(
        ...     "Schedule your appointment:",
        ...     value=(time(11, 30), time(12, 45)))
        >>> st.write("You're scheduled for:", appointment)

        Finally, a datetime slider:

        >>> import streamlit as st
        >>> from datetime import datetime
        >>>
        >>> start_time = st.slider(
        ...     "When do you start?",
        ...     value=datetime(2020, 1, 1, 9, 30),
        ...     format="MM/DD/YY - hh:mm")
        >>> st.write("Start time:", start_time)

        .. output::
           https://doc-slider.streamlitapp.com/
           height: 300px
        """
        # Delegate to the shared implementation, attaching the script-run
        # context so widget state is bound to the current session.
        ctx = get_script_run_ctx()
        return self._slider(
            label=label,
            min_value=min_value,
            max_value=max_value,
            value=value,
            step=step,
            format=format,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )

    def _slider(
        self,
        label: str,
        min_value=None,
        max_value=None,
        value=None,
        step: Optional[Step] = None,
        format: Optional[str] = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> SliderReturn:
        """Shared implementation for ``st.slider``.

        Normalizes value/min/max/step/format across the supported data types
        (int, float, date, time, datetime), converts time-like values to
        microsecond offsets for the proto, registers the widget, and returns
        its current value.
        """
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        check_session_state_rules(default_value=value, key=key)
        maybe_raise_label_warnings(label, label_visibility)
        if value is None:
            # Set value from session_state if exists.
            session_state = get_session_state().filtered_state
            # we look first to session_state value of the widget because
            # depending on the value (single value or list/tuple) the slider should be
            # initializing differently (either as range or single value slider)
            if key is not None and key in session_state:
                value = session_state[key]
            else:
                # Set value default.
                value = min_value if min_value is not None else 0
        # Map each accepted Python type to its proto-level data type constant.
        SUPPORTED_TYPES = {
            int: SliderProto.INT,
            float: SliderProto.FLOAT,
            datetime: SliderProto.DATETIME,
            date: SliderProto.DATE,
            time: SliderProto.TIME,
        }
        TIMELIKE_TYPES = (SliderProto.DATETIME, SliderProto.TIME, SliderProto.DATE)
        # Ensure that the value is either a single value or a range of values.
        single_value = isinstance(value, tuple(SUPPORTED_TYPES.keys()))
        range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
        if not single_value and not range_value:
            raise StreamlitAPIException(
                "Slider value should either be an int/float/datetime or a list/tuple of "
                "0 to 2 ints/floats/datetimes"
            )
        # Simplify future logic by always making value a list
        if single_value:
            value = [value]

        def all_same_type(items):
            return len(set(map(type, items))) < 2

        if not all_same_type(value):
            raise StreamlitAPIException(
                "Slider tuple/list components must be of the same type.\n"
                f"But were: {list(map(type, value))}"
            )
        # An empty range defaults to an INT slider.
        if len(value) == 0:
            data_type = SliderProto.INT
        else:
            data_type = SUPPORTED_TYPES[type(value[0])]
        # Compute the type-dependent default bounds for time-like sliders:
        # whole day for times, +/- 14 days around the value for dates/datetimes.
        datetime_min = time.min
        datetime_max = time.max
        if data_type == SliderProto.TIME:
            datetime_min = time.min.replace(tzinfo=value[0].tzinfo)
            datetime_max = time.max.replace(tzinfo=value[0].tzinfo)
        if data_type in (SliderProto.DATETIME, SliderProto.DATE):
            datetime_min = value[0] - timedelta(days=14)
            datetime_max = value[0] + timedelta(days=14)
        # Per-type fallbacks for min/max/step/format when not given by the caller.
        DEFAULTS = {
            SliderProto.INT: {
                "min_value": 0,
                "max_value": 100,
                "step": 1,
                "format": "%d",
            },
            SliderProto.FLOAT: {
                "min_value": 0.0,
                "max_value": 1.0,
                "step": 0.01,
                "format": "%0.2f",
            },
            SliderProto.DATETIME: {
                "min_value": datetime_min,
                "max_value": datetime_max,
                "step": timedelta(days=1),
                "format": "YYYY-MM-DD",
            },
            SliderProto.DATE: {
                "min_value": datetime_min,
                "max_value": datetime_max,
                "step": timedelta(days=1),
                "format": "YYYY-MM-DD",
            },
            SliderProto.TIME: {
                "min_value": datetime_min,
                "max_value": datetime_max,
                "step": timedelta(minutes=15),
                "format": "HH:mm",
            },
        }
        if min_value is None:
            min_value = DEFAULTS[data_type]["min_value"]
        if max_value is None:
            max_value = DEFAULTS[data_type]["max_value"]
        if step is None:
            step = cast(Step, DEFAULTS[data_type]["step"])
            # For ranges shorter than a day, a 1-day step would be useless;
            # fall back to 15-minute increments.
            if data_type in (
                SliderProto.DATETIME,
                SliderProto.DATE,
            ) and max_value - min_value < timedelta(days=1):
                step = timedelta(minutes=15)
        if format is None:
            format = cast(str, DEFAULTS[data_type]["format"])
        if step == 0:
            raise StreamlitAPIException(
                "Slider components cannot be passed a `step` of 0."
            )
        # Ensure that all arguments are of the same type.
        slider_args = [min_value, max_value, step]
        int_args = all(map(lambda a: isinstance(a, int), slider_args))
        float_args = all(map(lambda a: isinstance(a, float), slider_args))
        # When min and max_value are the same timelike, step should be a timedelta
        timelike_args = (
            data_type in TIMELIKE_TYPES
            and isinstance(step, timedelta)
            and type(min_value) == type(max_value)
        )
        if not int_args and not float_args and not timelike_args:
            raise StreamlitAPIException(
                "Slider value arguments must be of matching types."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step)s type."
                % {
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step": type(step).__name__,
                }
            )
        # Ensure that the value matches arguments' types.
        all_ints = data_type == SliderProto.INT and int_args
        all_floats = data_type == SliderProto.FLOAT and float_args
        all_timelikes = data_type in TIMELIKE_TYPES and timelike_args
        if not all_ints and not all_floats and not all_timelikes:
            raise StreamlitAPIException(
                "Both value and arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )
        # Ensure that min <= value(s) <= max, adjusting the bounds as necessary.
        # NOTE(review): if min_value > max_value these two lines collapse both
        # bounds to the original max_value (the second `max` already sees the
        # updated min_value) rather than swapping them — confirm intended.
        min_value = min(min_value, max_value)
        max_value = max(min_value, max_value)
        if len(value) == 1:
            min_value = min(value[0], min_value)
            max_value = max(value[0], max_value)
        elif len(value) == 2:
            start, end = value
            if start > end:
                # Swap start and end, since they seem reversed
                start, end = end, start
                value = start, end
            min_value = min(start, min_value)
            max_value = max(end, max_value)
        else:
            # Empty list, so let's just use the outer bounds
            value = [min_value, max_value]
        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
        # (We check `min_value` and `max_value` here; `value` and `step` are
        # already known to be in the [min_value, max_value] range.)
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            elif all_floats:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
            elif all_timelikes:
                # No validation yet. TODO: check between 0001-01-01 to 9999-12-31
                pass
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))
        orig_tz = None
        # Convert dates or times into datetimes
        if data_type == SliderProto.TIME:
            value = list(map(_time_to_datetime, value))
            min_value = _time_to_datetime(min_value)
            max_value = _time_to_datetime(max_value)
        if data_type == SliderProto.DATE:
            value = list(map(_date_to_datetime, value))
            min_value = _date_to_datetime(min_value)
            max_value = _date_to_datetime(max_value)
        # Now, convert to microseconds (so we can serialize datetime to a long)
        if data_type in TIMELIKE_TYPES:
            # Restore times/datetimes to original timezone (dates are always naive)
            orig_tz = (
                value[0].tzinfo
                if data_type in (SliderProto.TIME, SliderProto.DATETIME)
                else None
            )
            value = list(map(_datetime_to_micros, value))
            min_value = _datetime_to_micros(min_value)
            max_value = _datetime_to_micros(max_value)
            step = _delta_to_micros(cast(timedelta, step))
        # It would be great if we could guess the number of decimal places from
        # the `step` argument, but this would only be meaningful if step were a
        # decimal. As a possible improvement we could make this function accept
        # decimals and/or use some heuristics for floats.
        # Populate the proto that is sent to the frontend.
        slider_proto = SliderProto()
        slider_proto.label = label
        slider_proto.format = format
        slider_proto.default[:] = value
        slider_proto.min = min_value
        slider_proto.max = max_value
        slider_proto.step = cast(float, step)
        slider_proto.data_type = data_type
        slider_proto.options[:] = []
        slider_proto.form_id = current_form_id(self.dg)
        if help is not None:
            slider_proto.help = dedent(help)
        serde = SliderSerde(value, data_type, single_value, orig_tz)
        widget_state = register_widget(
            "slider",
            slider_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        slider_proto.disabled = disabled
        slider_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if widget_state.value_changed:
            # The widget's value was updated server-side (e.g. via session
            # state); push the new value back to the frontend.
            slider_proto.value[:] = serde.serialize(widget_state.value)
            slider_proto.set_value = True
        self.dg._enqueue("slider", slider_proto)
        return cast(SliderReturn, widget_state.value)
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/slider.py | 0.856197 | 0.269692 | slider.py | pypi |
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Optional, cast
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.proto.Checkbox_pb2 import Checkbox as CheckboxProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
@dataclass
class CheckboxSerde:
    """(De)serializes checkbox state; ``value`` is the fallback default."""

    value: bool

    def serialize(self, v: bool) -> bool:
        """Coerce the widget value to a plain bool for the proto."""
        return bool(v)

    def deserialize(self, ui_value: Optional[bool], widget_id: str = "") -> bool:
        """Return the frontend value if present, else the default — as bool."""
        if ui_value is None:
            return bool(self.value)
        return bool(ui_value)
class CheckboxMixin:
    """Mixin providing the ``st.checkbox`` widget."""

    @gather_metrics("checkbox")
    def checkbox(
        self,
        label: str,
        value: bool = False,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> bool:
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the checkbox.
        on_change : callable
            An optional callback invoked when this checkbox's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the checkbox if set to True.
            The default is False. This argument can only be supplied by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        .. output::
           https://doc-checkbox.streamlitapp.com/
           height: 220px
        """
        # Delegate to the shared implementation, attaching the script-run
        # context so widget state is bound to the current session.
        ctx = get_script_run_ctx()
        return self._checkbox(
            label=label,
            value=value,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )

    def _checkbox(
        self,
        label: str,
        value: bool = False,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> bool:
        """Shared implementation for ``st.checkbox``: builds the proto,
        registers the widget, and returns its current bool value."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        # False (the default) is treated as "no default provided" for the
        # session-state conflict check.
        check_session_state_rules(
            default_value=None if value is False else value, key=key
        )
        maybe_raise_label_warnings(label, label_visibility)
        checkbox_proto = CheckboxProto()
        checkbox_proto.label = label
        checkbox_proto.default = bool(value)
        checkbox_proto.form_id = current_form_id(self.dg)
        if help is not None:
            checkbox_proto.help = dedent(help)
        serde = CheckboxSerde(value)
        checkbox_state = register_widget(
            "checkbox",
            checkbox_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        checkbox_proto.disabled = disabled
        checkbox_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if checkbox_state.value_changed:
            # The value was updated server-side; push it back to the frontend.
            checkbox_proto.value = checkbox_state.value
            checkbox_proto.set_value = True
        self.dg._enqueue("checkbox", checkbox_proto)
        return checkbox_state.value
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/checkbox.py | 0.942665 | 0.198666 | checkbox.py | pypi |
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Sequence, cast
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Selectbox_pb2 import Selectbox as SelectboxProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
ensure_indexable,
maybe_raise_label_warnings,
to_key,
)
from streamlit.util import index_
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
@dataclass
class SelectboxSerde(Generic[T]):
    """Maps between an option object and its integer index in ``options``.

    ``index`` is the default position used when the widget is untouched.
    """

    options: Sequence[T]
    index: int

    def serialize(self, v: object) -> int:
        # An empty option list always serializes to index 0.
        if not len(self.options):
            return 0
        return index_(self.options, v)

    def deserialize(
        self,
        ui_value: Optional[int],
        widget_id: str = "",
    ) -> Optional[T]:
        if not len(self.options):
            return None
        selected: int = self.index if ui_value is None else ui_value
        return self.options[selected]
class SelectboxMixin:
    """Mixin providing the ``st.selectbox`` widget."""

    @gather_metrics("selectbox")
    def selectbox(
        self,
        label: str,
        options: OptionSequence[T],
        index: int = 0,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> Optional[T]:
        """Display a select widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.
            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.
            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        options : Sequence, numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index
            Labels for the select options. This will be cast to str internally
            by default. For pandas.DataFrame, the first column is selected.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of the labels. It receives the option
            as an argument and its output will be cast to str.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the selectbox.
        on_change : callable
            An optional callback invoked when this selectbox's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the selectbox if set to True.
            The default is False. This argument can only be supplied by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.

        Returns
        -------
        any
            The selected option

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> option = st.selectbox(
        ...     'How would you like to be contacted?',
        ...     ('Email', 'Home phone', 'Mobile phone'))
        >>>
        >>> st.write('You selected:', option)

        .. output::
           https://doc-selectbox.streamlitapp.com/
           height: 320px
        """
        # Delegate to the shared implementation, attaching the script-run
        # context so widget state is bound to the current session.
        ctx = get_script_run_ctx()
        return self._selectbox(
            label=label,
            options=options,
            index=index,
            format_func=format_func,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )

    def _selectbox(
        self,
        label: str,
        options: OptionSequence[T],
        index: int = 0,
        format_func: Callable[[Any], Any] = str,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> Optional[T]:
        """Shared implementation for ``st.selectbox``: validates the index,
        builds the proto, registers the widget, and returns the selection."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        # Index 0 (the default) is treated as "no default provided" for the
        # session-state conflict check.
        check_session_state_rules(default_value=None if index == 0 else index, key=key)
        maybe_raise_label_warnings(label, label_visibility)
        # Normalize options (ndarray/Series/DataFrame/...) to an indexable sequence.
        opt = ensure_indexable(options)
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Selectbox Value has invalid type: %s" % type(index).__name__
            )
        if len(opt) > 0 and not 0 <= index < len(opt):
            raise StreamlitAPIException(
                "Selectbox index must be between 0 and length of options"
            )
        selectbox_proto = SelectboxProto()
        selectbox_proto.label = label
        selectbox_proto.default = index
        # The frontend only ever sees the formatted string labels.
        selectbox_proto.options[:] = [str(format_func(option)) for option in opt]
        selectbox_proto.form_id = current_form_id(self.dg)
        if help is not None:
            selectbox_proto.help = dedent(help)
        serde = SelectboxSerde(opt, index)
        widget_state = register_widget(
            "selectbox",
            selectbox_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )
        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        selectbox_proto.disabled = disabled
        selectbox_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        if widget_state.value_changed:
            # The value was updated server-side; push it back to the frontend.
            selectbox_proto.value = serde.serialize(widget_state.value)
            selectbox_proto.set_value = True
        self.dg._enqueue("selectbox", selectbox_proto)
        return widget_state.value
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/selectbox.py | 0.942507 | 0.268962 | selectbox.py | pypi |
from dataclasses import dataclass
from datetime import date, datetime, time
from textwrap import dedent
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Tuple, Union, cast
from dateutil import relativedelta
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.DateInput_pb2 import DateInput as DateInputProto
from streamlit.proto.TimeInput_pb2 import TimeInput as TimeInputProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
TimeValue: TypeAlias = Union[time, datetime, None]
SingleDateValue: TypeAlias = Union[date, datetime, None]
DateValue: TypeAlias = Union[SingleDateValue, Sequence[SingleDateValue]]
DateWidgetReturn: TypeAlias = Union[date, Tuple[()], Tuple[date], Tuple[date, date]]
def _parse_date_value(value: DateValue) -> Tuple[List[date], bool]:
parsed_dates: List[date]
range_value: bool = False
if value is None:
# Set value default.
parsed_dates = [datetime.now().date()]
elif isinstance(value, datetime):
parsed_dates = [value.date()]
elif isinstance(value, date):
parsed_dates = [value]
elif isinstance(value, (list, tuple)):
if not len(value) in (0, 1, 2):
raise StreamlitAPIException(
"DateInput value should either be an date/datetime or a list/tuple of "
"0 - 2 date/datetime values"
)
parsed_dates = [v.date() if isinstance(v, datetime) else v for v in value]
range_value = True
else:
raise StreamlitAPIException(
"DateInput value should either be an date/datetime or a list/tuple of "
"0 - 2 date/datetime values"
)
return parsed_dates, range_value
def _parse_min_date(
    min_value: SingleDateValue,
    parsed_dates: Sequence[date],
) -> date:
    """Resolve the minimum selectable date.

    Defaults to ten years before the first default date (or before today
    when there is no default) if ``min_value`` is None.
    """
    # Check datetime before date: datetime is a date subclass.
    if isinstance(min_value, datetime):
        return min_value.date()
    if isinstance(min_value, date):
        return min_value
    if min_value is None:
        anchor = parsed_dates[0] if parsed_dates else date.today()
        return anchor - relativedelta.relativedelta(years=10)
    raise StreamlitAPIException(
        "DateInput min should either be a date/datetime or None"
    )
def _parse_max_date(
    max_value: SingleDateValue,
    parsed_dates: Sequence[date],
) -> date:
    """Resolve the maximum selectable date.

    Defaults to ten years after the last default date (or after today when
    there is no default) if ``max_value`` is None.
    """
    # Check datetime before date: datetime is a date subclass.
    if isinstance(max_value, datetime):
        return max_value.date()
    if isinstance(max_value, date):
        return max_value
    if max_value is None:
        anchor = parsed_dates[-1] if parsed_dates else date.today()
        return anchor + relativedelta.relativedelta(years=10)
    raise StreamlitAPIException(
        "DateInput max should either be a date/datetime or None"
    )
@dataclass(frozen=True)
class _DateInputValues:
    """Validated, normalized inputs for the date_input widget.

    ``value`` holds the parsed default date(s), ``is_range`` whether the
    widget is a range picker, and ``min``/``max`` the selectable bounds.
    """

    value: Sequence[date]
    is_range: bool
    max: date
    min: date

    @classmethod
    def from_raw_values(
        cls,
        value: DateValue,
        min_value: SingleDateValue,
        max_value: SingleDateValue,
    ) -> "_DateInputValues":
        """Parse raw user-supplied value/min/max into a validated instance."""
        parsed_value, is_range = _parse_date_value(value=value)
        return cls(
            value=parsed_value,
            is_range=is_range,
            min=_parse_min_date(
                min_value=min_value,
                parsed_dates=parsed_value,
            ),
            max=_parse_max_date(
                max_value=max_value,
                parsed_dates=parsed_value,
            ),
        )

    def __post_init__(self) -> None:
        # Validation only; runs right after dataclass construction.
        if self.min > self.max:
            raise StreamlitAPIException(
                f"The `min_value`, set to {self.min}, shouldn't be larger "
                f"than the `max_value`, set to {self.max}."
            )
        if self.value:
            start_value = self.value[0]
            end_value = self.value[-1]
            # Default date(s) must fall inside [min, max], inclusively.
            if (start_value < self.min) or (end_value > self.max):
                raise StreamlitAPIException(
                    f"The default `value` of {self.value} "
                    f"must lie between the `min_value` of {self.min} "
                    f"and the `max_value` of {self.max}, inclusively."
                )
@dataclass
class TimeInputSerde:
    """(De)serializes time_input state; ``value`` is the fallback default."""

    value: time

    def deserialize(self, ui_value: Optional[str], widget_id: Any = "") -> time:
        """Parse the frontend's "HH:MM" string, or return the default."""
        if ui_value is None:
            return self.value
        return datetime.strptime(ui_value, "%H:%M").time()

    def serialize(self, v: Union[datetime, time]) -> str:
        """Format the value as "HH:MM", dropping any date component."""
        as_time = v.time() if isinstance(v, datetime) else v
        return as_time.strftime("%H:%M")
@dataclass
class DateInputSerde:
    """(De)serializes date_input state; ``value`` holds the parsed defaults."""

    value: _DateInputValues

    def deserialize(
        self,
        ui_value: Any,
        widget_id: str = "",
    ) -> DateWidgetReturn:
        """Parse the frontend's "YYYY/MM/DD" strings, or use the defaults."""
        if ui_value is None:
            dates: Sequence[date] = self.value.value
        else:
            dates = tuple(
                datetime.strptime(item, "%Y/%m/%d").date() for item in ui_value
            )
        # Single-value widgets unwrap to a bare date; ranges stay tuples.
        if not self.value.is_range:
            return dates[0]
        return cast(DateWidgetReturn, tuple(dates))

    def serialize(self, v: DateWidgetReturn) -> List[str]:
        """Format a date or date range as "YYYY/MM/DD" strings."""
        items = list(v) if isinstance(v, (list, tuple)) else [v]
        return [item.strftime("%Y/%m/%d") for item in items]
class TimeWidgetsMixin:
    """Mixin that adds the time/date input widget commands to DeltaGenerator."""

    @gather_metrics("time_input")
    def time_input(
        self,
        label: str,
        value: TimeValue = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> time:
        """Display a time input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this time input is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        value : datetime.time/datetime.datetime
            The value of this widget when it first renders. This will be
            cast to str internally. Defaults to the current time.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the input.
        on_change : callable
            An optional callback invoked when this time_input's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the time input if set to True.
            The default is False. This argument can only be supplied by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.

        Returns
        -------
        datetime.time
            The current value of the time input widget.

        Example
        -------
        >>> import datetime
        >>> import streamlit as st
        >>>
        >>> t = st.time_input('Set an alarm for', datetime.time(8, 45))
        >>> st.write('Alarm is set for', t)

        .. output::
           https://doc-time-input.streamlitapp.com/
           height: 260px

        """
        # Thin public wrapper: capture the current script-run context and
        # delegate all real work to _time_input.
        ctx = get_script_run_ctx()
        return self._time_input(
            label=label,
            value=value,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )
    def _time_input(
        self,
        label: str,
        value: Union[time, datetime, None] = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> time:
        """Internal implementation of st.time_input; see time_input for docs."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        check_session_state_rules(default_value=value, key=key)
        maybe_raise_label_warnings(label, label_visibility)

        # Normalize the default to a whole-minute time: the widget's wire
        # format is "%H:%M", so seconds/microseconds are dropped.
        parsed_time: time
        if value is None:
            # Set value default.
            parsed_time = datetime.now().time().replace(second=0, microsecond=0)
        elif isinstance(value, datetime):
            parsed_time = value.time().replace(second=0, microsecond=0)
        elif isinstance(value, time):
            parsed_time = value
        else:
            raise StreamlitAPIException(
                "The type of value should be one of datetime, time or None"
            )
        del value

        time_input_proto = TimeInputProto()
        time_input_proto.label = label
        time_input_proto.default = time.strftime(parsed_time, "%H:%M")
        time_input_proto.form_id = current_form_id(self.dg)
        if help is not None:
            time_input_proto.help = dedent(help)

        serde = TimeInputSerde(parsed_time)
        widget_state = register_widget(
            "time_input",
            time_input_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )

        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        time_input_proto.disabled = disabled
        time_input_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )

        if widget_state.value_changed:
            # Session state changed the widget's value: echo it back to the
            # frontend so the UI reflects the new state.
            time_input_proto.value = serde.serialize(widget_state.value)
            time_input_proto.set_value = True

        self.dg._enqueue("time_input", time_input_proto)
        return widget_state.value
    @gather_metrics("date_input")
    def date_input(
        self,
        label: str,
        value: DateValue = None,
        min_value: SingleDateValue = None,
        max_value: SingleDateValue = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
    ) -> DateWidgetReturn:
        r"""Display a date input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this date input is for.
            The label can optionally contain Markdown and supports the following
            elements: Bold, Italics, Strikethroughs, Inline Code, Emojis, and Links.

            This also supports:

            * Emoji shortcodes, such as ``:+1:`` and ``:sunglasses:``.
              For a list of all supported codes,
              see https://share.streamlit.io/streamlit/emoji-shortcodes.

            * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
              must be on their own lines). Supported LaTeX functions are listed
              at https://katex.org/docs/supported.html.

            * Colored text, using the syntax ``:color[text to be colored]``,
              where ``color`` needs to be replaced with any of the following
              supported colors: blue, green, orange, red, violet.

            For accessibility reasons, you should never set an empty label (label="")
            but hide it with label_visibility if needed. In the future, we may disallow
            empty labels by raising an exception.
        value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None
            The value of this widget when it first renders. If a list/tuple with
            0 to 2 date/datetime values is provided, the datepicker will allow
            users to provide a range. Defaults to today as a single-date picker.
        min_value : datetime.date or datetime.datetime
            The minimum selectable date. If value is a date, defaults to value - 10 years.
            If value is the interval [start, end], defaults to start - 10 years.
        max_value : datetime.date or datetime.datetime
            The maximum selectable date. If value is a date, defaults to value + 10 years.
            If value is the interval [start, end], defaults to end + 10 years.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        help : str
            An optional tooltip that gets displayed next to the input.
        on_change : callable
            An optional callback invoked when this date_input's value changes.
        args : tuple
            An optional tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        disabled : bool
            An optional boolean, which disables the date input if set to True.
            The default is False. This argument can only be supplied by keyword.
        label_visibility : "visible" or "hidden" or "collapsed"
            The visibility of the label. If "hidden", the label doesn't show but there
            is still empty space for it above the widget (equivalent to label="").
            If "collapsed", both the label and the space are removed. Default is
            "visible". This argument can only be supplied by keyword.

        Returns
        -------
        datetime.date or a tuple with 0-2 dates
            The current value of the date input widget.

        Example
        -------
        >>> import datetime
        >>> import streamlit as st
        >>>
        >>> d = st.date_input(
        ...     "When\'s your birthday",
        ...     datetime.date(2019, 7, 6))
        >>> st.write('Your birthday is:', d)

        .. output::
           https://doc-date-input.streamlitapp.com/
           height: 260px

        """
        # Thin public wrapper: capture the current script-run context and
        # delegate all real work to _date_input.
        ctx = get_script_run_ctx()
        return self._date_input(
            label=label,
            value=value,
            min_value=min_value,
            max_value=max_value,
            key=key,
            help=help,
            on_change=on_change,
            args=args,
            kwargs=kwargs,
            disabled=disabled,
            label_visibility=label_visibility,
            ctx=ctx,
        )
    def _date_input(
        self,
        label: str,
        value: DateValue = None,
        min_value: SingleDateValue = None,
        max_value: SingleDateValue = None,
        key: Optional[Key] = None,
        help: Optional[str] = None,
        on_change: Optional[WidgetCallback] = None,
        args: Optional[WidgetArgs] = None,
        kwargs: Optional[WidgetKwargs] = None,
        *,  # keyword-only arguments:
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        ctx: Optional[ScriptRunContext] = None,
    ) -> DateWidgetReturn:
        """Internal implementation of st.date_input; see date_input for docs."""
        key = to_key(key)
        check_callback_rules(self.dg, on_change)
        check_session_state_rules(default_value=value, key=key)
        maybe_raise_label_warnings(label, label_visibility)

        # Parse/validate the raw value and min/max into a normalized,
        # range-checked container (raises StreamlitAPIException on bad input).
        parsed_values = _DateInputValues.from_raw_values(
            value=value,
            min_value=min_value,
            max_value=max_value,
        )
        del value, min_value, max_value

        date_input_proto = DateInputProto()
        date_input_proto.is_range = parsed_values.is_range
        if help is not None:
            date_input_proto.help = dedent(help)

        date_input_proto.label = label
        date_input_proto.default[:] = [
            date.strftime(v, "%Y/%m/%d") for v in parsed_values.value
        ]
        date_input_proto.min = date.strftime(parsed_values.min, "%Y/%m/%d")
        date_input_proto.max = date.strftime(parsed_values.max, "%Y/%m/%d")
        date_input_proto.form_id = current_form_id(self.dg)

        serde = DateInputSerde(parsed_values)
        widget_state = register_widget(
            "date_input",
            date_input_proto,
            user_key=key,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
        )

        # This needs to be done after register_widget because we don't want
        # the following proto fields to affect a widget's ID.
        date_input_proto.disabled = disabled
        date_input_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )

        if widget_state.value_changed:
            # Session state changed the widget's value: echo it back to the
            # frontend so the UI reflects the new state.
            date_input_proto.value[:] = serde.serialize(widget_state.value)
            date_input_proto.set_value = True

        self.dg._enqueue("date_input", date_input_proto)
        return widget_state.value
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/time_widgets.py | 0.891805 | 0.240579 | time_widgets.py | pypi |
import json
from typing import TYPE_CHECKING, Any, Dict, Optional, cast
from typing_extensions import Final
import streamlit.elements.legacy_data_frame as data_frame
import streamlit.elements.lib.dicttools as dicttools
from streamlit.logger import get_logger
from streamlit.proto.VegaLiteChart_pb2 import VegaLiteChart as VegaLiteChartProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
from streamlit.elements.arrow import Data
# Module-level logger for this file.
LOGGER: Final = get_logger(__name__)
class LegacyVegaLiteMixin:
    """Mixin providing the legacy (non-Arrow) Vega-Lite chart command."""

    @gather_metrics("_legacy_vega_lite_chart")
    def _legacy_vega_lite_chart(
        self,
        data: "Data" = None,
        spec: Optional[Dict[str, Any]] = None,
        use_container_width: bool = False,
        **kwargs: Any,
    ) -> "DeltaGenerator":
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st._legacy_vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://static.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        vega_lite_chart_proto = VegaLiteChartProto()
        # marshall() resolves the data/spec/kwargs combinations and fills the
        # proto in place.
        marshall(
            vega_lite_chart_proto,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )
        return self.dg._enqueue("vega_lite_chart", vega_lite_chart_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def marshall(
    proto: VegaLiteChartProto,
    data: "Data" = None,
    spec: Optional[Dict[str, Any]] = None,
    use_container_width: bool = False,
    **kwargs: Any,
) -> None:
    """Construct a Vega-Lite chart object.

    See DeltaGenerator._legacy_vega_lite_chart for docs.
    """
    # A dict passed as the first positional arg is really the spec, with the
    # data (possibly) embedded inside it Vega-Lite style.
    if spec is None and isinstance(data, dict):
        spec = data
        data = None

    # Work on a copy so the caller's dict is never mutated; kwargs alone may
    # also supply the whole spec (e.g. marshall(proto, baz='boz')).
    spec = {} if spec is None else dict(spec)

    # Keyword args are merged on top of the spec. Unflattening turns keys
    # like x_field into nested dicts ({"x": {"field": ...}}), guided by the
    # known encoding channel names. (Only string keys work, but kwarg keys
    # are always strings.)
    if kwargs:
        spec = dict(spec, **dicttools.unflatten(kwargs, _CHANNELS))

    if not spec:
        raise ValueError("Vega-Lite charts require a non-empty spec dict.")

    spec.setdefault("autosize", {"type": "fit", "contains": "padding"})

    # Named datasets — marshall(proto, {datasets: {foo: df1, bar: df2}, ...})
    # — are serialized individually and removed from the spec.
    if "datasets" in spec:
        for dataset_name, dataset_data in spec["datasets"].items():
            dataset = proto.datasets.add()
            dataset.name = str(dataset_name)
            dataset.has_name = True
            data_frame.marshall_data_frame(dataset_data, dataset.data)
        del spec["datasets"]

    # Top-level data may appear in several shapes:
    #   marshall(proto, {data: df})
    #   marshall(proto, {data: {values: df, ...}})
    #   marshall(proto, {data: {url: 'url'}})
    #   marshall(proto, {data: {name: 'foo'}})
    if "data" in spec:
        inline_data = spec["data"]
        if not isinstance(inline_data, dict):
            data = inline_data
            del spec["data"]
        elif "values" in inline_data:
            data = inline_data["values"]
            del spec["data"]
        # Otherwise ({url: ...} / {name: ...}) the entry stays in the spec
        # for Vega-Lite itself to resolve.

    proto.spec = json.dumps(spec)
    proto.use_container_width = use_container_width

    if data is not None:
        data_frame.marshall_data_frame(data, proto.data)
# See https://vega.github.io/vega-lite/docs/encoding.html
_CHANNELS: Final = {
"x",
"y",
"x2",
"y2",
"xError",
"yError2",
"xError",
"yError2",
"longitude",
"latitude",
"color",
"opacity",
"fillOpacity",
"strokeOpacity",
"strokeWidth",
"size",
"shape",
"text",
"tooltip",
"href",
"key",
"order",
"detail",
"facet",
"row",
"column",
} | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/legacy_vega_lite.py | 0.857067 | 0.291082 | legacy_vega_lite.py | pypi |
import contextlib
import inspect
from typing import TYPE_CHECKING, Any, cast
from typing_extensions import Final
from streamlit.logger import get_logger
from streamlit.proto.DocString_pb2 import DocString as DocStringProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
# Module-level logger for this file.
LOGGER: Final = get_logger(__name__)

# Module names that _marshall() replaces with plain "streamlit" in st.help
# output, so users aren't shown confusing internal module paths.
CONFUSING_STREAMLIT_MODULES: Final = (
    "streamlit.echo",
    "streamlit.delta_generator",
    "streamlit.runtime.legacy_caching.caching",
)

# Signature prefixes that _get_signature() rewrites to "(": they expose the
# implicit leading "element" parameter of DeltaGenerator methods.
CONFUSING_STREAMLIT_SIG_PREFIXES: Final = ("(element, ",)
class HelpMixin:
    """Mixin providing the st.help command."""

    @gather_metrics("help")
    def help(self, obj: Any) -> "DeltaGenerator":
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------
        Don't remember how to initialize a dataframe? Try this:

        >>> import streamlit as st
        >>> import pandas
        >>>
        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> import streamlit as st
        >>>
        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        # _marshall() extracts the name/module/signature/docs into the proto.
        doc_string_proto = DocStringProto()
        _marshall(doc_string_proto, obj)
        return self.dg._enqueue("doc_string", doc_string_proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def _marshall(doc_string_proto: DocStringProto, obj: Any) -> None:
    """Construct a DocString object.

    Fills name, module, type, signature, and docstring fields of the proto
    from obj. See DeltaGenerator.help for docs.
    """
    # Some objects (e.g. instances) have no __name__; just leave it unset.
    with contextlib.suppress(AttributeError):
        doc_string_proto.name = obj.__name__

    module_name = getattr(obj, "__module__", None)
    if module_name in CONFUSING_STREAMLIT_MODULES:
        # Internal module paths would only confuse users: report "streamlit".
        doc_string_proto.module = "streamlit"
    elif module_name is not None:
        doc_string_proto.module = module_name
    # Otherwise leave doc_string_proto.module as an empty string (default).

    obj_type = type(obj)
    doc_string_proto.type = str(obj_type)

    if callable(obj):
        doc_string_proto.signature = _get_signature(obj)

    doc_string = inspect.getdoc(obj)

    # Sometimes an object has no docstring, but the object's type does; use
    # the type's docstring then. Exceptions: when obj is itself a class
    # (obj_type is type) we don't print type's docs, and an undocumented
    # function/method keeps its missing docstring.
    fall_back_to_type_docs = (
        doc_string is None
        and obj_type is not type
        and not inspect.isfunction(obj)
        and not inspect.ismethod(obj)
    )
    if fall_back_to_type_docs:
        doc_string = inspect.getdoc(obj_type)

    if doc_string is None:
        doc_string = "No docs available."
    doc_string_proto.doc_string = doc_string
def _get_signature(f):
is_delta_gen = False
with contextlib.suppress(AttributeError):
is_delta_gen = f.__module__ == "streamlit.delta_generator"
# Functions such as numpy.minimum don't have a __module__ attribute,
# since we're only using it to check if its a DeltaGenerator, its ok
# to continue
sig = ""
with contextlib.suppress(ValueError):
sig = str(inspect.signature(f))
if is_delta_gen:
for prefix in CONFUSING_STREAMLIT_SIG_PREFIXES:
if sig.startswith(prefix):
sig = sig.replace(prefix, "(")
break
return sig | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/doc_string.py | 0.72662 | 0.198841 | doc_string.py | pypi |
import inspect
import json as json
import types
from typing import TYPE_CHECKING, Any, List, Tuple, Type, cast
import numpy as np
from typing_extensions import Final
from streamlit import type_util
from streamlit.errors import StreamlitAPIException
from streamlit.logger import get_logger
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.state import SessionStateProxy
from streamlit.user_info import UserInfoProxy
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
# Special methods:

# Types that st.write renders via st.help (formatted docstring display)
# instead of plain text.
HELP_TYPES: Final[Tuple[Type[Any], ...]] = (
    types.BuiltinFunctionType,
    types.BuiltinMethodType,
    types.FunctionType,
    types.MethodType,
    types.ModuleType,
)

# Module-level logger for this file.
_LOGGER = get_logger(__name__)
class WriteMixin:
    """Mixin providing the st.write command."""

    @gather_metrics("write")
    def write(self, *args: Any, unsafe_allow_html: bool = False, **kwargs) -> None:
        """Write arguments to the app.

        This is the Swiss Army knife of Streamlit commands: it does different
        things depending on what you throw at it. Unlike other Streamlit commands,
        write() has some unique properties:

        1. You can pass in multiple arguments, all of which will be written.
        2. Its behavior depends on the input types as follows.
        3. It returns None, so its "slot" in the App cannot be reused.

        Parameters
        ----------
        *args : any
            One or many objects to print to the App.

            Arguments are handled as follows:

            - write(string) : Prints the formatted Markdown string, with
              support for LaTeX expression, emoji shortcodes, and colored text.
              See docs for st.markdown for more.
            - write(data_frame) : Displays the DataFrame as a table.
            - write(error) : Prints an exception specially.
            - write(func) : Displays information about a function.
            - write(module) : Displays information about the module.
            - write(dict) : Displays dict in an interactive widget.
            - write(mpl_fig) : Displays a Matplotlib figure.
            - write(altair) : Displays an Altair chart.
            - write(keras) : Displays a Keras model.
            - write(graphviz) : Displays a Graphviz graph.
            - write(plotly_fig) : Displays a Plotly figure.
            - write(bokeh_fig) : Displays a Bokeh figure.
            - write(sympy_expr) : Prints SymPy expression using LaTeX.
            - write(htmlable) : Prints _repr_html_() for the object if available.
            - write(obj) : Prints str(obj) if otherwise unknown.

        unsafe_allow_html : bool
            This is a keyword-only argument that defaults to False.

            By default, any HTML tags found in strings will be escaped and
            therefore treated as pure text. This behavior may be turned off by
            setting this argument to True.

            That said, *we strongly advise against it*. It is hard to write secure
            HTML, so by using this argument you may be compromising your users'
            security. For more information, see:

            https://github.com/streamlit/streamlit/issues/152

        Example
        -------
        Its basic use case is to draw Markdown-formatted text, whenever the
        input is a string:

        >>> import streamlit as st
        >>>
        >>> st.write('Hello, *World!* :sunglasses:')

        .. output::
           https://doc-write1.streamlitapp.com/
           height: 150px

        As mentioned earlier, `st.write()` also accepts other data formats, such as
        numbers, data frames, styled data frames, and assorted objects:

        >>> import streamlit as st
        >>> import pandas as pd
        >>>
        >>> st.write(1234)
        >>> st.write(pd.DataFrame({
        ...     'first column': [1, 2, 3, 4],
        ...     'second column': [10, 20, 30, 40],
        ... }))

        .. output::
           https://doc-write2.streamlitapp.com/
           height: 350px

        Finally, you can pass in multiple arguments to do things like:

        >>> import streamlit as st
        >>>
        >>> st.write('1 + 1 = ', 2)
        >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')

        .. output::
           https://doc-write3.streamlitapp.com/
           height: 410px

        Oh, one more thing: `st.write` accepts chart objects too! For example:

        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.write(c)

        .. output::
           https://doc-vega-lite-chart.streamlitapp.com/
           height: 300px

        """
        if kwargs:
            _LOGGER.warning(
                'Invalid arguments were passed to "st.write" function. Support for '
                "passing such unknown keywords arguments will be dropped in future. "
                "Invalid arguments were: %s",
                kwargs,
            )

        # Adjacent string args are accumulated here and emitted as a single
        # markdown element by flush_buffer().
        string_buffer: List[str] = []

        # This bans some valid cases like: e = st.empty(); e.write("a", "b").
        # BUT: 1) such cases are rare, 2) this rule is easy to understand,
        # and 3) this rule should be removed once we have st.container()
        if not self.dg._is_top_level and len(args) > 1:
            raise StreamlitAPIException(
                "Cannot replace a single element with multiple elements.\n\n"
                "The `write()` method only supports multiple elements when "
                "inserting elements rather than replacing. That is, only "
                "when called as `st.write()` or `st.sidebar.write()`."
            )

        def flush_buffer():
            # Emit any accumulated string args as one markdown call.
            if string_buffer:
                self.dg.markdown(
                    " ".join(string_buffer),
                    unsafe_allow_html=unsafe_allow_html,
                )
                string_buffer[:] = []

        for arg in args:
            # Order matters!
            if isinstance(arg, str):
                string_buffer.append(arg)
            elif type_util.is_snowpark_or_pyspark_data_object(arg):
                flush_buffer()
                self.dg.dataframe(arg)
            elif type_util.is_dataframe_like(arg):
                flush_buffer()
                # >2-dimensional data can't be shown as a table; fall back to
                # its text representation.
                if len(np.shape(arg)) > 2:
                    self.dg.text(arg)
                else:
                    self.dg.dataframe(arg)
            elif isinstance(arg, Exception):
                flush_buffer()
                self.dg.exception(arg)
            elif isinstance(arg, HELP_TYPES):
                flush_buffer()
                self.dg.help(arg)
            elif type_util.is_altair_chart(arg):
                flush_buffer()
                self.dg.altair_chart(arg)
            elif type_util.is_type(arg, "matplotlib.figure.Figure"):
                flush_buffer()
                self.dg.pyplot(arg)
            elif type_util.is_plotly_chart(arg):
                flush_buffer()
                self.dg.plotly_chart(arg)
            elif type_util.is_type(arg, "bokeh.plotting.figure.Figure"):
                flush_buffer()
                self.dg.bokeh_chart(arg)
            elif type_util.is_graphviz_chart(arg):
                flush_buffer()
                self.dg.graphviz_chart(arg)
            elif type_util.is_sympy_expession(arg):
                flush_buffer()
                self.dg.latex(arg)
            elif type_util.is_keras_model(arg):
                from tensorflow.python.keras.utils import vis_utils

                flush_buffer()
                dot = vis_utils.model_to_dot(arg)
                self.dg.graphviz_chart(dot.to_string())
            elif isinstance(arg, (dict, list, SessionStateProxy, UserInfoProxy)):
                flush_buffer()
                self.dg.json(arg)
            elif type_util.is_namedtuple(arg):
                flush_buffer()
                self.dg.json(json.dumps(arg._asdict()))
            elif type_util.is_pydeck(arg):
                flush_buffer()
                self.dg.pydeck_chart(arg)
            elif inspect.isclass(arg):
                flush_buffer()
                self.dg.text(arg)
            elif hasattr(arg, "_repr_html_"):
                # NOTE(review): unlike the other branches, this one does not
                # call flush_buffer() first — confirm whether that's intended.
                self.dg.markdown(
                    arg._repr_html_(),
                    unsafe_allow_html=True,
                )
            else:
                # Unknown type: show str(obj) as inline code in the markdown
                # buffer, escaping backticks.
                string_buffer.append("`%s`" % str(arg).replace("`", "\\`"))

        flush_buffer()
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/write.py | 0.830044 | 0.292065 | write.py | pypi |
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Union, cast
from typing_extensions import Literal
from streamlit import config
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from altair.vegalite.v4.api import Chart
from streamlit.delta_generator import DeltaGenerator
from streamlit.elements.arrow import Data
def _use_arrow() -> bool:
    """True if we're using Apache Arrow for DataFrame serialization."""
    serialization = config.get_option("global.dataFrameSerialization")
    # Explicitly coerce to bool here because mypy is (incorrectly) complaining
    # that we're trying to return 'Any'.
    return bool(serialization == "arrow")
class DataFrameSelectorMixin:
    """Mixin whose dataframe/table/chart commands route to either the Arrow
    or the legacy implementation based on _use_arrow()."""

    @gather_metrics("dataframe")
    def dataframe(
        self,
        data: "Data" = None,
        width: Optional[int] = None,
        height: Optional[int] = None,
        *,
        use_container_width: bool = False,
    ) -> "DeltaGenerator":
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, snowflake.snowpark.table.Table, Iterable, dict, or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell
            values and colors. (It does not support some of the more exotic
            pandas styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!

            Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization
            (i.e. with `config.dataFrameSerialization = "legacy"`).
            To use pyarrow tables, please enable pyarrow by changing the config setting,
            `config.dataFrameSerialization = "arrow"`.
        width : int or None
            Desired width of the dataframe expressed in pixels. If None, the width
            will be automatically calculated based on the column content.
        height : int or None
            Desired height of the dataframe expressed in pixels. If None, a
            default height is used.
        use_container_width : bool
            If True, set the dataframe width to the width of the parent container.
            This takes precedence over the width argument.
            This argument can only be supplied by keyword.

        Examples
        --------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(50, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://doc-dataframe.streamlitapp.com/
           height: 410px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(10, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://doc-dataframe1.streamlitapp.com/
           height: 410px

        """
        # Route to the Arrow or legacy implementation per the active config.
        if _use_arrow():
            return self.dg._arrow_dataframe(
                data, width, height, use_container_width=use_container_width
            )
        else:
            # NOTE(review): the legacy call does not forward
            # use_container_width — confirm the legacy implementation has no
            # such option.
            return self.dg._legacy_dataframe(data, width, height)
@gather_metrics("table")
def table(self, data: "Data" = None) -> "DeltaGenerator":
"""Display a static table.
This differs from `st.dataframe` in that the table in this case is
static: its entire contents are laid out directly on the page.
Parameters
----------
data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, snowflake.snowpark.table.Table, Iterable, dict, or None
The table data.
Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization
(i.e. with `config.dataFrameSerialization = "legacy"`).
To use pyarrow tables, please enable pyarrow by changing the config setting,
`config.dataFrameSerialization = "arrow"`.
Example
-------
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>>
>>> df = pd.DataFrame(
... np.random.randn(10, 5),
... columns=('col %d' % i for i in range(5)))
...
>>> st.table(df)
.. output::
https://doc-table.streamlitapp.com/
height: 480px
"""
if _use_arrow():
return self.dg._arrow_table(data)
else:
return self.dg._legacy_table(data)
    @gather_metrics("line_chart")
    def line_chart(
        self,
        data: "Data" = None,
        *,
        x: Union[str, None] = None,
        y: Union[str, Sequence[str], None] = None,
        width: int = 0,
        height: int = 0,
        use_container_width: bool = True,
    ) -> "DeltaGenerator":
        """Display a line chart.

        This is syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        If st.line_chart does not guess the data specification
        correctly, try specifying your desired chart using st.altair_chart.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, snowflake.snowpark.table.Table, Iterable, dict or None
            Data to be plotted.

            Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization
            (i.e. with `config.dataFrameSerialization = "legacy"`).
            To use pyarrow tables, please enable pyarrow by changing the config setting,
            `config.dataFrameSerialization = "arrow"`.
        x : str or None
            Column name to use for the x-axis. If None, uses the data index for the x-axis.
            This argument can only be supplied by keyword.
        y : str, sequence of str, or None
            Column name(s) to use for the y-axis. If a sequence of strings, draws several series
            on the same chart by melting your wide-format table into a long-format table behind
            the scenes. If None, draws the data of all remaining columns as data series.
            This argument can only be supplied by keyword.
        width : int
            The chart width in pixels. If 0, selects the width automatically.
            This argument can only be supplied by keyword.
        height : int
            The chart height in pixels. If 0, selects the height automatically.
            This argument can only be supplied by keyword.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.
            This argument can only be supplied by keyword.

        Example
        -------
        >>> import streamlit as st
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://doc-line-chart.streamlitapp.com/
           height: 400px

        """
        # Route to the Arrow or legacy implementation per the active config.
        if _use_arrow():
            return self.dg._arrow_line_chart(
                data,
                x=x,
                y=y,
                width=width,
                height=height,
                use_container_width=use_container_width,
            )
        else:
            # NOTE(review): the legacy call does not forward x/y, so column
            # selection is silently dropped on this path — confirm intended.
            return self.dg._legacy_line_chart(
                data,
                width=width,
                height=height,
                use_container_width=use_container_width,
            )
@gather_metrics("area_chart")
def area_chart(
self,
data: "Data" = None,
*,
x: Union[str, None] = None,
y: Union[str, Sequence[str], None] = None,
width: int = 0,
height: int = 0,
use_container_width: bool = True,
) -> "DeltaGenerator":
"""Display an area chart.
This is just syntax-sugar around st.altair_chart. The main difference
is this command uses the data's own column and indices to figure out
the chart's spec. As a result this is easier to use for many "just plot
this" scenarios, while being less customizable.
If st.area_chart does not guess the data specification
correctly, try specifying your desired chart using st.altair_chart.
Parameters
----------
data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, snowflake.snowpark.table.Table, Iterable, or dict
Data to be plotted.
Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization
(i.e. with `config.dataFrameSerialization = "legacy"`).
To use pyarrow tables, please enable pyarrow by changing the config setting,
`config.dataFrameSerialization = "arrow"`.
x : str or None
Column name to use for the x-axis. If None, uses the data index for the x-axis.
This argument can only be supplied by keyword.
y : str, sequence of str, or None
Column name(s) to use for the y-axis. If a sequence of strings, draws several series
on the same chart by melting your wide-format table into a long-format table behind
the scenes. If None, draws the data of all remaining columns as data series.
This argument can only be supplied by keyword.
width : int
The chart width in pixels. If 0, selects the width automatically.
This argument can only be supplied by keyword.
height : int
The chart height in pixels. If 0, selects the height automatically.
This argument can only be supplied by keyword.
use_container_width : bool
If True, set the chart width to the column width. This takes
precedence over the width argument.
This argument can only be supplied by keyword.
Example
-------
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>>
>>> chart_data = pd.DataFrame(
... np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
...
>>> st.area_chart(chart_data)
.. output::
https://doc-area-chart.streamlitapp.com/
height: 400px
"""
if _use_arrow():
return self.dg._arrow_area_chart(
data,
x=x,
y=y,
width=width,
height=height,
use_container_width=use_container_width,
)
else:
return self.dg._legacy_area_chart(
data,
width=width,
height=height,
use_container_width=use_container_width,
)
@gather_metrics("bar_chart")
def bar_chart(
self,
data: "Data" = None,
*,
x: Union[str, None] = None,
y: Union[str, Sequence[str], None] = None,
width: int = 0,
height: int = 0,
use_container_width: bool = True,
) -> "DeltaGenerator":
"""Display a bar chart.
This is just syntax-sugar around st.altair_chart. The main difference
is this command uses the data's own column and indices to figure out
the chart's spec. As a result this is easier to use for many "just plot
this" scenarios, while being less customizable.
If st.bar_chart does not guess the data specification
correctly, try specifying your desired chart using st.altair_chart.
Parameters
----------
data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, snowflake.snowpark.table.Table, Iterable, or dict
Data to be plotted.
Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization
(i.e. with `config.dataFrameSerialization = "legacy"`).
To use pyarrow tables, please enable pyarrow by changing the config setting,
`config.dataFrameSerialization = "arrow"`.
x : str or None
Column name to use for the x-axis. If None, uses the data index for the x-axis.
This argument can only be supplied by keyword.
y : str, sequence of str, or None
Column name(s) to use for the y-axis. If a sequence of strings, draws several series
on the same chart by melting your wide-format table into a long-format table behind
the scenes. If None, draws the data of all remaining columns as data series.
This argument can only be supplied by keyword.
width : int
The chart width in pixels. If 0, selects the width automatically.
This argument can only be supplied by keyword.
height : int
The chart height in pixels. If 0, selects the height automatically.
This argument can only be supplied by keyword.
use_container_width : bool
If True, set the chart width to the column width. This takes
precedence over the width argument.
This argument can only be supplied by keyword.
Example
-------
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>>
>>> chart_data = pd.DataFrame(
... np.random.randn(20, 3),
... columns=["a", "b", "c"])
...
>>> st.bar_chart(chart_data)
.. output::
https://doc-bar-chart.streamlitapp.com/
height: 400px
"""
if _use_arrow():
return self.dg._arrow_bar_chart(
data,
x=x,
y=y,
width=width,
height=height,
use_container_width=use_container_width,
)
else:
return self.dg._legacy_bar_chart(
data,
width=width,
height=height,
use_container_width=use_container_width,
)
@gather_metrics("altair_chart")
def altair_chart(
self,
altair_chart: "Chart",
use_container_width: bool = False,
theme: Union[None, Literal["streamlit"]] = "streamlit",
) -> "DeltaGenerator":
"""Display a chart using the Altair library.
Parameters
----------
altair_chart : altair.vegalite.v2.api.Chart
The Altair chart object to display.
use_container_width : bool
If True, set the chart width to the column width. This takes
precedence over Altair's native `width` value.
theme : "streamlit" or None
The theme of the chart. Currently, we only support "streamlit" for the Streamlit
defined design or None to fallback to the default behavior of the library.
Example
-------
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>> import altair as alt
>>>
>>> chart_data = pd.DataFrame(
... np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
...
>>> c = alt.Chart(chart_data).mark_circle().encode(
... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
>>>
>>> st.altair_chart(c, use_container_width=True)
Examples of Altair charts can be found at
https://altair-viz.github.io/gallery/.
.. output::
https://doc-vega-lite-chart.streamlitapp.com/
height: 300px
"""
if _use_arrow():
return self.dg._arrow_altair_chart(altair_chart, use_container_width, theme)
else:
return self.dg._legacy_altair_chart(altair_chart, use_container_width)
@gather_metrics("vega_lite_chart")
def vega_lite_chart(
self,
data: "Data" = None,
spec: Optional[Dict[str, Any]] = None,
use_container_width: bool = False,
theme: Union[None, Literal["streamlit"]] = "streamlit",
**kwargs: Any,
) -> "DeltaGenerator":
"""Display a chart using the Vega-Lite library.
Parameters
----------
data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, dict, or None
Either the data to be plotted or a Vega-Lite spec containing the
data (which more closely follows the Vega-Lite API).
Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization
(i.e. with `config.dataFrameSerialization = "legacy"`).
To use pyarrow tables, please enable pyarrow by changing the config setting,
`config.dataFrameSerialization = "arrow"`.
spec : dict or None
The Vega-Lite spec for the chart. If the spec was already passed in
the previous argument, this must be set to None. See
https://vega.github.io/vega-lite/docs/ for more info.
use_container_width : bool
If True, set the chart width to the column width. This takes
precedence over Vega-Lite's native `width` value.
theme : "streamlit" or None
The theme of the chart. Currently, we only support "streamlit" for the Streamlit
defined design or None to fallback to the default behavior of the library.
**kwargs : any
Same as spec, but as keywords.
Example
-------
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>>
>>> chart_data = pd.DataFrame(
... np.random.randn(200, 3),
... columns=['a', 'b', 'c'])
>>>
>>> st.vega_lite_chart(chart_data, {
... 'mark': {'type': 'circle', 'tooltip': True},
... 'encoding': {
... 'x': {'field': 'a', 'type': 'quantitative'},
... 'y': {'field': 'b', 'type': 'quantitative'},
... 'size': {'field': 'c', 'type': 'quantitative'},
... 'color': {'field': 'c', 'type': 'quantitative'},
... },
... })
.. output::
https://doc-vega-lite-chart.streamlitapp.com/
height: 300px
Examples of Vega-Lite usage without Streamlit can be found at
https://vega.github.io/vega-lite/examples/. Most of those can be easily
translated to the syntax shown above.
"""
if _use_arrow():
return self.dg._arrow_vega_lite_chart(
data, spec, use_container_width, theme, **kwargs
)
else:
return self.dg._legacy_vega_lite_chart(
data, spec, use_container_width, **kwargs
)
@gather_metrics("add_rows")
def add_rows(self, data: "Data" = None, **kwargs) -> Optional["DeltaGenerator"]:
"""Concatenate a dataframe to the bottom of the current one.
Parameters
----------
data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, Iterable, dict, or None
Table to concat. Optional.
Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization
(i.e. with `config.dataFrameSerialization = "legacy"`).
To use pyarrow tables, please enable pyarrow by changing the config setting,
`config.dataFrameSerialization = "arrow"`.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
The named dataset to concat. Optional. You can only pass in 1
dataset (including the one in the data parameter).
Example
-------
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>>
>>> df1 = pd.DataFrame(
... np.random.randn(50, 20),
... columns=('col %d' % i for i in range(20)))
...
>>> my_table = st.table(df1)
>>>
>>> df2 = pd.DataFrame(
... np.random.randn(50, 20),
... columns=('col %d' % i for i in range(20)))
...
>>> my_table.add_rows(df2)
>>> # Now the table shown in the Streamlit app contains the data for
>>> # df1 followed by the data for df2.
You can do the same thing with plots. For example, if you want to add
more data to a line chart:
>>> # Assuming df1 and df2 from the example above still exist...
>>> my_chart = st.line_chart(df1)
>>> my_chart.add_rows(df2)
>>> # Now the chart shown in the Streamlit app contains the data for
>>> # df1 followed by the data for df2.
And for plots whose datasets are named, you can pass the data with a
keyword argument where the key is the name:
>>> my_chart = st.vega_lite_chart({
... 'mark': 'line',
... 'encoding': {'x': 'a', 'y': 'b'},
... 'datasets': {
... 'some_fancy_name': df1, # <-- named dataset
... },
... 'data': {'name': 'some_fancy_name'},
... }),
>>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword
"""
if _use_arrow():
return self.dg._arrow_add_rows(data, **kwargs)
else:
return self.dg._legacy_add_rows(data, **kwargs)
@property
def dg(self) -> "DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/dataframe_selector.py | 0.955423 | 0.437944 | dataframe_selector.py | pypi |
import json
from typing import TYPE_CHECKING, Any, List, Union, cast
from streamlit.proto.Json_pb2 import Json as JsonProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.state import SessionStateProxy
from streamlit.user_info import UserInfoProxy
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
def _ensure_serialization(o: object) -> Union[str, List[Any]]:
"""A repr function for json.dumps default arg, which tries to serialize sets as lists"""
if isinstance(o, set):
return list(o)
return repr(o)
class JsonMixin:
    @gather_metrics("json")
    def json(
        self,
        body: object,
        *,  # keyword-only arguments:
        expanded: bool = True,
    ) -> "DeltaGenerator":
        """Display an object or string as a pretty-printed JSON string.

        Parameters
        ----------
        body : object or str
            The object to print as JSON. All referenced objects should be
            JSON-serializable as well; a string is assumed to already contain
            serialized JSON.
        expanded : bool
            Whether the json element starts out expanded. Defaults to True.
            Keyword-only.

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> st.json({
        ...     'foo': 'bar',
        ...     'stuff': ['stuff 1', 'stuff 2'],
        ... })

        .. output::
           https://doc-json.streamlitapp.com/
           height: 385px
        """
        import streamlit as st

        # Session-state / user-info proxies serialize via their dict form.
        if isinstance(body, (SessionStateProxy, UserInfoProxy)):
            body = body.to_dict()

        if isinstance(body, str):
            serialized = body
        else:
            try:
                # Serialize body to string and try to interpret sets as lists.
                serialized = json.dumps(body, default=_ensure_serialization)
            except TypeError as err:
                st.warning(
                    "Warning: this data structure was not fully serializable as "
                    f"JSON due to one or more unexpected keys.  (Error was: {err})"
                )
                serialized = json.dumps(
                    body, skipkeys=True, default=_ensure_serialization
                )

        proto = JsonProto()
        proto.body = serialized
        proto.expanded = expanded
        return self.dg._enqueue("json", proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Optional, Union, cast
from typing_extensions import Literal, TypeAlias
from streamlit.elements.utils import get_label_visibility_proto_value
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Metric_pb2 import Metric as MetricProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text
from streamlit.type_util import LabelVisibility, maybe_raise_label_warnings
if TYPE_CHECKING:
import numpy as np
from streamlit.delta_generator import DeltaGenerator
# Accepted types for st.metric's ``value`` argument (numpy scalars included).
Value: TypeAlias = Union["np.integer", "np.floating", float, int, str, None]
# Accepted types for the optional ``delta`` argument.
Delta: TypeAlias = Union[float, int, str, None]
# Allowed values for the ``delta_color`` argument.
DeltaColor: TypeAlias = Literal["normal", "inverse", "off"]
@dataclass(frozen=True)
class MetricColorAndDirection:
    """Resolved display color and arrow direction for a metric's delta indicator."""
    # Proto enum value for the delta text color (RED / GREEN / GRAY).
    color: "MetricProto.MetricColor.ValueType"
    # Proto enum value for the delta arrow (UP / DOWN / NONE).
    direction: "MetricProto.MetricDirection.ValueType"
class MetricMixin:
    @gather_metrics("metric")
    def metric(
        self,
        label: str,
        value: Value,
        delta: Delta = None,
        delta_color: DeltaColor = "normal",
        help: Optional[str] = None,
        label_visibility: LabelVisibility = "visible",
    ) -> "DeltaGenerator":
        """Display a metric in big bold font, with an optional indicator of how the metric changed.

        Tip: to display a large number compactly, shorten it first with a
        package such as `millify <https://github.com/azaitsev/millify>`_ or
        `numerize <https://github.com/davidsa03/numerize>`_ — e.g.
        ``st.metric("Short number", millify(1234))`` renders ``1.2k``.

        Parameters
        ----------
        label : str
            The header or title for the metric. Supports a Markdown subset:
            bold, italics, strikethroughs, inline code, emojis and links; emoji
            shortcodes such as ``:+1:`` (full list at
            https://share.streamlit.io/streamlit/emoji-shortcodes); LaTeX
            wrapped in "$" or "$$" (supported functions listed at
            https://katex.org/docs/supported.html); and colored text via
            ``:color[text]`` with color one of blue, green, orange, red,
            violet.
        value : int, float, str, or None
            Value of the metric. None is rendered as a long dash.
        delta : int, float, str, or None
            Indicator of how the metric changed, rendered with an arrow below
            the metric. A negative number or a string starting with a minus
            sign points the arrow down with red text; otherwise the arrow
            points up with green text. None (default) hides the indicator.
        delta_color : str
            "normal" (default) colors as described above. "inverse" swaps the
            colors (red when positive, green when negative) — useful when a
            decrease is good, e.g. costs. "off" shows the delta in gray
            regardless of its value.
        help : str
            An optional tooltip displayed next to the metric label.
        label_visibility : "visible" or "hidden" or "collapsed"
            "hidden" hides the label but keeps its empty space (equivalent to
            label=""); "collapsed" removes both label and space. Default is
            "visible".

        Example
        -------
        >>> import streamlit as st
        >>>
        >>> st.metric(label="Temperature", value="70 °F", delta="1.2 °F")

        ``st.metric`` combines nicely with ``st.columns``:

        >>> col1, col2, col3 = st.columns(3)
        >>> col1.metric("Temperature", "70 °F", "1.2 °F")
        >>> col2.metric("Wind", "9 mph", "-8%")
        >>> col3.metric("Humidity", "86%", "4%")

        The delta color can be inverted or turned off:

        >>> st.metric(label="Gas price", value=4, delta=-0.5,
        ...     delta_color="inverse")
        >>> st.metric(label="Active developers", value=123, delta=123,
        ...     delta_color="off")
        """
        maybe_raise_label_warnings(label, label_visibility)

        proto = MetricProto()
        proto.body = _parse_value(value)
        proto.label = _parse_label(label)
        proto.delta = _parse_delta(delta)
        if help is not None:
            proto.help = dedent(help)

        resolved = _determine_delta_color_and_direction(
            cast(DeltaColor, clean_text(delta_color)), delta
        )
        proto.color = resolved.color
        proto.direction = resolved.direction

        proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )
        return self.dg._enqueue("metric", proto)

    @property
    def dg(self) -> "DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
def _parse_label(label: str) -> str:
if not isinstance(label, str):
raise TypeError(
f"'{str(label)}' is of type {str(type(label))}, which is not an accepted type."
" label only accepts: str. Please convert the label to an accepted type."
)
return label
def _parse_value(value: Value) -> str:
    """Convert a metric value to its display string.

    ``None`` renders as an em dash; int/float/str are stringified; numpy
    scalars are unwrapped via ``.item()``. Anything else raises TypeError.
    """
    if value is None:
        return "—"
    if isinstance(value, (int, float, str)):
        return str(value)
    if hasattr(value, "item"):
        # Add support for numpy values (e.g. int16, float64, etc.)
        try:
            # "item" could also be just a regular attribute, so guard the call.
            item = value.item()
            if isinstance(item, (int, float)):
                return str(item)
        except Exception:
            # If the numpy item is not a valid value, the TypeError below is raised.
            pass
    raise TypeError(
        f"'{str(value)}' is of type {str(type(value))}, which is not an accepted type."
        " value only accepts: int, float, str, or None."
        " Please convert the value to an accepted type."
    )
def _parse_delta(delta: Delta) -> str:
    """Convert the delta argument to its display string ("" hides the indicator)."""
    if delta is None or delta == "":
        return ""
    # Strings are dedented so leading indentation doesn't affect the
    # minus-sign check done later in _is_negative_delta.
    if isinstance(delta, str):
        return dedent(delta)
    if isinstance(delta, (int, float)):
        return str(delta)
    raise TypeError(
        f"'{str(delta)}' is of type {str(type(delta))}, which is not an accepted type."
        " delta only accepts: int, float, str, or None."
        " Please convert the value to an accepted type."
    )
def _determine_delta_color_and_direction(
    delta_color: DeltaColor,
    delta: Delta,
) -> MetricColorAndDirection:
    """Map the user's delta / delta_color choice to a proto color and arrow direction."""
    if delta_color not in {"normal", "inverse", "off"}:
        raise StreamlitAPIException(
            f"'{str(delta_color)}' is not an accepted value. delta_color only accepts: "
            "'normal', 'inverse', or 'off'"
        )

    # No delta at all: gray text, no arrow.
    if delta is None or delta == "":
        return MetricColorAndDirection(
            color=MetricProto.MetricColor.GRAY,
            direction=MetricProto.MetricDirection.NONE,
        )

    negative = _is_negative_delta(delta)
    if delta_color == "off":
        color = MetricProto.MetricColor.GRAY
    elif (delta_color == "inverse") == negative:
        # "normal" + non-negative, or "inverse" + negative -> green.
        color = MetricProto.MetricColor.GREEN
    else:
        # "normal" + negative, or "inverse" + non-negative -> red.
        color = MetricProto.MetricColor.RED
    direction = (
        MetricProto.MetricDirection.DOWN
        if negative
        else MetricProto.MetricDirection.UP
    )
    return MetricColorAndDirection(color=color, direction=direction)
def _is_negative_delta(delta: Delta) -> bool:
    """Return True when the (dedented) string form of *delta* starts with "-"."""
    text = dedent(str(delta))
    return text.startswith("-")
import plotly.graph_objects as go
import plotly.io as pio
# This is the streamlit theme for plotly where we pass in a template.data
# and a template.layout.
# Template.data is for changing specific graph properties in a general aspect
# such as Contour plots or Waterfall plots.
# Template.layout is for changing things such as the x axis and fonts and other
# general layout properties for general graphs.
# We pass in temporary colors to the frontend and the frontend will replace
# those colors because we want to change colors based on the background color.
# Start at #000001 because developers are likely to use #000000 themselves
CATEGORY_0 = "#000001"
CATEGORY_1 = "#000002"
CATEGORY_2 = "#000003"
CATEGORY_3 = "#000004"
CATEGORY_4 = "#000005"
CATEGORY_5 = "#000006"
CATEGORY_6 = "#000007"
CATEGORY_7 = "#000008"
CATEGORY_8 = "#000009"
CATEGORY_9 = "#000010"
SEQUENTIAL_0 = "#000011"
SEQUENTIAL_1 = "#000012"
SEQUENTIAL_2 = "#000013"
SEQUENTIAL_3 = "#000014"
SEQUENTIAL_4 = "#000015"
SEQUENTIAL_5 = "#000016"
SEQUENTIAL_6 = "#000017"
SEQUENTIAL_7 = "#000018"
SEQUENTIAL_8 = "#000019"
SEQUENTIAL_9 = "#000020"
DIVERGING_0 = "#000021"
DIVERGING_1 = "#000022"
DIVERGING_2 = "#000023"
DIVERGING_3 = "#000024"
DIVERGING_4 = "#000025"
DIVERGING_5 = "#000026"
DIVERGING_6 = "#000027"
DIVERGING_7 = "#000028"
DIVERGING_8 = "#000029"
DIVERGING_9 = "#000030"
DIVERGING_10 = "#000031"
INCREASING = "#000032"
DECREASING = "#000033"
TOTAL = "#000034"
GRAY_30 = "#000035"
GRAY_70 = "#000036"
GRAY_90 = "#000037"
BG_COLOR = "#000038"
FADED_TEXT_05 = "#000039"
BG_MIX = "#000040"
# Plotly represents continuous colorscale through an array of pairs.
# The pair's first index is the starting point and the next pair's first index is the end point.
# The pair's second index is the starting color and the next pair's second index is the end color.
# For more information, please refer to https://plotly.com/python/colorscales/
# Ten evenly spaced stops (step 1/9) mapping [0, 1] onto the sequential
# placeholder colors.
streamlit_colorscale = [
    [0.0, SEQUENTIAL_0],
    [0.1111111111111111, SEQUENTIAL_1],
    [0.2222222222222222, SEQUENTIAL_2],
    [0.3333333333333333, SEQUENTIAL_3],
    [0.4444444444444444, SEQUENTIAL_4],
    [0.5555555555555556, SEQUENTIAL_5],
    [0.6666666666666666, SEQUENTIAL_6],
    [0.7777777777777778, SEQUENTIAL_7],
    [0.8888888888888888, SEQUENTIAL_8],
    [1.0, SEQUENTIAL_9],
]
pio.templates["streamlit"] = go.layout.Template(
data=go.layout.template.Data(
candlestick=[
go.layout.template.data.Candlestick(
decreasing=go.candlestick.Decreasing(
line=go.candlestick.decreasing.Line(color=DECREASING)
),
increasing=go.candlestick.Increasing(
line=go.candlestick.increasing.Line(color=INCREASING)
),
)
],
contour=[go.layout.template.data.Contour(colorscale=streamlit_colorscale)],
contourcarpet=[
go.layout.template.data.Contourcarpet(colorscale=streamlit_colorscale)
],
heatmap=[go.layout.template.data.Heatmap(colorscale=streamlit_colorscale)],
histogram2d=[
go.layout.template.data.Histogram2d(colorscale=streamlit_colorscale)
],
icicle=[
go.layout.template.data.Icicle(textfont=go.icicle.Textfont(color="white"))
],
sankey=[
go.layout.template.data.Sankey(textfont=go.sankey.Textfont(color=GRAY_70))
],
scatter=[
go.layout.template.data.Scatter(
marker=go.scatter.Marker(line=go.scatter.marker.Line(width=0))
)
],
table=[
go.layout.template.data.Table(
cells=go.table.Cells(
fill=go.table.cells.Fill(color=BG_COLOR),
font=go.table.cells.Font(color=GRAY_90),
line=go.table.cells.Line(color=FADED_TEXT_05),
),
header=go.table.Header(
font=go.table.header.Font(color=GRAY_70),
line=go.table.header.Line(color=FADED_TEXT_05),
fill=go.table.header.Fill(color=BG_MIX),
),
)
],
waterfall=[
go.layout.template.data.Waterfall(
increasing=go.waterfall.Increasing(
marker=go.waterfall.increasing.Marker(color=INCREASING)
),
decreasing=go.waterfall.Decreasing(
marker=go.waterfall.decreasing.Marker(color=DECREASING)
),
totals=go.waterfall.Totals(
marker=go.waterfall.totals.Marker(color=TOTAL)
),
connector=go.waterfall.Connector(
line=go.waterfall.connector.Line(color=GRAY_70, width=2)
),
)
],
),
layout=go.Layout(
colorway=[
CATEGORY_0,
CATEGORY_1,
CATEGORY_2,
CATEGORY_3,
CATEGORY_4,
CATEGORY_5,
CATEGORY_6,
CATEGORY_7,
CATEGORY_8,
CATEGORY_9,
],
colorscale=go.layout.Colorscale(
sequential=streamlit_colorscale,
sequentialminus=streamlit_colorscale,
diverging=[
[0.0, DIVERGING_0],
[0.1, DIVERGING_1],
[0.2, DIVERGING_2],
[0.3, DIVERGING_3],
[0.4, DIVERGING_4],
[0.5, DIVERGING_5],
[0.6, DIVERGING_6],
[0.7, DIVERGING_7],
[0.8, DIVERGING_8],
[0.9, DIVERGING_9],
[1.0, DIVERGING_10],
],
),
coloraxis=go.layout.Coloraxis(colorscale=streamlit_colorscale),
),
) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/lib/streamlit_plotly_theme.py | 0.703346 | 0.425605 | streamlit_plotly_theme.py | pypi |
from typing import Any, Dict, Optional
def _unflatten_single_dict(flat_dict):
"""Convert a flat dict of key-value pairs to dict tree.
Example
-------
_unflatten_single_dict({
foo_bar_baz: 123,
foo_bar_biz: 456,
x_bonks: 'hi',
})
# Returns:
# {
# foo: {
# bar: {
# baz: 123,
# biz: 456,
# },
# },
# x: {
# bonks: 'hi'
# }
# }
Parameters
----------
flat_dict : dict
A one-level dict where keys are fully-qualified paths separated by
underscores.
Returns
-------
dict
A tree made of dicts inside of dicts.
"""
out: Dict[str, Any] = dict()
for pathstr, v in flat_dict.items():
path = pathstr.split("_")
prev_dict: Optional[Dict[str, Any]] = None
curr_dict = out
for k in path:
if k not in curr_dict:
curr_dict[k] = dict()
prev_dict = curr_dict
curr_dict = curr_dict[k]
if prev_dict is not None:
prev_dict[k] = v
return out
def unflatten(flat_dict, encodings=None):
"""Converts a flat dict of key-value pairs to a spec tree.
Example
-------
unflatten({
foo_bar_baz: 123,
foo_bar_biz: 456,
x_bonks: 'hi',
}, ['x'])
# Returns:
# {
# foo: {
# bar: {
# baz: 123,
# biz: 456,
# },
# },
# encoding: { # This gets added automatically
# x: {
# bonks: 'hi'
# }
# }
# }
Args
----
flat_dict: dict
A flat dict where keys are fully-qualified paths separated by
underscores.
encodings: set
Key names that should be automatically moved into the 'encoding' key.
Returns
-------
A tree made of dicts inside of dicts.
"""
if encodings is None:
encodings = set()
out_dict = _unflatten_single_dict(flat_dict)
for k, v in list(out_dict.items()):
# Unflatten child dicts:
if isinstance(v, dict):
v = unflatten(v, encodings)
elif hasattr(v, "__iter__"):
for i, child in enumerate(v):
if isinstance(child, dict):
v[i] = unflatten(child, encodings)
# Move items into 'encoding' if needed:
if k in encodings:
if "encoding" not in out_dict:
out_dict["encoding"] = dict()
out_dict["encoding"][k] = v
out_dict.pop(k)
return out_dict | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/elements/lib/dicttools.py | 0.923372 | 0.264309 | dicttools.py | pypi |
import hashlib
import os
import time
from pathlib import Path
from typing import Optional
# How many times to try to grab the MD5 hash.
_MAX_RETRIES: int = 5
# How long to wait between retries, in seconds.
_RETRY_WAIT_SECS: float = 0.1
def calc_md5_with_blocking_retries(
    path: str,
    *,  # keyword-only arguments:
    glob_pattern: Optional[str] = None,
    allow_nonexistent: bool = False,
) -> str:
    """Calculate the MD5 checksum of a given path.

    Files are hashed by their contents. Directories are hashed via a string
    combining the directory path with the (stabilized) names of the files
    inside it. When ``allow_nonexistent`` is True and nothing exists at
    ``path``, the path string itself is hashed instead.

    IMPORTANT: This method calls time.sleep(), which blocks execution. So you
    should only use this outside the main thread.
    """
    if allow_nonexistent and not os.path.exists(path):
        hashed_bytes = path.encode("UTF-8")
    elif os.path.isdir(path):
        pattern = glob_pattern if glob_pattern else "*"
        hashed_bytes = _stable_dir_identifier(path, pattern).encode("UTF-8")
    else:
        hashed_bytes = _get_file_content_with_blocking_retries(path)

    digest = hashlib.md5()
    digest.update(hashed_bytes)
    # Use hexdigest() instead of digest(), so it's easier to debug.
    return digest.hexdigest()
def path_modification_time(path: str, allow_nonexistent: bool = False) -> float:
    """Return the modification time of a path (file or directory).

    When ``allow_nonexistent`` is True and nothing exists at ``path``, return
    0.0, which guarantees any file/dir later created there compares newer than
    the last value returned here. Otherwise defer to ``os.stat``, which raises
    FileNotFoundError for a missing path.
    """
    if not allow_nonexistent:
        return os.stat(path).st_mtime
    if os.path.exists(path):
        return os.stat(path).st_mtime
    return 0.0
def _get_file_content_with_blocking_retries(file_path: str) -> bytes:
    """Read *file_path* as bytes, retrying briefly when it momentarily vanishes.

    There's a race condition where sometimes file_path no longer exists when
    we try to read it (since the file is in the process of being written), so
    we retry a few times, sleeping between attempts. See issue #186.
    """
    last_attempt = _MAX_RETRIES - 1
    for attempt in range(_MAX_RETRIES):
        try:
            with open(file_path, "rb") as f:
                return f.read()
        except FileNotFoundError as e:
            if attempt >= last_attempt:
                raise e
        time.sleep(_RETRY_WAIT_SECS)
    return b""  # unreachable; kept so every path returns bytes
def _dirfiles(dir_path: str, glob_pattern: str) -> str:
p = Path(dir_path)
filenames = sorted(
[f.name for f in p.glob(glob_pattern) if not f.name.startswith(".")]
)
return "+".join(filenames)
def _stable_dir_identifier(dir_path: str, glob_pattern: str) -> str:
    """Wait for a directory's listing to look stable-ish, then return an id for it.

    Many tools (e.g. git) and editors (e.g. vim) "edit" files (from the user's
    perspective) through some combination of deleting, creating, and moving
    files under the hood, so individual watchdog FileSystemEvents can't tell
    us whether a file was genuinely added to or removed from a directory.

    Instead we poll the directory listing until two consecutive reads agree
    (or we run out of retries). This best-effort approach is most likely fine:

      * The worst possible outcome is a false positive page added/removed
        notification, which isn't disastrous and can simply be ignored.
      * Deciding whether a create/delete/move event is a real change or part
        of a sequence the user perceives as an "edit" is impossible in
        general (fairly certainly undecidable).
    """
    snapshot = _dirfiles(dir_path, glob_pattern)
    for _ in range(_MAX_RETRIES):
        time.sleep(_RETRY_WAIT_SECS)
        latest = _dirfiles(dir_path, glob_pattern)
        if snapshot == latest:
            break
        snapshot = latest
    return f"{dir_path}+{snapshot}"
from typing import Callable, Optional, Type, Union
import click
import streamlit.watcher
from streamlit import config, env_util
from streamlit.logger import get_logger
from streamlit.watcher.polling_path_watcher import PollingPathWatcher
LOGGER = get_logger(__name__)
try:
    # Check if the watchdog module is installed.
    from streamlit.watcher.event_based_path_watcher import EventBasedPathWatcher
    watchdog_available = True
except ImportError:
    watchdog_available = False
    # Stub the EventBasedPathWatcher so it can be mocked by tests.
    # The stub also lets PathWatcherType (below) reference the name even when
    # watchdog is missing; it has no functional implementation.
    class EventBasedPathWatcher:  # type: ignore
        pass
# local_sources_watcher.py caches the return value of
# get_default_path_watcher_class(), so it needs to differentiate between the
# cases where it:
# 1. has yet to call get_default_path_watcher_class()
# 2. has called get_default_path_watcher_class(), which returned that no
# path watcher should be installed.
# This forces us to define this stub class since the cached value equaling
# None corresponds to case 1 above.
class NoOpPathWatcher:
    """Path watcher that does nothing.

    Used when file watching is disabled (``server.fileWatcherType = "none"``),
    so callers can instantiate a watcher unconditionally. Accepts the same
    constructor signature as the real watcher classes and ignores it.
    """

    def __init__(
        self,
        _path_str: str,
        _on_changed: Callable[[str], None],
        *,  # keyword-only arguments:
        glob_pattern: Optional[str] = None,
        allow_nonexistent: bool = False,
    ):
        pass
# EventBasedPathWatcher will be a stub and have no functional
# implementation if its import failed (due to missing watchdog module),
# so we can't reference it directly in this type.
# Union of every possible watcher implementation. EventBasedPathWatcher is
# referenced via its import-path string because the real class only exists
# when the watchdog package is installed (see the stub above).
PathWatcherType = Union[
    Type["streamlit.watcher.event_based_path_watcher.EventBasedPathWatcher"],
    Type[PollingPathWatcher],
    Type[NoOpPathWatcher],
]
def report_watchdog_availability():
    """Print a hint (via click) suggesting the user install watchdog.

    Emitted only when watchdog is not importable and the
    ``global.disableWatchdogWarning`` config option is not set.
    """
    if not watchdog_available:
        if not config.get_option("global.disableWatchdogWarning"):
            # On macOS, building watchdog may require the Xcode CLI tools.
            msg = "\n  $ xcode-select --install" if env_util.IS_DARWIN else ""
            click.secho(
                "  %s" % "For better performance, install the Watchdog module:",
                fg="blue",
                bold=True,
            )
            click.secho(
                """%s
  $ pip install watchdog
            """
                % msg
            )
def _watch_path(
    path: str,
    on_path_changed: Callable[[str], None],
    watcher_type: Optional[str] = None,
    *,  # keyword-only arguments:
    glob_pattern: Optional[str] = None,
    allow_nonexistent: bool = False,
) -> bool:
    """Create a PathWatcher for the given path if we have a viable
    PathWatcher class.

    Parameters
    ----------
    path
        Path to watch.
    on_path_changed
        Function that's called when the path changes.
    watcher_type
        Optional watcher_type string. If None, the
        `server.fileWatcherType` config option is used.
    glob_pattern
        Optional glob pattern used when watching a directory: only files
        matching the pattern count as created/deleted within it.
    allow_nonexistent
        If True, allow the file or directory at the given path to be
        nonexistent.

    Returns
    -------
    bool
        True if the path is now being watched, False if no viable
        PathWatcher class is available.
    """
    effective_type = (
        watcher_type
        if watcher_type is not None
        else config.get_option("server.fileWatcherType")
    )

    watcher_class = get_path_watcher_class(effective_type)
    if watcher_class is NoOpPathWatcher:
        return False

    # The watcher registers itself with the underlying machinery; no need to
    # retain the instance here.
    watcher_class(
        path,
        on_path_changed,
        glob_pattern=glob_pattern,
        allow_nonexistent=allow_nonexistent,
    )
    return True
def watch_file(
    path: str,
    on_file_changed: Callable[[str], None],
    watcher_type: Optional[str] = None,
) -> bool:
    """Watch a single file; thin wrapper around _watch_path."""
    return _watch_path(
        path=path,
        on_path_changed=on_file_changed,
        watcher_type=watcher_type,
    )
def watch_dir(
    path: str,
    on_dir_changed: Callable[[str], None],
    watcher_type: Optional[str] = None,
    *,  # keyword-only arguments:
    glob_pattern: Optional[str] = None,
    allow_nonexistent: bool = False,
) -> bool:
    """Watch a directory; thin wrapper around _watch_path."""
    return _watch_path(
        path=path,
        on_path_changed=on_dir_changed,
        watcher_type=watcher_type,
        glob_pattern=glob_pattern,
        allow_nonexistent=allow_nonexistent,
    )
def get_default_path_watcher_class() -> PathWatcherType:
    """Return the watcher class selected by the server.fileWatcherType
    config option.
    """
    configured_type = config.get_option("server.fileWatcherType")
    return get_path_watcher_class(configured_type)
def get_path_watcher_class(watcher_type: str) -> PathWatcherType:
    """Map a watcher_type string to a PathWatcher class.

    Acceptable values are 'auto', 'watchdog', 'poll' and 'none'. 'auto'
    prefers the event-based watcher and falls back to polling; 'watchdog'
    silently degrades to the no-op watcher when watchdog is not installed;
    anything unrecognized also maps to the no-op watcher.
    """
    wants_watchdog = watcher_type in ("auto", "watchdog")
    if wants_watchdog and watchdog_available:
        return EventBasedPathWatcher
    if watcher_type in ("auto", "poll"):
        return PollingPathWatcher
    return NoOpPathWatcher
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Optional
from streamlit.logger import get_logger
from streamlit.util import repr_
from streamlit.watcher import util
LOGGER = get_logger(__name__)
# Size of the shared thread pool that runs all polling tasks.
_MAX_WORKERS = 4
# Delay between successive checks of a watched path, in seconds.
_POLLING_PERIOD_SECS = 0.2
class PollingPathWatcher:
    """Watches a path on disk via a polling loop."""

    # Shared executor: all PollingPathWatcher instances poll on this pool,
    # which also keeps the instances referenced while they are active.
    _executor = ThreadPoolExecutor(max_workers=_MAX_WORKERS)

    @staticmethod
    def close_all() -> None:
        """Close top-level watcher object.

        This is a no-op, and exists for interface parity with
        EventBasedPathWatcher.
        """
        LOGGER.debug("Watcher closed")

    def __init__(
        self,
        path: str,
        on_changed: Callable[[str], None],
        *,  # keyword-only arguments:
        glob_pattern: Optional[str] = None,
        allow_nonexistent: bool = False,
    ) -> None:
        """Constructor.

        You do not need to retain a reference to a PollingPathWatcher to
        prevent it from being garbage collected. (The global _executor object
        retains references to all active instances.)
        """
        # TODO(vdonato): Modernize this by switching to pathlib.
        self._path = path
        self._on_changed = on_changed
        self._glob_pattern = glob_pattern
        self._allow_nonexistent = allow_nonexistent

        self._active = True
        # Take the initial fingerprints (mtime + content hash) that future
        # polls are compared against.
        self._modification_time = util.path_modification_time(
            self._path, self._allow_nonexistent
        )
        self._md5 = util.calc_md5_with_blocking_retries(
            self._path,
            glob_pattern=self._glob_pattern,
            allow_nonexistent=self._allow_nonexistent,
        )
        self._schedule()

    def __repr__(self) -> str:
        return repr_(self)

    def _schedule(self) -> None:
        # Queue one delayed check; each check re-schedules the next, forming
        # the polling loop.
        def poll_once():
            time.sleep(_POLLING_PERIOD_SECS)
            self._check_if_path_changed()

        PollingPathWatcher._executor.submit(poll_once)

    def _check_if_path_changed(self) -> None:
        if not self._active:
            # Watcher was closed; let the polling chain die out.
            return

        # Cheap mtime check first: only hash the contents when the mtime
        # advanced.
        mtime = util.path_modification_time(self._path, self._allow_nonexistent)
        if mtime <= self._modification_time:
            self._schedule()
            return
        self._modification_time = mtime

        new_md5 = util.calc_md5_with_blocking_retries(
            self._path,
            glob_pattern=self._glob_pattern,
            allow_nonexistent=self._allow_nonexistent,
        )
        if new_md5 == self._md5:
            self._schedule()
            return
        self._md5 = new_md5

        LOGGER.debug("Change detected: %s", self._path)
        self._on_changed(self._path)
        self._schedule()

    def close(self) -> None:
        """Stop watching the file system."""
        self._active = False
import urllib.parse as parse
from typing import Any, Dict, List
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
@gather_metrics("experimental_get_query_params")
def get_query_params() -> Dict[str, List[str]]:
"""Return the query parameters that is currently showing in the browser's URL bar.
Returns
-------
dict
The current query parameters as a dict. "Query parameters" are the part of the URL that comes
after the first "?".
Example
-------
Let's say the user's web browser is at
`http://localhost:8501/?show_map=True&selected=asia&selected=america`.
Then, you can get the query parameters using the following:
>>> import streamlit as st
>>>
>>> st.experimental_get_query_params()
{"show_map": ["True"], "selected": ["asia", "america"]}
Note that the values in the returned dict are *always* lists. This is
because we internally use Python's urllib.parse.parse_qs(), which behaves
this way. And this behavior makes sense when you consider that every item
in a query string is potentially a 1-element array.
"""
ctx = get_script_run_ctx()
if ctx is None:
return {}
return parse.parse_qs(ctx.query_string)
@gather_metrics("experimental_set_query_params")
def set_query_params(**query_params: Any) -> None:
"""Set the query parameters that are shown in the browser's URL bar.
Parameters
----------
**query_params : dict
The query parameters to set, as key-value pairs.
Example
-------
To point the user's web browser to something like
"http://localhost:8501/?show_map=True&selected=asia&selected=america",
you would do the following:
>>> import streamlit as st
>>>
>>> st.experimental_set_query_params(
... show_map=True,
... selected=["asia", "america"],
... )
"""
ctx = get_script_run_ctx()
if ctx is None:
return
ctx.query_string = parse.urlencode(query_params, doseq=True)
msg = ForwardMsg()
msg.page_info_changed.query_string = ctx.query_string
ctx.enqueue(msg) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/commands/query_params.py | 0.884139 | 0.367895 | query_params.py | pypi |
from abc import abstractmethod
from typing import List, NamedTuple
from streamlit.proto.openmetrics_data_model_pb2 import Metric as MetricProto
class CacheStat(NamedTuple):
    """Describes a single cache entry.

    Fields
    ------
    category_name : str
        Human-readable name of the cache "category" the entry belongs to,
        e.g. "st.memo" or "session_state".
    cache_name : str
        Human-readable name of the cache instance the entry belongs to (for
        function-decorator caches, typically the cached function's name).
        May be the empty string for single-instance categories.
    byte_length : int
        The entry's memory footprint in bytes.
    """

    category_name: str
    cache_name: str
    byte_length: int

    def to_metric_str(self) -> str:
        # Render as an OpenMetrics-style text sample.
        return (
            f'cache_memory_bytes{{cache_type="{self.category_name}"'
            f',cache="{self.cache_name}"}} {self.byte_length}'
        )

    def marshall_metric_proto(self, metric: MetricProto) -> None:
        """Fill an OpenMetrics `Metric` protobuf object."""
        for label_name, label_value in (
            ("cache_type", self.category_name),
            ("cache", self.cache_name),
        ):
            label = metric.labels.add()
            label.name = label_name
            label.value = label_value

        metric.metric_points.add().gauge_value.int_value = self.byte_length
class CacheStatsProvider:
    """Interface for objects that can report their cache memory usage."""

    @abstractmethod
    def get_stats(self) -> List[CacheStat]:
        # Implementations return one CacheStat per cache entry they hold.
        raise NotImplementedError
class StatsManager:
    """Aggregates cache statistics from all registered providers."""

    def __init__(self):
        self._cache_stats_providers: List[CacheStatsProvider] = []

    def register_provider(self, provider: CacheStatsProvider) -> None:
        """Register a CacheStatsProvider with the manager.

        This function is not thread-safe. Call it immediately after
        creation.
        """
        self._cache_stats_providers.append(provider)

    def get_stats(self) -> List[CacheStat]:
        """Return a list containing all stats from each registered provider."""
        return [
            stat
            for provider in self._cache_stats_providers
            for stat in provider.get_stats()
        ]
import contextlib
import inspect
import os
import sys
import threading
import time
import uuid
from collections.abc import Sized
from functools import wraps
from timeit import default_timer as timer
from typing import Any, Callable, List, Optional, Set, TypeVar, Union, cast, overload
from typing_extensions import Final
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Argument, Command
_LOGGER = get_logger(__name__)
# Limit the number of commands to keep the page profile message small
# since Segment allows only a maximum of 32kb per event.
_MAX_TRACKED_COMMANDS: Final = 200
# Only track a maximum of 25 uses per unique command since some apps use
# commands excessively (e.g. calling add_rows thousands of times in one rerun)
# making the page profile useless.
_MAX_TRACKED_PER_COMMAND: Final = 25
# A mapping to convert from the actual name to preferred/shorter representations
_OBJECT_NAME_MAPPING: Final = {
    "streamlit.delta_generator.DeltaGenerator": "DG",
    "pandas.core.frame.DataFrame": "DataFrame",
    "plotly.graph_objs._figure.Figure": "PlotlyFigure",
    "bokeh.plotting.figure.Figure": "BokehFigure",
    "matplotlib.figure.Figure": "MatplotlibFigure",
    "pandas.io.formats.style.Styler": "PandasStyler",
    "pandas.core.indexes.base.Index": "PandasIndex",
    "pandas.core.series.Series": "PandasSeries",
}
# A list of dependencies to check for attribution
_ATTRIBUTIONS_TO_CHECK: Final = [
    "snowflake",
    "torch",
    "tensorflow",
    "streamlit_extras",
    "streamlit_pydantic",
    "plost",
]
# Well-known Linux locations of the machine-id file; used to derive a stable
# anonymous installation identifier (see _get_machine_id_v3).
_ETC_MACHINE_ID_PATH = "/etc/machine-id"
_DBUS_MACHINE_ID_PATH = "/var/lib/dbus/machine-id"
def _get_machine_id_v3() -> str:
    """Get the machine ID

    This is a unique identifier for a user for tracking metrics in Segment,
    that is broken in different ways in some Linux distros and Docker images.
    - at times just a hash of '', which means many machines map to the same ID
    - at times a hash of the same string, when running in a Docker container

    Prefers /etc/machine-id, then the dbus machine-id file, and finally falls
    back to the MAC-address-derived value from uuid.getnode().
    """
    for machine_id_file in (_ETC_MACHINE_ID_PATH, _DBUS_MACHINE_ID_PATH):
        if os.path.isfile(machine_id_file):
            with open(machine_id_file, "r") as f:
                return f.read()
    return str(uuid.getnode())
class Installation:
    """Process-wide singleton holding the anonymized installation id."""

    _instance_lock = threading.Lock()
    _instance: Optional["Installation"] = None

    @classmethod
    def instance(cls) -> "Installation":
        """Returns the singleton Installation"""
        # We use a double-checked locking optimization to avoid the overhead
        # of acquiring the lock in the common case:
        # https://en.wikipedia.org/wiki/Double-checked_locking
        if cls._instance is None:
            with cls._instance_lock:
                if cls._instance is None:
                    cls._instance = Installation()
        return cls._instance

    def __init__(self):
        # Derive a stable UUID from the machine id: anonymized, but
        # consistent across runs on the same machine.
        raw_machine_id = _get_machine_id_v3()
        self.installation_id_v3 = str(uuid.uuid5(uuid.NAMESPACE_DNS, raw_machine_id))

    def __repr__(self) -> str:
        return util.repr_(self)

    @property
    def installation_id(self):
        # v3 is the current id scheme; expose it under the generic name.
        return self.installation_id_v3
def _get_type_name(obj: object) -> str:
"""Get a simplified name for the type of the given object."""
with contextlib.suppress(Exception):
obj_type = type(obj)
type_name = "unknown"
if hasattr(obj_type, "__qualname__"):
type_name = obj_type.__qualname__
elif hasattr(obj_type, "__name__"):
type_name = obj_type.__name__
if obj_type.__module__ != "builtins":
# Add the full module path
type_name = f"{obj_type.__module__}.{type_name}"
if type_name in _OBJECT_NAME_MAPPING:
type_name = _OBJECT_NAME_MAPPING[type_name]
return type_name
return "failed"
def _get_top_level_module(func: Callable[..., Any]) -> str:
"""Get the top level module for the given function."""
module = inspect.getmodule(func)
if module is None or not module.__name__:
return "unknown"
return module.__name__.split(".")[0]
def _get_arg_metadata(arg: object) -> Optional[str]:
"""Get metadata information related to the value of the given object."""
with contextlib.suppress(Exception):
if isinstance(arg, (bool)):
return f"val:{arg}"
if isinstance(arg, Sized):
return f"len:{len(arg)}"
return None
def _get_command_telemetry(
    _command_func: Callable[..., Any], _command_name: str, *args, **kwargs
) -> Command:
    """Get telemetry information for the given callable and its arguments."""
    arg_keywords = inspect.getfullargspec(_command_func).args
    is_method = inspect.ismethod(_command_func)

    self_arg: Optional[Any] = None
    arguments: List[Argument] = []

    for pos, arg in enumerate(args):
        # For bound methods the first spec entry is "self"; shift the keyword
        # index so positional args line up with their parameter names.
        kw_index = pos + 1 if is_method else pos
        keyword = arg_keywords[kw_index] if len(arg_keywords) > kw_index else f"{kw_index}"
        if keyword == "self":
            self_arg = arg
            continue
        argument = Argument(k=keyword, t=_get_type_name(arg), p=pos)
        metadata = _get_arg_metadata(arg)
        if metadata:
            argument.m = metadata
        arguments.append(argument)

    for keyword, value in kwargs.items():
        argument = Argument(k=keyword, t=_get_type_name(value))
        metadata = _get_arg_metadata(value)
        if metadata:
            argument.m = metadata
        arguments.append(argument)

    name = _command_name
    top_level_module = _get_top_level_module(_command_func)
    if top_level_module != "streamlit":
        # Commands tracked outside of the streamlit library get an enforced
        # prefix so they are distinguishable in telemetry.
        name = f"external:{top_level_module}:{name}"

    # Custom-component instantiation is reported under the component's name.
    if (
        name == "create_instance"
        and self_arg
        and hasattr(self_arg, "name")
        and self_arg.name
    ):
        name = f"component:{self_arg.name}"

    return Command(name=name, args=arguments)
def to_microseconds(seconds: float) -> int:
    """Convert seconds into microseconds (truncated to an int)."""
    microseconds_per_second = 1_000_000
    return int(seconds * microseconds_per_second)
F = TypeVar("F", bound=Callable[..., Any])
@overload
def gather_metrics(
name: str,
func: F,
) -> F:
...
@overload
def gather_metrics(
name: str,
func: None = None,
) -> Callable[[F], F]:
...
def gather_metrics(name: str, func: Optional[F] = None) -> Union[Callable[[F], F], F]:
    """Function decorator to add telemetry tracking to commands.

    Parameters
    ----------
    name : str
        The name used for telemetry tracking. Falls back to "undefined"
        (with a warning) if empty.
    func : callable or None
        The function to track. If None, a decorator is returned instead
        (decorator-factory form).

    Example
    -------
    >>> @gather_metrics("my_command")
    ... def my_command(url):
    ...     return url
    """
    if not name:
        _LOGGER.warning("gather_metrics: name is empty")
        name = "undefined"
    if func is None:
        # Support passing the params via function decorator
        def wrapper(f: F) -> F:
            return gather_metrics(
                name=name,
                func=f,
            )
        return wrapper
    else:
        # To make mypy type narrow Optional[F] -> F
        non_optional_func = func
        @wraps(non_optional_func)
        def wrapped_func(*args, **kwargs):
            exec_start = timer()
            # get_script_run_ctx gets imported here to prevent circular dependencies
            from streamlit.runtime.scriptrunner import get_script_run_ctx
            ctx = get_script_run_ctx()
            # Track only when stats gathering is on, we are not already inside
            # a tracked command, and the per-rerun command cap isn't reached.
            tracking_activated = (
                ctx is not None
                and ctx.gather_usage_stats
                and not ctx.command_tracking_deactivated
                and len(ctx.tracked_commands)
                < _MAX_TRACKED_COMMANDS  # Prevent too much memory usage
            )
            command_telemetry: Optional[Command] = None
            if ctx and tracking_activated:
                try:
                    command_telemetry = _get_command_telemetry(
                        non_optional_func, name, *args, **kwargs
                    )
                    if (
                        command_telemetry.name not in ctx.tracked_commands_counter
                        or ctx.tracked_commands_counter[command_telemetry.name]
                        < _MAX_TRACKED_PER_COMMAND
                    ):
                        ctx.tracked_commands.append(command_telemetry)
                    ctx.tracked_commands_counter.update([command_telemetry.name])
                    # Deactivate tracking to prevent calls inside already tracked commands
                    ctx.command_tracking_deactivated = True
                except Exception as ex:
                    # Always capture all exceptions since we want to make sure that
                    # the telemetry never causes any issues.
                    _LOGGER.debug("Failed to collect command telemetry", exc_info=ex)
            try:
                result = non_optional_func(*args, **kwargs)
            finally:
                # Reactivate tracking; runs whether or not the command raised.
                if ctx:
                    ctx.command_tracking_deactivated = False
            if tracking_activated and command_telemetry:
                # Set the execution time to the measured value
                command_telemetry.time = to_microseconds(timer() - exec_start)
            return result
        with contextlib.suppress(AttributeError):
            # Make this a well-behaved decorator by preserving important function
            # attributes.
            wrapped_func.__dict__.update(non_optional_func.__dict__)
            wrapped_func.__signature__ = inspect.signature(non_optional_func)  # type: ignore
        return cast(F, wrapped_func)
def create_page_profile_message(
    commands: List[Command],
    exec_time: int,
    prep_time: int,
    uncaught_exception: Optional[str] = None,
) -> ForwardMsg:
    """Create and return the full PageProfile ForwardMsg.

    Parameters
    ----------
    commands : List[Command]
        The commands tracked during the script run.
    exec_time : int
        Script execution time, in microseconds.
    prep_time : int
        Preparation time before execution, in microseconds.
    uncaught_exception : Optional[str]
        Type name of an uncaught exception, if the run failed.
    """
    msg = ForwardMsg()
    page_profile = msg.page_profile

    page_profile.commands.extend(commands)
    page_profile.exec_time = exec_time
    page_profile.prep_time = prep_time
    page_profile.headless = config.get_option("server.headless")

    # Collect all config options that have been manually set.
    config_options: Set[str] = set()
    if config._config_options:
        for option_name in config._config_options.keys():
            if not config.is_manually_set(option_name):
                # We only care about manually defined options
                continue
            config_option = config._config_options[option_name]
            if config_option.is_default:
                option_name = f"{option_name}:default"
            config_options.add(option_name)
    page_profile.config.extend(config_options)

    # Record which of the known attribution-worthy packages are imported.
    attributions: Set[str] = {
        attribution
        for attribution in _ATTRIBUTIONS_TO_CHECK
        if attribution in sys.modules
    }

    page_profile.os = str(sys.platform)
    page_profile.timezone = str(time.tzname)
    page_profile.attributions.extend(attributions)

    if uncaught_exception:
        page_profile.uncaught_exception = uncaught_exception
    return msg
from typing import Any, Optional
from streamlit import config
from streamlit.errors import MarkdownFormattedException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.forward_msg_cache import populate_hash_if_needed
class MessageSizeError(MarkdownFormattedException):
    """Exception raised when a websocket message is larger than the configured limit."""

    def __init__(self, failed_msg_str: Any):
        # failed_msg_str is the serialized message (bytes); only its length
        # is used, to report the offending size in MB.
        msg = self._get_message(failed_msg_str)
        super(MessageSizeError, self).__init__(msg)

    def _get_message(self, failed_msg_str: Any) -> str:
        # This needs to have zero indentation otherwise the markdown will render incorrectly.
        return (
            (
                """
**Data of size {message_size_mb:.1f} MB exceeds the message size limit of {message_size_limit_mb} MB.**

This is often caused by a large chart or dataframe. Please decrease the amount of data sent
to the browser, or increase the limit by setting the config option `server.maxMessageSize`.
[Click here to learn more about config options](https://docs.streamlit.io/library/advanced-features/configuration#set-configuration-options).

_Note that increasing the limit may lead to long loading times and large memory consumption
of the client's browser and the Streamlit server._
"""
            )
            .format(
                message_size_mb=len(failed_msg_str) / 1e6,
                message_size_limit_mb=(get_max_message_size_bytes() / 1e6),
            )
            .strip("\n")
        )
def is_cacheable_msg(msg: ForwardMsg) -> bool:
    """True if the given message qualifies for caching."""
    # Some message types never get cached.
    if msg.WhichOneof("type") in {"ref_hash", "initialize"}:
        return False
    min_cached_size = int(config.get_option("global.minCachedMessageSize"))
    return msg.ByteSize() >= min_cached_size
def serialize_forward_msg(msg: ForwardMsg) -> bytes:
    """Serialize a ForwardMsg to send to a client.

    If the serialized message exceeds the configured size limit, the message
    is replaced in-place with an exception message and re-serialized.
    """
    populate_hash_if_needed(msg)
    serialized = msg.SerializeToString()

    if len(serialized) > get_max_message_size_bytes():
        # Imported lazily to avoid a circular import.
        import streamlit.elements.exception as exception

        # Overwrite the offending ForwardMsg.delta with an error to display.
        # This assumes that the size limit wasn't exceeded due to metadata.
        exception.marshall(msg.delta.new_element.exception, MessageSizeError(serialized))
        serialized = msg.SerializeToString()

    return serialized
# This needs to be initialized lazily to avoid calling config.get_option() and
# thus initializing config options when this file is first imported.
_max_message_size_bytes: Optional[int] = None


def get_max_message_size_bytes() -> int:
    """Return the max websocket message size in bytes.

    The value comes from the ``server.maxMessageSize`` config option (in MB);
    it is read on first use and memoized in a module-level variable.
    """
    global _max_message_size_bytes

    if _max_message_size_bytes is None:
        max_mb = config.get_option("server.maxMessageSize")
        _max_message_size_bytes = max_mb * int(1e6)

    return _max_message_size_bytes
import os
import sys
import textwrap
from collections import namedtuple
from typing import Optional
import click
import toml
from streamlit import env_util, file_util, util
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
# WT_SESSION is a Windows Terminal specific environment variable. If it exists,
# we are on the latest Windows Terminal that supports emojis
_SHOW_EMOJIS = not env_util.IS_WINDOWS or os.environ.get("WT_SESSION")
# Display-only path of the config file, shown to the user in help texts below.
if env_util.IS_WINDOWS:
    _CONFIG_FILE_PATH = r"%userprofile%/.streamlit/config.toml"
else:
    _CONFIG_FILE_PATH = "~/.streamlit/config.toml"
_Activation = namedtuple(
    "_Activation",
    [
        "email",  # str : the user's email.
        "is_valid",  # boolean : whether the email is valid.
    ],
)
# IMPORTANT: Break the text below at 80 chars.
_EMAIL_PROMPT = """
{0}%(welcome)s

If you’d like to receive helpful onboarding emails, news, offers, promotions,
and the occasional swag, please enter your email address below. Otherwise,
leave this field blank.

%(email)s""".format(
    "👋 " if _SHOW_EMOJIS else ""
) % {
    "welcome": click.style("Welcome to Streamlit!", bold=True),
    "email": click.style("Email: ", fg="blue"),
}
# IMPORTANT: Break the text below at 80 chars.
_TELEMETRY_TEXT = """
You can find our privacy policy at %(link)s

Summary:
- This open source library collects usage statistics.
- We cannot see and do not store information contained inside Streamlit apps,
  such as text, charts, images, etc.
- Telemetry data is stored in servers in the United States.
- If you'd like to opt out, add the following to %(config)s,
  creating that file if necessary:

  [browser]
  gatherUsageStats = false
""" % {
    "link": click.style("https://streamlit.io/privacy-policy", underline=True),
    "config": click.style(_CONFIG_FILE_PATH),
}
# Short variant shown once in headless mode (see check_credentials).
_TELEMETRY_HEADLESS_TEXT = """
Collecting usage statistics. To deactivate, set browser.gatherUsageStats to False.
"""
# IMPORTANT: Break the text below at 80 chars.
_INSTRUCTIONS_TEXT = """
%(start)s
%(prompt)s %(hello)s
""" % {
    "start": click.style("Get started by typing:", fg="blue", bold=True),
    "prompt": click.style("$", fg="blue"),
    "hello": click.style("streamlit hello", bold=True),
}
class Credentials(object):
    """Credentials class.

    Singleton managing the user's activation state, persisted in
    ~/.streamlit/credentials.toml.
    """

    _singleton: Optional["Credentials"] = None

    @classmethod
    def get_current(cls):
        """Return the singleton instance."""
        if cls._singleton is None:
            Credentials()
        return Credentials._singleton

    def __init__(self):
        """Initialize class."""
        if Credentials._singleton is not None:
            raise RuntimeError(
                "Credentials already initialized. Use .get_current() instead"
            )
        self.activation = None
        self._conf_file = _get_credential_file_path()
        Credentials._singleton = self

    def __repr__(self) -> str:
        return util.repr_(self)

    def load(self, auto_resolve=False) -> None:
        """Load activation state from the credentials toml file.

        Parameters
        ----------
        auto_resolve : bool
            If True, a missing or corrupt file triggers the activation flow
            (or a reset) instead of raising.
        """
        if self.activation is not None:
            LOGGER.error("Credentials already loaded. Not rereading file.")
            return
        try:
            with open(self._conf_file, "r") as f:
                data = toml.load(f).get("general")
            if data is None:
                # Treated as a corrupt file: caught by the handler below.
                raise Exception
            self.activation = _verify_email(data.get("email"))
        except FileNotFoundError:
            if auto_resolve:
                self.activate(show_instructions=not auto_resolve)
                return
            raise RuntimeError(
                'Credentials not found. Please run "streamlit activate".'
            )
        except Exception:
            if auto_resolve:
                self.reset()
                self.activate(show_instructions=not auto_resolve)
                return
            raise Exception(
                textwrap.dedent(
                    """
                    Unable to load credentials from %s.
                    Run "streamlit reset" and try again.
                    """
                )
                % (self._conf_file)
            )

    def _check_activated(self, auto_resolve=True):
        """Check if streamlit is activated.

        Used by `streamlit run script.py`
        """
        try:
            self.load(auto_resolve)
        except Exception as e:
            # Covers the RuntimeError from load() as well.
            _exit(str(e))
        if self.activation is None or not self.activation.is_valid:
            _exit("Activation email not valid.")

    @classmethod
    def reset(cls):
        """Reset credentials by removing file.

        This is used by `streamlit activate reset` in case a user wants
        to start over.
        """
        c = Credentials.get_current()
        c.activation = None
        try:
            os.remove(c._conf_file)
        except OSError as e:
            # Lazy %-style args: let the logging framework do the formatting.
            LOGGER.error("Error removing credentials file: %s", e)

    def save(self):
        """Save to toml file."""
        if self.activation is None:
            return
        # Create intermediate directories if necessary
        os.makedirs(os.path.dirname(self._conf_file), exist_ok=True)
        # Write the file
        data = {"email": self.activation.email}
        with open(self._conf_file, "w") as f:
            toml.dump({"general": data}, f)

    def activate(self, show_instructions: bool = True) -> None:
        """Activate Streamlit.

        Used by `streamlit activate`.
        """
        try:
            self.load()
        except RuntimeError:
            # Runtime Error is raised if credentials file is not found. In that case,
            # `self.activation` is None and we will show the activation prompt below.
            pass
        if self.activation:
            if self.activation.is_valid:
                _exit("Already activated")
            else:
                _exit(
                    "Activation not valid. Please run "
                    "`streamlit activate reset` then `streamlit activate`"
                )
        else:
            # Prompt repeatedly until a valid (possibly empty) email is given.
            activated = False
            while not activated:
                email = click.prompt(
                    text=_EMAIL_PROMPT, prompt_suffix="", default="", show_default=False
                )
                self.activation = _verify_email(email)
                if self.activation.is_valid:
                    self.save()
                    click.secho(_TELEMETRY_TEXT)
                    if show_instructions:
                        click.secho(_INSTRUCTIONS_TEXT)
                    activated = True
                else:  # pragma: nocover
                    LOGGER.error("Please try again.")
def _verify_email(email: str) -> _Activation:
    """Verify the user's email address.

    The email can either be an empty string (if the user chooses not to enter
    it), or a string with a single '@' somewhere in it.

    Parameters
    ----------
    email : str

    Returns
    -------
    _Activation
        An _Activation object. Its 'is_valid' property will be True only if
        the email was validated.
    """
    stripped = email.strip()
    # Deliberately loose validation: the address is never used to send mail,
    # so anything empty or containing exactly one "@" is accepted.
    if stripped and stripped.count("@") != 1:
        LOGGER.error("That doesn't look like an email :(")
        return _Activation(None, False)
    return _Activation(stripped, True)
def _exit(message):  # pragma: nocover
    """Exit program with error."""
    LOGGER.error(message)
    # Non-zero status signals failure to the calling shell.
    sys.exit(-1)
def _get_credential_file_path():
    # Credentials live with the rest of Streamlit's per-user files
    # (typically ~/.streamlit/credentials.toml).
    return file_util.get_streamlit_file_path("credentials.toml")
def _check_credential_file_exists():
    # True if a credentials file has already been written for this user.
    return os.path.exists(_get_credential_file_path())
def check_credentials():
    """Check credentials and potentially activate.

    Note
    ----
    If there is no credential file and we are in headless mode, we should not
    check, since the credential would automatically be set to an empty
    string. In that case we only print a short telemetry notice (unless the
    stats option was set manually).
    """
    # Imported here rather than at module level (matches original behavior).
    from streamlit import config

    if not _check_credential_file_exists() and config.get_option("server.headless"):
        if not config.is_manually_set("browser.gatherUsageStats"):
            # If not manually defined, show short message about usage stats gathering.
            click.secho(_TELEMETRY_HEADLESS_TEXT)
        return

    Credentials.get_current()._check_activated()
import hashlib
from typing import TYPE_CHECKING, Dict, List, MutableMapping, Optional
from weakref import WeakKeyDictionary
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.stats import CacheStat, CacheStatsProvider
if TYPE_CHECKING:
from streamlit.runtime.app_session import AppSession
LOGGER = get_logger(__name__)
def populate_hash_if_needed(msg: ForwardMsg) -> str:
    """Compute and assign the unique hash for a ForwardMsg.

    If the ForwardMsg already has a hash, this is a no-op.

    Parameters
    ----------
    msg : ForwardMsg

    Returns
    -------
    string
        The message's hash, returned for convenience (it is also assigned to
        the ForwardMsg itself).
    """
    if msg.hash != "":
        return msg.hash

    # The metadata is excluded from the hash calculation: messages that
    # differ only in metadata should hash identically.
    metadata = msg.metadata
    msg.ClearField("metadata")

    # MD5 is good enough for what we need, which is uniqueness.
    hasher = hashlib.md5()
    hasher.update(msg.SerializeToString())
    msg.hash = hasher.hexdigest()

    # Restore metadata.
    msg.metadata.CopyFrom(metadata)
    return msg.hash
def create_reference_msg(msg: ForwardMsg) -> ForwardMsg:
    """Create a ForwardMsg that refers to the given message via its hash.

    The reference message also gets a copy of the source message's metadata.

    Parameters
    ----------
    msg : ForwardMsg
        The ForwardMsg to create the reference to.

    Returns
    -------
    ForwardMsg
        A new ForwardMsg that "points" to the original message via the
        ref_hash field.
    """
    reference = ForwardMsg(ref_hash=populate_hash_if_needed(msg))
    reference.metadata.CopyFrom(msg.metadata)
    return reference
class ForwardMsgCache(CacheStatsProvider):
    """A cache of ForwardMsgs.

    Large ForwardMsgs (e.g. those containing big DataFrame payloads) are
    stored in this cache. The server can choose to send a ForwardMsg's hash,
    rather than the message itself, to a client. Clients can then
    request messages from this cache via another endpoint.

    This cache is *not* thread safe. It's intended to only be accessed by
    the server thread.
    """

    class Entry:
        """Cache entry.

        Stores the cached message, and the set of AppSessions
        that we've sent the cached message to.
        """

        def __init__(self, msg: ForwardMsg):
            self.msg = msg
            # Maps each referencing session to the script_run_count it had
            # when it last referenced this message. Weak keys, so a dead
            # session doesn't keep this entry's bookkeeping alive.
            self._session_script_run_counts: MutableMapping[
                "AppSession", int
            ] = WeakKeyDictionary()

        def __repr__(self) -> str:
            return util.repr_(self)

        def add_session_ref(self, session: "AppSession", script_run_count: int) -> None:
            """Adds a reference to a AppSession that has referenced
            this Entry's message.

            Parameters
            ----------
            session : AppSession
            script_run_count : int
                The session's run count at the time of the call
            """
            prev_run_count = self._session_script_run_counts.get(session, 0)
            if script_run_count < prev_run_count:
                # Run counts are expected to be monotonically increasing.
                # Log (with lazy %-args, so nothing is formatted unless the
                # record is emitted) and clamp to the previous value.
                LOGGER.error(
                    "New script_run_count (%s) is < prev_run_count (%s). "
                    "This should never happen!",
                    script_run_count,
                    prev_run_count,
                )
                script_run_count = prev_run_count
            self._session_script_run_counts[session] = script_run_count

        def has_session_ref(self, session: "AppSession") -> bool:
            """True if the given session has referenced this Entry's message."""
            return session in self._session_script_run_counts

        def get_session_ref_age(
            self, session: "AppSession", script_run_count: int
        ) -> int:
            """The age of the given session's reference to the Entry,
            given a new script_run_count.
            """
            return script_run_count - self._session_script_run_counts[session]

        def remove_session_ref(self, session: "AppSession") -> None:
            del self._session_script_run_counts[session]

        def has_refs(self) -> bool:
            """True if this Entry has references from any AppSession.

            If not, it can be removed from the cache.
            """
            return len(self._session_script_run_counts) > 0

    def __init__(self):
        self._entries: Dict[str, "ForwardMsgCache.Entry"] = {}

    def __repr__(self) -> str:
        return util.repr_(self)

    def add_message(
        self, msg: ForwardMsg, session: "AppSession", script_run_count: int
    ) -> None:
        """Add a ForwardMsg to the cache.

        The cache will also record a reference to the given AppSession,
        so that it can track which sessions have already received
        each given ForwardMsg.

        Parameters
        ----------
        msg : ForwardMsg
        session : AppSession
        script_run_count : int
            The number of times the session's script has run
        """
        populate_hash_if_needed(msg)
        entry = self._entries.get(msg.hash, None)
        if entry is None:
            entry = ForwardMsgCache.Entry(msg)
            self._entries[msg.hash] = entry
        entry.add_session_ref(session, script_run_count)

    def get_message(self, hash: str) -> Optional[ForwardMsg]:
        """Return the message with the given ID if it exists in the cache.

        Parameters
        ----------
        hash : string
            The id of the message to retrieve.

        Returns
        -------
        ForwardMsg | None
        """
        # NB: the `hash` parameter shadows the builtin, but renaming it
        # would break keyword callers, so it's kept as-is.
        entry = self._entries.get(hash, None)
        return entry.msg if entry else None

    def has_message_reference(
        self, msg: ForwardMsg, session: "AppSession", script_run_count: int
    ) -> bool:
        """Return True if a session has a (non-expired) reference to a message."""
        populate_hash_if_needed(msg)

        entry = self._entries.get(msg.hash, None)
        if entry is None or not entry.has_session_ref(session):
            return False

        # Ensure we're not expired
        age = entry.get_session_ref_age(session, script_run_count)
        return age <= int(config.get_option("global.maxCachedMessageAge"))

    def remove_expired_session_entries(
        self, session: "AppSession", script_run_count: int
    ) -> None:
        """Remove any cached messages that have expired from the given session.

        This should be called each time a AppSession finishes executing.

        Parameters
        ----------
        session : AppSession
        script_run_count : int
            The number of times the session's script has run
        """
        max_age = config.get_option("global.maxCachedMessageAge")

        # Operate on a copy of our entries dict.
        # We may be deleting from it.
        for msg_hash, entry in self._entries.copy().items():
            if not entry.has_session_ref(session):
                continue

            age = entry.get_session_ref_age(session, script_run_count)
            if age > max_age:
                LOGGER.debug(
                    "Removing expired entry [session=%s, hash=%s, age=%s]",
                    id(session),
                    msg_hash,
                    age,
                )
                entry.remove_session_ref(session)
                if not entry.has_refs():
                    # The entry has no more references. Remove it from
                    # the cache completely.
                    del self._entries[msg_hash]

    def clear(self) -> None:
        """Remove all entries from the cache"""
        self._entries.clear()

    def get_stats(self) -> List[CacheStat]:
        """Return one CacheStat per cached message (for memory reporting)."""
        # Comprehension instead of an append-loop; the entry's hash key
        # isn't needed, so iterate values() only.
        return [
            CacheStat(
                category_name="ForwardMessageCache",
                cache_name="",
                byte_length=entry.msg.ByteSize(),
            )
            for entry in self._entries.values()
        ]
import io
import threading
from typing import Dict, List, NamedTuple, Tuple
from blinker import Signal
from streamlit import util
from streamlit.logger import get_logger
from streamlit.runtime.stats import CacheStat, CacheStatsProvider
LOGGER = get_logger(__name__)
class UploadedFileRec(NamedTuple):
    """Metadata and raw bytes for an uploaded file. Immutable."""

    # Unique ID assigned by UploadedFileManager. IDs are monotonically
    # increasing, so they can be used to compare files by age.
    id: int
    # The uploaded file's name.
    name: str
    # The file's type string (presumably a MIME type - confirm with callers).
    type: str
    # The file's raw contents.
    data: bytes
class UploadedFile(io.BytesIO):
    """A mutable uploaded file.

    This class extends BytesIO, which has copy-on-write semantics when
    initialized with `bytes`.
    """

    def __init__(self, record: UploadedFileRec):
        # BytesIO's copy-on-write semantics doesn't seem to be mentioned in
        # the Python docs - possibly because it's a CPython-only optimization
        # and not guaranteed to be in other Python runtimes. But it's detailed
        # here: https://hg.python.org/cpython/rev/79a5fbe2c78f
        super(UploadedFile, self).__init__(record.data)
        # Copy the record's metadata onto the instance for convenient access.
        self.id = record.id
        self.name = record.name
        self.type = record.type
        self.size = len(record.data)

    def __eq__(self, other: object) -> bool:
        # Equality is by manager-assigned ID only, not by contents.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable (Python sets __hash__ = None) - confirm that's intended.
        if not isinstance(other, UploadedFile):
            return NotImplemented
        return self.id == other.id

    def __repr__(self) -> str:
        return util.repr_(self)
class UploadedFileManager(CacheStatsProvider):
    """Holds files uploaded by users of the running Streamlit app,
    and emits an event signal when a file is added.

    This class can be used safely from multiple threads simultaneously.
    """

    def __init__(self):
        # List of files for a given widget in a given session, keyed by
        # (session_id, widget_id).
        self._files_by_id: Dict[Tuple[str, str], List[UploadedFileRec]] = {}

        # A counter that generates unique file IDs. Each file ID is greater
        # than the previous ID, which means we can use IDs to compare files
        # by age.
        self._file_id_counter = 1
        self._file_id_lock = threading.Lock()

        # Prevents concurrent access to the _files_by_id dict.
        # In remove_session_files(), we iterate over the dict's keys. It's
        # an error to mutate a dict while iterating; this lock prevents that.
        self._files_lock = threading.Lock()
        self.on_files_updated = Signal(
            doc="""Emitted when a file list is added to the manager or updated.

            Parameters
            ----------
            session_id : str
                The session_id for the session whose files were updated.
            """
        )

    def __repr__(self) -> str:
        return util.repr_(self)

    def add_file(
        self,
        session_id: str,
        widget_id: str,
        file: UploadedFileRec,
    ) -> UploadedFileRec:
        """Add a file to the FileManager, and return a new UploadedFileRec
        with its ID assigned.

        The "on_files_updated" Signal will be emitted.

        Safe to call from any thread.

        Parameters
        ----------
        session_id
            The ID of the session that owns the file.
        widget_id
            The widget ID of the FileUploader that created the file.
        file
            The file to add.

        Returns
        -------
        UploadedFileRec
            The added file, which has its unique ID assigned.
        """
        files_by_widget = session_id, widget_id

        # Assign the file a unique ID (the incoming record's ID is ignored).
        file_id = self._get_next_file_id()
        file = UploadedFileRec(
            id=file_id, name=file.name, type=file.type, data=file.data
        )

        with self._files_lock:
            # Create this widget's file list on first upload.
            self._files_by_id.setdefault(files_by_widget, []).append(file)

        self.on_files_updated.send(session_id)
        return file

    def get_all_files(self, session_id: str, widget_id: str) -> List[UploadedFileRec]:
        """Return all the files stored for the given widget.

        Safe to call from any thread.

        Parameters
        ----------
        session_id
            The ID of the session that owns the files.
        widget_id
            The widget ID of the FileUploader that created the files.
        """
        file_list_id = (session_id, widget_id)
        with self._files_lock:
            # Return a copy so callers can't mutate our internal list.
            return self._files_by_id.get(file_list_id, []).copy()

    def get_files(
        self, session_id: str, widget_id: str, file_ids: List[int]
    ) -> List[UploadedFileRec]:
        """Return the files with the given widget_id and file_ids.

        Safe to call from any thread.

        Parameters
        ----------
        session_id
            The ID of the session that owns the files.
        widget_id
            The widget ID of the FileUploader that created the files.
        file_ids
            List of file IDs. Only files whose IDs are in this list will be
            returned.
        """
        return [
            f for f in self.get_all_files(session_id, widget_id) if f.id in file_ids
        ]

    def remove_orphaned_files(
        self,
        session_id: str,
        widget_id: str,
        newest_file_id: int,
        active_file_ids: List[int],
    ) -> None:
        """Remove 'orphaned' files: files that have been uploaded and
        subsequently deleted, but haven't yet been removed from memory.

        Because FileUploader can live inside forms, file deletion is made a
        bit tricky: a file deletion should only happen after the form is
        submitted.

        FileUploader's widget value is an array of numbers that has two parts:
        - The first number is always 'this.state.newestServerFileId'.
        - The remaining 0 or more numbers are the file IDs of all the
          uploader's uploaded files.

        When the server receives the widget value, it deletes "orphaned"
        uploaded files. An orphaned file is any file associated with a given
        FileUploader whose file ID is not in the active_file_ids, and whose
        ID is <= `newestServerFileId`.

        This logic ensures that a FileUploader within a form doesn't have any
        of its "unsubmitted" uploads prematurely deleted when the script is
        re-run.

        Safe to call from any thread.
        """
        file_list_id = (session_id, widget_id)
        with self._files_lock:
            file_list = self._files_by_id.get(file_list_id)
            if file_list is None:
                return

            # Remove orphaned files from the list:
            # - `f.id in active_file_ids`:
            #   File is currently tracked by the widget. DON'T remove.
            # - `f.id > newest_file_id`:
            #   file was uploaded *after* the widget was most recently
            #   updated. (It's probably in a form.) DON'T remove.
            # - `f.id < newest_file_id and f.id not in active_file_ids`:
            #   File is not currently tracked by the widget, and was uploaded
            #   *before* this most recent update. This means it's been deleted
            #   by the user on the frontend, and is now "orphaned". Remove!
            new_list = [
                f for f in file_list if f.id > newest_file_id or f.id in active_file_ids
            ]
            self._files_by_id[file_list_id] = new_list
            num_removed = len(file_list) - len(new_list)

        if num_removed > 0:
            # Lazy %-args: only formatted if DEBUG logging is enabled.
            LOGGER.debug("Removed %s orphaned files", num_removed)

    def remove_file(self, session_id: str, widget_id: str, file_id: int) -> bool:
        """Remove the file with the given ID, if it exists.

        The "on_files_updated" Signal will be emitted if a file was removed.

        Safe to call from any thread.

        Returns
        -------
        bool
            True if the file was removed, or False if no such file exists.
        """
        file_list_id = (session_id, widget_id)
        with self._files_lock:
            file_list = self._files_by_id.get(file_list_id, None)
            if file_list is None:
                return False

            # Remove the file from its list.
            new_file_list = [file for file in file_list if file.id != file_id]
            if len(new_file_list) == len(file_list):
                # No file with the given ID existed. Previously this path
                # still returned True and emitted the update signal, which
                # contradicted the documented contract; report failure.
                return False
            self._files_by_id[file_list_id] = new_file_list

        self.on_files_updated.send(session_id)
        return True

    def _remove_files(self, session_id: str, widget_id: str) -> None:
        """Remove the file list for the provided widget in the
        provided session, if it exists.

        Does not emit any signals.

        Safe to call from any thread.
        """
        files_by_widget = session_id, widget_id
        with self._files_lock:
            self._files_by_id.pop(files_by_widget, None)

    def remove_files(self, session_id: str, widget_id: str) -> None:
        """Remove the file list for the provided widget in the
        provided session, if it exists.

        The "on_files_updated" Signal will be emitted.

        Safe to call from any thread.

        Parameters
        ----------
        session_id : str
            The ID of the session that owns the files.
        widget_id : str
            The widget ID of the FileUploader that created the files.
        """
        self._remove_files(session_id, widget_id)
        self.on_files_updated.send(session_id)

    def remove_session_files(self, session_id: str) -> None:
        """Remove all files that belong to the given session.

        Safe to call from any thread.

        Parameters
        ----------
        session_id : str
            The ID of the session whose files we're removing.
        """
        # Copy the keys into a list, because we'll be mutating the dictionary.
        with self._files_lock:
            all_ids = list(self._files_by_id.keys())

        for files_id in all_ids:
            if files_id[0] == session_id:
                self.remove_files(*files_id)

    def _get_next_file_id(self) -> int:
        """Return the next file ID and increment our ID counter."""
        with self._file_id_lock:
            file_id = self._file_id_counter
            self._file_id_counter += 1
            return file_id

    def get_stats(self) -> List[CacheStat]:
        """Return the manager's CacheStats.

        Safe to call from any thread.
        """
        with self._files_lock:
            # One stat per stored file, flattened across all widgets.
            return [
                CacheStat(
                    category_name="UploadedFileManager",
                    cache_name="",
                    byte_length=len(file.data),
                )
                for file_list in self._files_by_id.values()
                for file in file_list
            ]
from abc import abstractmethod
from enum import Enum
from typing import Optional, Union
from typing_extensions import Protocol
class MediaFileKind(Enum):
    """The category of a file held in media storage."""

    # st.image, st.video, st.audio files
    MEDIA = "media"

    # st.download_button files
    DOWNLOADABLE = "downloadable"
class MediaFileStorageError(Exception):
    """Exception class for errors raised by MediaFileStorage.

    When running in "development mode", the full text of these errors
    is displayed in the frontend, so errors should be human-readable
    (and actionable).

    When running in "release mode", errors are redacted on the
    frontend; we instead show a generic "Something went wrong!" message.
    """
class MediaFileStorage(Protocol):
    """Interface for a backend that stores media files and serves them by ID."""

    @abstractmethod
    def load_and_get_id(
        self,
        path_or_data: Union[str, bytes],
        mimetype: str,
        kind: MediaFileKind,
        filename: Optional[str] = None,
    ) -> str:
        """Load a file path or raw bytes into storage and return a unique ID.

        Passing a URL is an error: media hosted at an external URL can be
        served to the Streamlit frontend directly, so it never needs to be
        stored in a MediaFileStorage.

        Parameters
        ----------
        path_or_data
            Either the path of a file on disk, or a file's raw bytes.
        mimetype
            The media's mimetype; used for the Content-Type header when
            the media is served over HTTP.
        kind
            Whether this is a MEDIA or a DOWNLOADABLE file.
        filename : str or None
            Optional filename, used to populate the filename in the
            response header.

        Returns
        -------
        str
            The unique ID of the media file.

        Raises
        ------
        MediaFileStorageError
            If the media can't be loaded (for example, because a file
            path is invalid).
        """
        raise NotImplementedError

    @abstractmethod
    def get_url(self, file_id: str) -> str:
        """Return a URL for a stored file.

        Parameters
        ----------
        file_id
            The file's ID, returned from load_media_and_get_id().

        Returns
        -------
        str
            A URL the frontend can load the file from. The URL may
            expire, so it should not be cached!

        Raises
        ------
        MediaFileStorageError
            If no object with the given ID exists in the storage.
        """
        raise NotImplementedError

    @abstractmethod
    def delete_file(self, file_id: str) -> None:
        """Delete a file from the storage.

        Called when a file is no longer referenced by any connected
        client, so the MediaFileStorage can free its resources.

        Deleting a nonexistent file_id is allowed and is a no-op, so
        repeated `delete_file` calls with the same ID are not an error.

        Note: implementations may ignore `delete_file` calls entirely -
        this function is a *suggestion*, not a *command*. Callers must
        not rely on deletion happening immediately (or at all).

        Parameters
        ----------
        file_id
            The file's ID, returned from load_media_and_get_id().

        Returns
        -------
        None

        Raises
        ------
        MediaFileStorageError
            If deletion fails for any reason. These failures are
            generally not shown on the frontend (deletion usually
            happens on session disconnect).
        """
        raise NotImplementedError
import collections
import threading
from typing import Dict, Optional, Set, Union
from streamlit.logger import get_logger
from streamlit.runtime.media_file_storage import MediaFileKind, MediaFileStorage
LOGGER = get_logger(__name__)
def _get_session_id() -> str:
    """Get the active AppSession's session_id."""
    from streamlit.runtime.scriptrunner import get_script_run_ctx

    ctx = get_script_run_ctx()
    if ctx is not None:
        return ctx.session_id

    # No script-run context: we're running via "python myscript.py" rather
    # than "streamlit run myscript.py". In that case the session ID doesn't
    # matter and can just be a constant, as there's only ever one "session".
    return "dontcare"
class MediaFileMetadata:
    """Metadata that the MediaFileManager needs for each file it manages."""

    def __init__(self, kind: MediaFileKind = MediaFileKind.MEDIA):
        # The file's kind (MEDIA vs DOWNLOADABLE) is fixed at creation.
        self._kind = kind
        # Deletion flag: once set, the file is eligible for actual removal
        # on a later cleanup pass.
        self._marked_for_delete = False

    @property
    def kind(self) -> MediaFileKind:
        return self._kind

    @property
    def is_marked_for_delete(self) -> bool:
        return self._marked_for_delete

    def mark_for_delete(self) -> None:
        self._marked_for_delete = True
class MediaFileManager:
    """In-memory file manager for MediaFile objects.

    This keeps track of:
    - Which files exist, and what their IDs are. This is important so we can
      serve files by ID -- that's the whole point of this class!
    - Which files are being used by which AppSession (by ID). This is
      important so we can remove files from memory when no more sessions need
      them.
    - The exact location in the app where each file is being used (i.e. the
      file's "coordinates"). This is important so we can mark a file as "not
      being used by a certain session" if it gets replaced by another file at
      the same coordinates. For example, when doing an animation where the same
      image is constantly replaced with new frames. (This doesn't solve the case
      where the file's coordinates keep changing for some reason, though! e.g.
      if new elements keep being prepended to the app. Unlikely to happen, but
      we should address it at some point.)
    """

    def __init__(self, storage: MediaFileStorage):
        # Backing storage that holds the actual file bytes.
        self._storage = storage

        # Dict of [file_id -> MediaFileMetadata]
        self._file_metadata: Dict[str, MediaFileMetadata] = {}

        # Dict[session ID][coordinates] -> file_id.
        self._files_by_session_and_coord: Dict[
            str, Dict[str, str]
        ] = collections.defaultdict(dict)

        # MediaFileManager is used from multiple threads, so all operations
        # need to be protected with a Lock. (This is not an RLock, which
        # means taking it multiple times from the same thread will deadlock.)
        self._lock = threading.Lock()

    def _get_inactive_file_ids(self) -> Set[str]:
        """Compute the set of files that are stored in the manager, but are
        not referenced by any active session. These are files that can be
        safely deleted.

        Thread safety: callers must hold `self._lock`.
        """
        # Get the set of all our file IDs.
        file_ids = set(self._file_metadata.keys())

        # Subtract all IDs that are in use by each session
        for session_file_ids_by_coord in self._files_by_session_and_coord.values():
            file_ids.difference_update(session_file_ids_by_coord.values())

        return file_ids

    def remove_orphaned_files(self) -> None:
        """Remove all files that are no longer referenced by any active session.

        Safe to call from any thread.
        """
        LOGGER.debug("Removing orphaned files...")

        with self._lock:
            for file_id in self._get_inactive_file_ids():
                file = self._file_metadata[file_id]
                if file.kind == MediaFileKind.MEDIA:
                    # Plain media is deleted as soon as it's unreferenced.
                    self._delete_file(file_id)
                elif file.kind == MediaFileKind.DOWNLOADABLE:
                    # Downloadable files get a two-pass grace period: flagged
                    # on the first sweep, deleted on the next.
                    if file.is_marked_for_delete:
                        self._delete_file(file_id)
                    else:
                        file.mark_for_delete()

    def _delete_file(self, file_id: str) -> None:
        """Delete the given file from storage, and remove its metadata from
        self._file_metadata.

        Thread safety: callers must hold `self._lock`.
        """
        LOGGER.debug("Deleting File: %s", file_id)
        self._storage.delete_file(file_id)
        del self._file_metadata[file_id]

    def clear_session_refs(self, session_id: Optional[str] = None) -> None:
        """Remove the given session's file references.

        (This does not remove any files from the manager - you must call
        `remove_orphaned_files` for that.)

        Should be called whenever ScriptRunner completes and when a session ends.

        Safe to call from any thread.
        """
        if session_id is None:
            session_id = _get_session_id()

        LOGGER.debug("Disconnecting files for session with ID %s", session_id)

        with self._lock:
            if session_id in self._files_by_session_and_coord:
                del self._files_by_session_and_coord[session_id]

        LOGGER.debug(
            "Sessions still active: %r", self._files_by_session_and_coord.keys()
        )

        LOGGER.debug(
            "Files: %s; Sessions with files: %s",
            len(self._file_metadata),
            len(self._files_by_session_and_coord),
        )

    def add(
        self,
        path_or_data: Union[bytes, str],
        mimetype: str,
        coordinates: str,
        file_name: Optional[str] = None,
        is_for_static_download: bool = False,
    ) -> str:
        """Add a new MediaFile with the given parameters and return its URL.

        If an identical file already exists, return the existing URL
        and registers the current session as a user.

        Safe to call from any thread.

        Parameters
        ----------
        path_or_data : bytes or str
            If bytes: the media file's raw data. If str: the name of a file
            to load from disk.
        mimetype : str
            The mime type for the file. E.g. "audio/mpeg".
            This string will be used in the "Content-Type" header when the file
            is served over HTTP.
        coordinates : str
            Unique string identifying an element's location.
            Prevents memory leak of "forgotten" file IDs when element media
            is being replaced-in-place (e.g. an st.image stream).
            coordinates should be of the form: "1.(3.-14).5"
        file_name : str or None
            Optional file_name. Used to set the filename in the response header.
        is_for_static_download: bool
            Indicate that data stored for downloading as a file,
            not as a media for rendering at page. [default: False]

        Returns
        -------
        str
            The url that the frontend can use to fetch the media.

        Raises
        ------
        Exception
            If a filename is passed, any Exception raised while trying to
            read the file is re-raised by the underlying storage.
        """
        session_id = _get_session_id()

        with self._lock:
            kind = (
                MediaFileKind.DOWNLOADABLE
                if is_for_static_download
                else MediaFileKind.MEDIA
            )
            file_id = self._storage.load_and_get_id(
                path_or_data, mimetype, kind, file_name
            )

            metadata = MediaFileMetadata(kind=kind)
            self._file_metadata[file_id] = metadata
            self._files_by_session_and_coord[session_id][coordinates] = file_id

            return self._storage.get_url(file_id)
"""Hashing for st.memo and st.singleton."""
import collections
import dataclasses
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import threading
import unittest.mock
import weakref
from enum import Enum
from typing import Any, Dict, List, Optional, Pattern
from streamlit import type_util, util
from streamlit.runtime.caching.cache_errors import CacheType, UnhashableTypeError
from streamlit.runtime.uploaded_file_manager import UploadedFile
# If a dataframe has more than this many rows, we consider it large and hash a sample.
_PANDAS_ROWS_LARGE = 100000
# Number of rows sampled (with a fixed seed) from a "large" dataframe.
_PANDAS_SAMPLE_SIZE = 10000

# Similar to dataframes, we also sample large numpy arrays.
_NP_SIZE_LARGE = 1000000
_NP_SAMPLE_SIZE = 100000

# Arbitrary item to denote where we found a cycle in a hashed object.
# This allows us to hash self-referencing lists, dictionaries, etc.
_CYCLE_PLACEHOLDER = b"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE"
def update_hash(val: Any, hasher, cache_type: CacheType) -> None:
    """Updates a hashlib hasher with the hash of val.

    This is the main entrypoint to hashing.py.

    Parameters
    ----------
    val : Any
        The value to hash.
    hasher
        A hashlib hasher instance (anything with an `update(bytes)` method).
    cache_type : CacheType
        Which Streamlit cache is performing the hash; stored on the
        internal hasher (presumably for error reporting - confirm).
    """
    ch = _CacheFuncHasher(cache_type)
    ch.update(hasher, val)
class _HashStack:
    """Stack of what has been hashed, for debug and circular reference detection.

    This internally keeps 1 stack per thread.

    Internally, this stores the ID of pushed objects rather than the objects
    themselves because otherwise the "in" operator inside __contains__ would
    fail for objects that don't return a boolean for "==" operator. For
    example, arr == 10 where arr is a NumPy array returns another NumPy array.
    This causes the "in" to crash since it expects a boolean.
    """

    def __init__(self):
        # Maps id(obj) -> obj. Keeping the object as the value pins it
        # alive, so its id can't be recycled while it's on the stack.
        # The annotation previously (and incorrectly) claimed List[Any]
        # values; single objects are stored. It's also quoted so the
        # subscripted collections type is never evaluated at runtime
        # (complex-target annotations are evaluated, and
        # collections.OrderedDict isn't subscriptable before Python 3.9).
        self._stack: "collections.OrderedDict[int, Any]" = collections.OrderedDict()

    def __repr__(self) -> str:
        return util.repr_(self)

    def push(self, val: Any) -> None:
        """Push a value onto the stack (keyed by its id())."""
        self._stack[id(val)] = val

    def pop(self) -> None:
        """Remove the most recently pushed value (LIFO)."""
        self._stack.popitem()

    def __contains__(self, val: Any) -> bool:
        return id(val) in self._stack
class _HashStacks:
    """Stacks of what has been hashed, with at most 1 stack per thread."""

    def __init__(self):
        # Weak keys: a thread's stack is dropped when the thread dies.
        self._stacks: weakref.WeakKeyDictionary[
            threading.Thread, _HashStack
        ] = weakref.WeakKeyDictionary()

    def __repr__(self) -> str:
        return util.repr_(self)

    @property
    def current(self) -> _HashStack:
        """Return the calling thread's stack, creating it on first access."""
        thread = threading.current_thread()
        try:
            return self._stacks[thread]
        except KeyError:
            new_stack = _HashStack()
            self._stacks[thread] = new_stack
            return new_stack
# Module-level singleton: one per-thread hash stack shared by all hashers.
hash_stacks = _HashStacks()
def _int_to_bytes(i: int) -> bytes:
    """Serialize an int to a minimal little-endian, signed byte string."""
    # Width = bit_length plus one sign bit, rounded up to whole bytes:
    # (bit_length + 1 + 7) // 8 == (bit_length + 8) // 8.
    width = (i.bit_length() + 8) // 8
    return i.to_bytes(width, byteorder="little", signed=True)
def _key(obj: Optional[Any]) -> Any:
    """Return key for memoization."""

    if obj is None:
        return None

    def is_simple(value):
        # bool is technically redundant (it's an int subclass) but kept
        # for clarity.
        return (
            isinstance(value, (bytes, bytearray, str, float, int, bool))
            or value is None
        )

    if is_simple(obj):
        return obj

    # Tuples of simple values key as themselves; lists are tagged and
    # converted to tuples so they're hashable.
    if isinstance(obj, tuple) and all(is_simple(item) for item in obj):
        return obj

    if isinstance(obj, list) and all(is_simple(item) for item in obj):
        return ("__l", tuple(obj))

    # For these types, identity stands in for value.
    if (
        type_util.is_type(obj, "pandas.core.frame.DataFrame")
        or type_util.is_type(obj, "numpy.ndarray")
        or inspect.isbuiltin(obj)
        or inspect.isroutine(obj)
        or inspect.iscode(obj)
    ):
        return id(obj)

    return NoResult
class _CacheFuncHasher:
    """A hasher that can hash objects with cycles.

    NOTE: the branch order in _to_bytes is significant (several branches
    must run before more general ones); see the inline comments there.
    """

    def __init__(self, cache_type: CacheType):
        # Memoization table: (type name, _key(obj)) -> previously computed bytes.
        self._hashes: Dict[Any, bytes] = {}

        # The number of the bytes in the hash.
        self.size = 0

        self.cache_type = cache_type

    def __repr__(self) -> str:
        return util.repr_(self)

    def to_bytes(self, obj: Any) -> bytes:
        """Add memoization to _to_bytes and protect against cycles in data structures."""
        tname = type(obj).__qualname__.encode()
        key = (tname, _key(obj))

        # Memoize if possible.
        if key[1] is not NoResult:
            if key in self._hashes:
                return self._hashes[key]

        # Break recursive cycles.
        if obj in hash_stacks.current:
            return _CYCLE_PLACEHOLDER

        hash_stacks.current.push(obj)

        try:
            # Hash the input
            b = b"%s:%s" % (tname, self._to_bytes(obj))

            # Hmmm... It's possible that the size calculation is wrong. When we
            # call to_bytes inside _to_bytes things get double-counted.
            self.size += sys.getsizeof(b)

            if key[1] is not NoResult:
                self._hashes[key] = b

        finally:
            # In case an UnhashableTypeError (or other) error is thrown, clean up the
            # stack so we don't get false positives in future hashing calls
            hash_stacks.current.pop()

        return b

    def update(self, hasher, obj: Any) -> None:
        """Update the provided hasher with the hash of an object."""
        b = self.to_bytes(obj)
        hasher.update(b)

    def _to_bytes(self, obj: Any) -> bytes:
        """Hash objects to bytes, including code with dependencies.

        Python's built in `hash` does not produce consistent results across
        runs.
        """
        if isinstance(obj, unittest.mock.Mock):
            # Mock objects can appear to be infinitely
            # deep, so we don't try to hash them at all.
            return self.to_bytes(id(obj))

        elif isinstance(obj, bytes) or isinstance(obj, bytearray):
            return obj

        elif isinstance(obj, str):
            return obj.encode()

        elif isinstance(obj, float):
            return self.to_bytes(hash(obj))

        elif isinstance(obj, int):
            return _int_to_bytes(obj)

        elif isinstance(obj, (list, tuple)):
            h = hashlib.new("md5")
            for item in obj:
                self.update(h, item)
            return h.digest()

        elif isinstance(obj, dict):
            h = hashlib.new("md5")
            for item in obj.items():
                self.update(h, item)
            return h.digest()

        elif obj is None:
            return b"0"

        elif obj is True:
            return b"1"

        elif obj is False:
            return b"0"

        elif dataclasses.is_dataclass(obj):
            # Dataclasses hash as the dict of their fields.
            return self.to_bytes(dataclasses.asdict(obj))

        elif isinstance(obj, Enum):
            return str(obj).encode()

        elif type_util.is_type(obj, "pandas.core.frame.DataFrame") or type_util.is_type(
            obj, "pandas.core.series.Series"
        ):
            import pandas as pd

            # Large frames are sampled with a fixed seed for determinism.
            if len(obj) >= _PANDAS_ROWS_LARGE:
                obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)
            try:
                return b"%s" % pd.util.hash_pandas_object(obj).sum()
            except TypeError:
                # Use pickle if pandas cannot hash the object for example if
                # it contains unhashable objects.
                return b"%s" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)

        elif type_util.is_type(obj, "numpy.ndarray"):
            h = hashlib.new("md5")
            self.update(h, obj.shape)

            if obj.size >= _NP_SIZE_LARGE:
                import numpy as np

                # Deterministic sample of large arrays (fixed seed).
                state = np.random.RandomState(0)
                obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)

            self.update(h, obj.tobytes())
            return h.digest()

        elif type_util.is_type(obj, "PIL.Image.Image"):
            import numpy as np

            # we don't just hash the results of obj.tobytes() because we want to use
            # the sampling logic for numpy data
            np_array = np.frombuffer(obj.tobytes(), dtype="uint8")
            return self.to_bytes(np_array)

        elif inspect.isbuiltin(obj):
            return bytes(obj.__name__.encode())

        elif type_util.is_type(obj, "builtins.mappingproxy") or type_util.is_type(
            obj, "builtins.dict_items"
        ):
            return self.to_bytes(dict(obj))

        elif type_util.is_type(obj, "builtins.getset_descriptor"):
            return bytes(obj.__qualname__.encode())

        elif isinstance(obj, UploadedFile):
            # UploadedFile is a BytesIO (thus IOBase) but has a name.
            # It does not have a timestamp so this must come before
            # temporary files
            h = hashlib.new("md5")
            self.update(h, obj.name)
            self.update(h, obj.tell())
            self.update(h, obj.getvalue())
            return h.digest()

        elif hasattr(obj, "name") and (
            isinstance(obj, io.IOBase)
            # Handle temporary files used during testing
            or isinstance(obj, tempfile._TemporaryFileWrapper)
        ):
            # Hash files as name + last modification date + offset.
            # NB: we're using hasattr("name") to differentiate between
            # on-disk and in-memory StringIO/BytesIO file representations.
            # That means that this condition must come *before* the next
            # condition, which just checks for StringIO/BytesIO.
            h = hashlib.new("md5")
            obj_name = getattr(obj, "name", "wonthappen")  # Just to appease MyPy.
            self.update(h, obj_name)
            self.update(h, os.path.getmtime(obj_name))
            self.update(h, obj.tell())
            return h.digest()

        elif isinstance(obj, Pattern):
            return self.to_bytes([obj.pattern, obj.flags])

        elif isinstance(obj, io.StringIO) or isinstance(obj, io.BytesIO):
            # Hash in-memory StringIO/BytesIO by their full contents
            # and seek position.
            h = hashlib.new("md5")
            self.update(h, obj.tell())
            self.update(h, obj.getvalue())
            return h.digest()

        elif type_util.is_type(obj, "numpy.ufunc"):
            # For numpy.remainder, this returns remainder.
            return bytes(obj.__name__.encode())

        elif inspect.ismodule(obj):
            # TODO: Figure out how to best show this kind of warning to the
            # user. In the meantime, show nothing. This scenario is too common,
            # so the current warning is quite annoying...
            # st.warning(('Streamlit does not support hashing modules. '
            #             'We did not hash `%s`.') % obj.__name__)
            # TODO: Hash more than just the name for internal modules.
            return self.to_bytes(obj.__name__)

        elif inspect.isclass(obj):
            # TODO: Figure out how to best show this kind of warning to the
            # user. In the meantime, show nothing. This scenario is too common,
            # (e.g. in every "except" statement) so the current warning is
            # quite annoying...
            # st.warning(('Streamlit does not support hashing classes. '
            #             'We did not hash `%s`.') % obj.__name__)
            # TODO: Hash more than just the name of classes.
            return self.to_bytes(obj.__name__)

        elif isinstance(obj, functools.partial):
            # The return value of functools.partial is not a plain function:
            # it's a callable object that remembers the original function plus
            # the values you pickled into it. So here we need to special-case it.
            h = hashlib.new("md5")
            self.update(h, obj.args)
            self.update(h, obj.func)
            self.update(h, obj.keywords)
            return h.digest()

        else:
            # As a last resort, hash the output of the object's __reduce__ method
            h = hashlib.new("md5")
            try:
                reduce_data = obj.__reduce__()
            except Exception as ex:
                raise UnhashableTypeError() from ex

            for item in reduce_data:
                self.update(h, item)
            return h.digest()
class NoResult:
    """Placeholder class for return values when None is meaningful.

    Used as a sentinel so that a legitimate ``None`` result can be
    distinguished from "no result at all".
    """
import threading
from typing import Any, Dict, List, Optional, Set
from streamlit.proto.WidgetStates_pb2 import WidgetState as WidgetStateProto
from streamlit.proto.WidgetStates_pb2 import WidgetStates as WidgetStatesProto
from streamlit.runtime.state.session_state import (
RegisterWidgetResult,
SessionState,
T,
WidgetMetadata,
)
class SafeSessionState:
    """Thread-safe wrapper around SessionState.

    When AppSession gets a re-run request, it can interrupt its existing
    ScriptRunner and spin up a new ScriptRunner to handle the request.
    When this happens, the existing ScriptRunner will continue executing
    its script until it reaches a yield point - but during this time, it
    must not mutate its SessionState. An interrupted ScriptRunner assigns
    a dummy SessionState instance to its wrapper to prevent further mutation.
    """

    def __init__(self, state: SessionState):
        # All access to `_state` is guarded by `_lock` and gated on
        # `_disconnected`.
        self._state = state
        # TODO: we'd prefer this be a threading.Lock instead of RLock -
        # but `call_callbacks` first needs to be rewritten.
        self._lock = threading.RLock()
        self._disconnected = False

    def disconnect(self) -> None:
        """Disconnect the wrapper from its underlying SessionState.

        ScriptRunner calls this when it gets a stop request. After this
        function is called, all future SessionState interactions are no-ops.
        """
        with self._lock:
            self._disconnected = True

    def register_widget(
        self, metadata: WidgetMetadata[T], user_key: Optional[str]
    ) -> RegisterWidgetResult[T]:
        """Register a widget, or return a failure result if disconnected."""
        with self._lock:
            if not self._disconnected:
                return self._state.register_widget(metadata, user_key)
            return RegisterWidgetResult.failure(metadata.deserializer)

    def on_script_will_rerun(self, latest_widget_states: WidgetStatesProto) -> None:
        """Forward the pre-rerun notification, unless disconnected."""
        with self._lock:
            if not self._disconnected:
                # TODO: rewrite this to copy the callbacks list into a local
                # variable so that we don't need to hold our lock for the
                # duration. (This will also allow us to downgrade our RLock
                # to a Lock.)
                self._state.on_script_will_rerun(latest_widget_states)

    def on_script_finished(self, widget_ids_this_run: Set[str]) -> None:
        """Forward the script-finished notification, unless disconnected."""
        with self._lock:
            if not self._disconnected:
                self._state.on_script_finished(widget_ids_this_run)

    def get_widget_states(self) -> List[WidgetStateProto]:
        """Return a list of serialized widget values for each widget with a value."""
        with self._lock:
            return [] if self._disconnected else self._state.get_widget_states()

    def is_new_state_value(self, user_key: str) -> bool:
        with self._lock:
            return (
                False
                if self._disconnected
                else self._state.is_new_state_value(user_key)
            )

    @property
    def filtered_state(self) -> Dict[str, Any]:
        """The combined session and widget state, excluding keyless widgets."""
        with self._lock:
            return {} if self._disconnected else self._state.filtered_state

    def __getitem__(self, key: str) -> Any:
        with self._lock:
            if self._disconnected:
                raise KeyError(key)
            return self._state[key]

    def __setitem__(self, key: str, value: Any) -> None:
        with self._lock:
            if not self._disconnected:
                self._state[key] = value

    def __delitem__(self, key: str) -> None:
        with self._lock:
            if self._disconnected:
                raise KeyError(key)
            del self._state[key]

    def __contains__(self, key: str) -> bool:
        with self._lock:
            return False if self._disconnected else key in self._state
from typing import Any, Dict, Iterator, MutableMapping
from typing_extensions import Final
from streamlit import logger as _logger
from streamlit import runtime
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.state.safe_session_state import SafeSessionState
from streamlit.runtime.state.session_state import SessionState, require_valid_user_key
from streamlit.type_util import Key
LOGGER: Final = _logger.get_logger(__name__)

# Ensures the "bare script" warning below is only emitted once per process.
_state_use_warning_already_displayed: bool = False


def get_session_state() -> SafeSessionState:
    """Get the SessionState object for the current session.

    Note that in streamlit scripts, this function should not be called
    directly. Instead, SessionState objects should be accessed via
    st.session_state.
    """
    global _state_use_warning_already_displayed
    from streamlit.runtime.scriptrunner import get_script_run_ctx

    ctx = get_script_run_ctx()
    if ctx is not None:
        return ctx.session_state

    # No script run context: the script is being run bare. Act as an
    # always-empty session state, and warn (once) if a runtime exists.
    if not _state_use_warning_already_displayed:
        _state_use_warning_already_displayed = True
        if not runtime.exists():
            LOGGER.warning(
                "Session state does not function when running a script without `streamlit run`"
            )
    return SafeSessionState(SessionState())
class SessionStateProxy(MutableMapping[Key, Any]):
    """A stateless singleton that proxies `st.session_state` interactions
    to the current script thread's SessionState instance.

    The proxy API differs slightly from SessionState: it does not allow
    callers to get, set, or iterate over "keyless" widgets (that is, widgets
    that were created without a user_key, and have autogenerated keys).
    """

    @staticmethod
    def _validated_key(key: Key) -> str:
        """Coerce `key` to str and raise if it is not a valid user key."""
        skey = str(key)
        require_valid_user_key(skey)
        return skey

    def __iter__(self) -> Iterator[Any]:
        """Iterator over user state and keyed widget values."""
        # TODO: this is unsafe if fastReruns is true! Let's deprecate/remove.
        return iter(get_session_state().filtered_state)

    def __len__(self) -> int:
        """Number of user state and keyed widget values in session_state."""
        return len(get_session_state().filtered_state)

    def __str__(self) -> str:
        """String representation of user state and keyed widget values."""
        return str(get_session_state().filtered_state)

    def __getitem__(self, key: Key) -> Any:
        """Return the state or widget value with the given key.

        Raises
        ------
        StreamlitAPIException
            If the key is not a valid SessionState user key.
        """
        skey = SessionStateProxy._validated_key(key)
        return get_session_state()[skey]

    @gather_metrics("session_state.set_item")
    def __setitem__(self, key: Key, value: Any) -> None:
        """Set the value of the given key.

        Raises
        ------
        StreamlitAPIException
            If the key is not a valid SessionState user key.
        """
        skey = SessionStateProxy._validated_key(key)
        get_session_state()[skey] = value

    def __delitem__(self, key: Key) -> None:
        """Delete the value with the given key.

        Raises
        ------
        StreamlitAPIException
            If the key is not a valid SessionState user key.
        """
        skey = SessionStateProxy._validated_key(key)
        del get_session_state()[skey]

    def __getattr__(self, key: str) -> Any:
        try:
            return self[key]
        except KeyError:
            raise AttributeError(_missing_attr_error_message(key))

    @gather_metrics("session_state.set_attr")
    def __setattr__(self, key: str, value: Any) -> None:
        self[key] = value

    def __delattr__(self, key: str) -> None:
        try:
            del self[key]
        except KeyError:
            raise AttributeError(_missing_attr_error_message(key))

    def to_dict(self) -> Dict[str, Any]:
        """Return a dict containing all session_state and keyed widget values."""
        return get_session_state().filtered_state
def _missing_attr_error_message(attr_name: str) -> str:
return (
f'st.session_state has no attribute "{attr_name}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/library/advanced-features/session-state#initialization"
) | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/runtime/state/session_state_proxy.py | 0.794903 | 0.214836 | session_state_proxy.py | pypi |
import threading
from dataclasses import dataclass
from enum import Enum
from typing import Optional, cast
from streamlit.proto.WidgetStates_pb2 import WidgetStates
from streamlit.runtime.state import coalesce_widget_states
class ScriptRequestType(Enum):
    """The kinds of requests that can be made of a ScriptRunner."""

    # The ScriptRunner should continue running its script.
    CONTINUE = "CONTINUE"

    # If the script is running, it should be stopped as soon as the
    # ScriptRunner reaches an interrupt point. This is a terminal state.
    STOP = "STOP"

    # A script rerun has been requested. The ScriptRunner should handle
    # this request as soon as it reaches an interrupt point.
    RERUN = "RERUN"
@dataclass(frozen=True)
class RerunData:
    """Data attached to RERUN requests. Immutable."""

    # The query string to rerun with.
    query_string: str = ""
    # Widget states to rerun with, or None to reuse the most recent ones.
    widget_states: Optional[WidgetStates] = None
    # Identifies the page script to run — presumably a hash of its path;
    # TODO(review): confirm against the multipage-apps code.
    page_script_hash: str = ""
    # The name of the page to run.
    page_name: str = ""
@dataclass(frozen=True)
class ScriptRequest:
    """A STOP or RERUN request and associated data."""

    type: ScriptRequestType
    _rerun_data: Optional[RerunData] = None

    @property
    def rerun_data(self) -> RerunData:
        """The request's RerunData. Only valid for RERUN requests."""
        if self.type is ScriptRequestType.RERUN:
            return cast(RerunData, self._rerun_data)
        raise RuntimeError("RerunData is only set for RERUN requests.")
class ScriptRequests:
    """An interface for communicating with a ScriptRunner. Thread-safe.

    AppSession makes requests of a ScriptRunner through this class, and
    ScriptRunner handles those requests.
    """

    def __init__(self):
        self._lock = threading.Lock()
        # The current request state. Starts in CONTINUE; STOP is terminal.
        self._state = ScriptRequestType.CONTINUE
        # Data for the most recent RERUN request. Meaningful only while
        # `_state` is RERUN.
        self._rerun_data = RerunData()

    def request_stop(self) -> None:
        """Request that the ScriptRunner stop running. A stopped ScriptRunner
        can't be used anymore. STOP requests succeed unconditionally.
        """
        with self._lock:
            self._state = ScriptRequestType.STOP

    def request_rerun(self, new_data: RerunData) -> bool:
        """Request that the ScriptRunner rerun its script.

        If the ScriptRunner has been stopped, this request can't be honored:
        return False.

        Otherwise, record the request and return True. The ScriptRunner will
        handle the rerun request as soon as it reaches an interrupt point.

        Parameters
        ----------
        new_data : RerunData
            The data to associate with the rerun.

        Returns
        -------
        bool
            True if the request was recorded, False if the runner is stopped.
        """
        with self._lock:
            if self._state == ScriptRequestType.STOP:
                # We can't rerun after being stopped.
                return False

            if self._state == ScriptRequestType.CONTINUE:
                # If we're running, we can handle a rerun request
                # unconditionally.
                self._state = ScriptRequestType.RERUN
                self._rerun_data = new_data
                return True

            if self._state == ScriptRequestType.RERUN:
                # If we have an existing Rerun request, we coalesce this
                # new request into it.
                if self._rerun_data.widget_states is None:
                    # The existing request's widget_states is None, which
                    # means it wants to rerun with whatever the most
                    # recent script execution's widget state was.
                    # We have no meaningful state to merge with, and
                    # so we simply overwrite the existing request.
                    self._rerun_data = new_data
                    return True

                if new_data.widget_states is not None:
                    # Both the existing and the new request have
                    # non-null widget_states. Merge them together.
                    coalesced_states = coalesce_widget_states(
                        self._rerun_data.widget_states, new_data.widget_states
                    )
                    self._rerun_data = RerunData(
                        query_string=new_data.query_string,
                        widget_states=coalesced_states,
                        page_script_hash=new_data.page_script_hash,
                        page_name=new_data.page_name,
                    )
                    return True

                # If old widget_states is NOT None, and new widget_states IS
                # None, then this new request is entirely redundant. Leave
                # our existing rerun_data as is.
                return True

            # Unreachable: every ScriptRequestType member is handled above.
            # Kept as a defensive guard in case a new member is added.
            # (Fixed: the message previously named "ScriptRunnerState",
            # but `self._state` is a ScriptRequestType.)
            raise RuntimeError(f"Unrecognized ScriptRequestType: {self._state}")

    def on_scriptrunner_yield(self) -> Optional[ScriptRequest]:
        """Called by the ScriptRunner when it's at a yield point.

        If we have no request, return None.

        If we have a RERUN request, return the request and set our internal
        state to CONTINUE.

        If we have a STOP request, return the request and remain stopped.
        """
        if self._state == ScriptRequestType.CONTINUE:
            # We avoid taking a lock in the common case. If a STOP or RERUN
            # request is received between the `if` and `return`, it will be
            # handled at the next `on_scriptrunner_yield`, or when
            # `on_scriptrunner_ready` is called.
            return None

        with self._lock:
            if self._state == ScriptRequestType.RERUN:
                self._state = ScriptRequestType.CONTINUE
                return ScriptRequest(ScriptRequestType.RERUN, self._rerun_data)

            assert self._state == ScriptRequestType.STOP
            return ScriptRequest(ScriptRequestType.STOP)

    def on_scriptrunner_ready(self) -> ScriptRequest:
        """Called by the ScriptRunner when it's about to run its script for
        the first time, and also after its script has successfully completed.

        If we have a RERUN request, return the request and set
        our internal state to CONTINUE.

        If we have a STOP request or no request, set our internal state
        to STOP.
        """
        with self._lock:
            if self._state == ScriptRequestType.RERUN:
                self._state = ScriptRequestType.CONTINUE
                return ScriptRequest(ScriptRequestType.RERUN, self._rerun_data)

            # If we don't have a rerun request, unconditionally change our
            # state to STOP.
            self._state = ScriptRequestType.STOP
            return ScriptRequest(ScriptRequestType.STOP)
import collections
import threading
from dataclasses import dataclass, field
from typing import Callable, Counter, Dict, List, Optional, Set
from typing_extensions import Final, TypeAlias
from streamlit import runtime
from streamlit.errors import StreamlitAPIException
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Command
from streamlit.runtime.state import SafeSessionState
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
LOGGER: Final = get_logger(__name__)
UserInfo: TypeAlias = Dict[str, Optional[str]]
@dataclass
class ScriptRunContext:
    """A context object that contains data for a "script run" - that is,
    data that's scoped to a single ScriptRunner execution (and therefore also
    scoped to a single connected "session").

    ScriptRunContext is used internally by virtually every `st.foo()` function.
    It is accessed only from the script thread that's created by ScriptRunner.

    Streamlit code typically retrieves the active ScriptRunContext via the
    `get_script_run_ctx` function.
    """

    session_id: str
    _enqueue: Callable[[ForwardMsg], None]
    query_string: str
    session_state: SafeSessionState
    uploaded_file_mgr: UploadedFileManager
    page_script_hash: str
    user_info: UserInfo
    gather_usage_stats: bool = False
    command_tracking_deactivated: bool = False
    tracked_commands: List[Command] = field(default_factory=list)
    tracked_commands_counter: Counter[str] = field(default_factory=collections.Counter)
    _set_page_config_allowed: bool = True
    _has_script_started: bool = False
    widget_ids_this_run: Set[str] = field(default_factory=set)
    widget_user_keys_this_run: Set[str] = field(default_factory=set)
    form_ids_this_run: Set[str] = field(default_factory=set)
    cursors: Dict[int, "streamlit.cursor.RunningCursor"] = field(default_factory=dict)
    dg_stack: List["streamlit.delta_generator.DeltaGenerator"] = field(
        default_factory=list
    )

    def reset(self, query_string: str = "", page_script_hash: str = "") -> None:
        """Reset per-run bookkeeping in preparation for a fresh script run."""
        self.cursors = {}
        self.widget_ids_this_run = set()
        self.widget_user_keys_this_run = set()
        self.form_ids_this_run = set()
        self.query_string = query_string
        self.page_script_hash = page_script_hash
        # Permit set_page_config when the ScriptRunContext is reused on a rerun
        self._set_page_config_allowed = True
        self._has_script_started = False
        self.command_tracking_deactivated = False
        self.tracked_commands = []
        self.tracked_commands_counter = collections.Counter()

    def on_script_start(self) -> None:
        """Mark that the script body has begun executing."""
        self._has_script_started = True

    def enqueue(self, msg: ForwardMsg) -> None:
        """Enqueue a ForwardMsg for this context's session."""
        is_page_config = msg.HasField("page_config_changed")
        if is_page_config and not self._set_page_config_allowed:
            raise StreamlitAPIException(
                "`set_page_config()` can only be called once per app, "
                + "and must be called as the first Streamlit command in your script.\n\n"
                + "For more information refer to the [docs]"
                + "(https://docs.streamlit.io/library/api-reference/utilities/st.set_page_config)."
            )

        # We want to disallow set_page config if one of the following occurs:
        # - set_page_config was called on this message
        # - The script has already started and a different st call occurs (a delta)
        if is_page_config or (msg.HasField("delta") and self._has_script_started):
            self._set_page_config_allowed = False

        # Pass the message up to our associated ScriptRunner.
        self._enqueue(msg)
# Name of the thread attribute under which a ScriptRunContext is stored.
SCRIPT_RUN_CONTEXT_ATTR_NAME: Final = "streamlit_script_run_ctx"


def add_script_run_ctx(
    thread: Optional[threading.Thread] = None, ctx: Optional[ScriptRunContext] = None
):
    """Adds the current ScriptRunContext to a newly-created thread.

    This should be called from this thread's parent thread,
    before the new thread starts.

    Parameters
    ----------
    thread : threading.Thread
        The thread to attach the current ScriptRunContext to.
    ctx : ScriptRunContext or None
        The ScriptRunContext to add, or None to use the current thread's
        ScriptRunContext.

    Returns
    -------
    threading.Thread
        The same thread that was passed in, for chaining.
    """
    target = thread if thread is not None else threading.current_thread()
    context = ctx if ctx is not None else get_script_run_ctx()
    if context is not None:
        setattr(target, SCRIPT_RUN_CONTEXT_ATTR_NAME, context)
    return target
def get_script_run_ctx() -> Optional[ScriptRunContext]:
    """
    Returns
    -------
    ScriptRunContext | None
        The current thread's ScriptRunContext, or None if it doesn't have one.
    """
    current = threading.current_thread()
    ctx = getattr(current, SCRIPT_RUN_CONTEXT_ATTR_NAME, None)
    if ctx is None and runtime.exists():
        # Only warn about a missing ScriptRunContext if we were started
        # via `streamlit run`. Otherwise, the user is likely running a
        # script "bare", and doesn't need to be warned about streamlit
        # bits that are irrelevant when not connected to a session.
        LOGGER.warning("Thread '%s': missing ScriptRunContext", current.name)
    return ctx
# Needed to avoid circular dependencies while running tests.
import streamlit | /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/runtime/scriptrunner/script_run_context.py | 0.905144 | 0.175538 | script_run_context.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.