# NOTE: removed dataset-artifact header (table metadata residue, not part of the source).
__docformat__ = "numpy"
import os
import pandas as pd
import plotly.express as px
from plotly.subplots import make_subplots
from openbb_terminal.stocks.discovery import finviz_model
from openbb_terminal.helper_funcs import export_data
def display_heatmap(timeframe: str, export: str = ""):
    """Display the S&P 500 performance heatmap from Finviz.

    Parameters
    ----------
    timeframe: str
        Timeframe for performance
    export: str
        Format to export data
    """
    heat_df = finviz_model.get_heatmap_data(timeframe)
    if heat_df.empty:
        return

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "heatmap",
        heat_df,
    )

    # Bucket each ticker's % change into one of seven discrete colors
    # (three reds, grey for ~flat, three greens).
    change_bins = [-100, -2, -1, -0.001, 0.001, 1, 2, 100]
    bin_colors = [
        "rgb(246, 53, 56)",
        "rgb(191, 64, 69)",
        "rgb(139, 68, 78)",
        "grey",
        "rgb(53, 118, 78)",
        "rgb(47, 158, 79)",
        "rgb(48, 204, 90)",
    ]
    heat_df["colors"] = pd.cut(heat_df["Change"], bins=change_bins, labels=bin_colors)

    fig = make_subplots(
        print_grid=False,
        vertical_spacing=0.02,
        horizontal_spacing=0,
        specs=[[{"type": "domain"}]],
        rows=1,
        cols=1,
    )

    # Treemap hierarchy: whole index -> sector -> individual ticker.
    # Each label color maps to itself; "(?)" is plotly's root placeholder.
    color_map = {name: name for name in bin_colors}
    color_map["(?)"] = "#262931"
    treemap = px.treemap(
        heat_df,
        path=[px.Constant("SP 500"), "Sector", "Ticker"],
        values="value",
        custom_data=["Change"],
        color="colors",
        color_discrete_map=color_map,
    )
    fig.add_trace(treemap["data"][0], row=1, col=1)

    tile = fig.data[0]
    tile.texttemplate = (
        "<br> <br> <b>%{label}<br> %{customdata[0]:.2f}% <br> <br> <br><br><b>"
    )
    tile.insidetextfont = dict(
        family="Arial Black",
        size=50,
        color="white",
    )

    fig.update_traces(
        textinfo="label+text+value",
        textposition="middle center",
        selector=dict(type="treemap"),
        marker_line_width=0.3,
        marker_pad_b=20,
        marker_pad_l=0,
        marker_pad_r=0,
        marker_pad_t=50,
        tiling_pad=2,
    )
    fig.update_layout(
        margin=dict(t=0, l=0, r=0, b=0),
        paper_bgcolor="rgba(0,0,0,0)",
        plot_bgcolor="rgba(0,0,0,0)",
        hovermode=False,
        font=dict(
            family="Arial Black",
            size=20,
            color="white",
        ),
    )
    fig.show()
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.stocks.discovery import ark_model
from openbb_terminal import rich_config
logger = logging.getLogger(__name__)
def lambda_direction_color_red_green(val: str) -> str:
    """Wrap a trade direction in rich color tags: Buy -> green, Sell -> red.

    Parameters
    ----------
    val : str
        Direction string - either Buy or Sell

    Returns
    -------
    str
        Direction string with color tags added
    """
    if val == "Buy":
        return "[green]Buy[/green]"
    if val == "Sell":
        return "[red]Sell[/red]"
    # Anything else gets empty tags, matching the generic tag template.
    return f"[]{val}[/]"
@log_start_end(log=logger)
def ark_orders_view(
    limit: int = 10,
    sortby: str = "",
    ascend: bool = False,
    buys_only: bool = False,
    sells_only: bool = False,
    fund: str = "",
    export: str = "",
) -> None:
    """Print a table of the last N orders by ARK Investment Management.

    Parameters
    ----------
    limit: int
        Number of stocks to display
    sortby: str
        Column to sort on
    ascend: bool
        Flag to sort in ascending order
    buys_only: bool
        Flag to filter on buys only
    sells_only: bool
        Flag to sort on sells only
    fund: str
        Optional filter by fund
    export: str
        Export dataframe data to csv,json,xlsx file
    """
    orders = ark_model.get_ark_orders(buys_only, sells_only, fund)
    if not orders.empty:
        # Enrich the most recent `limit` orders with price/total columns.
        orders = ark_model.add_order_total(orders.head(limit))
        if rich_config.USE_COLOR:
            orders["direction"] = orders["direction"].apply(
                lambda_direction_color_red_green
            )
        if sortby:
            orders = orders.sort_values(by=sortby, ascending=ascend)
        print_rich_table(
            orders,
            headers=[col.title() for col in orders.columns],
            show_index=False,
            title="Orders by ARK Investment Management LLC",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "arkord",
        orders,
    )
__docformat__ = "numpy"
# pylint:disable=too-many-arguments
import logging
import financedatabase as fd
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def show_equities(
    country: str,
    sector: str,
    industry: str,
    name: str,
    description: str,
    marketcap: str,
    amount: int,
    include_exchanges: bool,
    options: str,
):
    """
    Display a selection of Equities based on country, sector, industry, name and/or description filtered
    by market cap. If no arguments are given, return equities categorized as Large Cap.
    [Source: Finance Database]

    Parameters
    ----------
    country: str
        Search by country to find stocks matching the criteria.
    sector : str
        Search by sector to find stocks matching the criteria.
    industry : str
        Search by industry to find stocks matching the criteria.
    name : str
        Search by name to find stocks matching the criteria.
    description : str
        Search by description to find stocks matching the criteria.
    marketcap : str
        Select stocks based on the market cap.
    amount : int
        Number of stocks to display, default is 10.
    include_exchanges: bool
        When you wish to include different exchanges use this boolean.
    options : str
        Show the country, sector or industry options.
    """
    # When the user only asks for the available filter options, list them and stop.
    if options is not None:
        for option in fd.show_options("equities", options):
            console.print(option)
        return

    # NOTE(review): despite the `str` annotations, the filters are joined here,
    # which suggests they arrive as word lists from argparse — confirm at call site.
    if country is not None:
        country = " ".join(country).title()
    if sector is not None:
        sector = " ".join(sector).title()
    if industry is not None:
        industry = " ".join(industry).title()

    data = fd.select_equities(
        country=country,
        sector=sector,
        industry=industry,
        exclude_exchanges=include_exchanges,
    )

    # Narrow down successively by name, description and market cap.
    if name is not None:
        data = fd.search_products(data, query=" ".join(name), search="long_name")
    if description is not None:
        data = fd.search_products(data, query=" ".join(description), search="summary")
    if marketcap is not None:
        data = fd.search_products(
            data, query=f"{''.join(marketcap)} Cap", search="market_cap"
        )

    columns = [
        "long_name",
        "sector",
        "industry",
        "country",
        "city",
        "website",
        "market_cap",
    ]
    table_data = pd.DataFrame(data).T[columns]
    print_rich_table(
        table_data.iloc[:amount],
        show_index=True,
        headers=[
            "Name",
            "Sector",
            "Industry",
            "Country",
            "City",
            "Website",
            "Market Cap",
        ],
        title="Equities",
    )
import logging
import os
from typing import Optional
from openbb_terminal.decorators import log_start_end
from openbb_terminal.decorators import check_api_key
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.stocks.discovery import finnhub_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def past_ipo(
    num_days_behind: int = 5,
    start_date: Optional[str] = None,
    limit: int = 20,
    export: str = "",
):
    """Show past IPO dates. [Source: Finnhub]

    Parameters
    ----------
    num_days_behind: int
        Number of days to look behind for IPOs dates
    start_date: str
        The starting date (format YYYY-MM-DD) to look for IPOs
    limit: int
        Limit number of IPOs to display. Default is 20
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    ipo_df = finnhub_model.get_past_ipo(num_days_behind, start_date)
    if not ipo_df.empty:
        print_rich_table(
            ipo_df.head(limit),
            headers=list(ipo_df.columns),
            show_index=False,
            title="IPO Dates",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "pipo",
        ipo_df,
    )
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def future_ipo(
    num_days_ahead: int = 5,
    end_date: Optional[str] = None,
    limit: int = 20,
    export: str = "",
):
    """Show upcoming IPO dates. [Source: Finnhub]

    Parameters
    ----------
    num_days_ahead: int
        Number of days to look ahead for IPOs dates
    end_date: datetime
        The end date (format YYYY-MM-DD) to look for IPOs from today onwards
    limit: int
        Limit number of IPOs to display. Default is 20
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    ipo_df = finnhub_model.get_future_ipo(num_days_ahead, end_date)
    if not ipo_df.empty:
        print_rich_table(
            ipo_df.head(limit),
            headers=list(ipo_df.columns),
            show_index=False,
            title="Future IPO Dates",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "fipo",
        ipo_df,
    )
__docformat__ = "numpy"
import json
import logging
from datetime import timedelta
import numpy as np
import pandas as pd
import requests
import yfinance as yf
from bs4 import BeautifulSoup
from pandas.core.frame import DataFrame
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_ark_orders(
    buys_only: bool = False,
    sells_only: bool = False,
    fund: str = "",
) -> DataFrame:
    """Return ARK orders in a DataFrame.

    Parameters
    ----------
    buys_only: bool
        Flag to filter on buys only
    sells_only: bool
        Flag to sort on sells only
    fund: str
        Optional filter by fund

    Returns
    -------
    DataFrame
        ARK orders data frame with the following columns -
        (ticker, date, shares, weight, fund, direction)
    """
    trades_url = "https://cathiesark.com/ark-funds-combined/trades"
    page_html = requests.get(
        trades_url, headers={"User-Agent": get_user_agent()}
    ).text
    # The trades payload is embedded as JSON inside the Next.js state script tag.
    state_script = BeautifulSoup(page_html, "lxml").find(
        "script", {"id": "__NEXT_DATA__"}
    )
    page_state = json.loads(state_script.string)
    orders = pd.json_normalize(page_state["props"]["pageProps"]["arkTrades"])
    if orders.empty:
        return pd.DataFrame()

    # Drop presentation/metadata fields that are irrelevant to the order table.
    orders.drop(
        [
            "hidden",
            "images.thumbnail",
            "cusip",
            "estimated_price",
            "updated_at",
            "created_at",
            "region",
            "country",
            "isADR",
            "companyName",
            "clinicalTrialsSearchHandle",
            "wasSPACBuy",
            "currencyMultiplier",
            "useRapidAPI",
            "description",
            "quandlTicker",
            "customThumbnail",
            "custom_thumbnail",
            "id",
        ],
        axis=1,
        inplace=True,
    )
    orders["date"] = pd.to_datetime(orders["date"], format="%Y-%m-%d").dt.date

    if orders.empty:
        console.print("The ARK orders aren't available at the moment.\n")
        return pd.DataFrame()

    # Apply the optional user filters.
    if fund:
        orders = orders[orders.fund == fund]
    if buys_only:
        orders = orders[orders.direction == "Buy"]
    if sells_only:
        orders = orders[orders.direction == "Sell"]
    return orders
@log_start_end(log=logger)
def add_order_total(data: DataFrame) -> DataFrame:
    """Take an ARK orders dataframe and pull Yahoo Finance data to add
    volume, open, close, high, low, and total columns.

    Parameters
    ----------
    data: DataFrame
        ARK orders data frame with the following columns -
        (ticker, date, shares, weight, fund, direction)

    Returns
    -------
    DataFrame
        ARK orders data frame with the following columns -
        (ticker, date, shares, volume, open, close, high, low, total, weight, fund, direction)
    """
    # Orders are newest-first, so the last row holds the oldest date;
    # start the download one day earlier to be safe.
    earliest = data["date"].iloc[-1] - timedelta(days=1)
    symbols = " ".join(data["ticker"].unique())
    prices = yf.download(symbols, start=earliest, progress=False)
    if prices.empty:
        return pd.DataFrame()

    price_fields = ["Volume", "Open", "Close", "High", "Low"]
    for offset, field in enumerate(price_fields + ["Total"]):
        data.insert(offset + 3, field.lower(), 0)

    # Silence the chained-assignment warning while filling rows in place.
    pd.options.mode.chained_assignment = None
    for row, _ in data.iterrows():
        # No price data for that ticker/date -> zero out the new columns.
        if np.isnan(
            prices["Open"][data.loc[row, "ticker"]][
                data.loc[row, "date"].strftime("%Y-%m-%d")
            ]
        ):
            for field in price_fields + ["Total"]:
                data.loc[row, field.lower()] = 0
            continue
        for field in price_fields:
            data.loc[row, field.lower()] = prices[field][data.loc[row, "ticker"]][
                data.loc[row, "date"].strftime("%Y-%m-%d")
            ]
        data.loc[row, "total"] = data.loc[row, "close"] * data.loc[row, "shares"]
    pd.options.mode.chained_assignment = "warn"
    return data
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.stocks.discovery import yahoofinance_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_gainers(limit: int = 5, export: str = "") -> None:
    """Display top gaining stocks. [Source: Yahoo Finance]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    gainers = yahoofinance_model.get_gainers()
    if not gainers.empty:
        print_rich_table(
            gainers.head(limit),
            headers=list(gainers.columns),
            show_index=False,
            title="Gainers",
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "gainers", gainers)
@log_start_end(log=logger)
def display_losers(limit: int = 5, export: str = "") -> None:
    """Display top losing stocks. [Source: Yahoo Finance]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    losers = yahoofinance_model.get_losers()
    if not losers.empty:
        print_rich_table(
            losers.head(limit),
            headers=list(losers.columns),
            show_index=False,
            title="Display Losers",
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "losers", losers)
@log_start_end(log=logger)
def display_ugs(limit: int = 5, export: str = "") -> None:
    """Display most undervalued growth stocks. [Source: Yahoo Finance]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    ugs_df = yahoofinance_model.get_ugs()
    if not ugs_df.empty:
        print_rich_table(
            ugs_df.head(limit),
            headers=list(ugs_df.columns),
            show_index=False,
            title="Undervalued Growth Stocks",
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "ugs", ugs_df)
@log_start_end(log=logger)
def display_gtech(limit: int = 5, export: str = "") -> None:
    """Display growth technology stocks. [Source: Yahoo Finance]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    gtech_df = yahoofinance_model.get_gtech()
    if not gtech_df.empty:
        print_rich_table(
            gtech_df.head(limit),
            headers=list(gtech_df.columns),
            show_index=False,
            title="Growth Tech Stocks",
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "gtech", gtech_df)
@log_start_end(log=logger)
def display_active(limit: int = 5, export: str = "") -> None:
    """Display most active stocks. [Source: Yahoo Finance]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    active_df = yahoofinance_model.get_active()
    if not active_df.empty:
        print_rich_table(
            active_df.head(limit),
            headers=list(active_df.columns),
            show_index=False,
            title="Most Active Stocks",
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "active", active_df)
@log_start_end(log=logger)
def display_ulc(limit: int = 5, export: str = "") -> None:
    """Display potentially undervalued large cap stocks. [Source: Yahoo Finance]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    ulc_df = yahoofinance_model.get_ulc()
    if not ulc_df.empty:
        # Drop rows with missing values before rendering the table.
        print_rich_table(
            ulc_df.head(limit).dropna(),
            headers=list(ulc_df.columns),
            show_index=False,
            title="Undervalued Large Cap Stocks",
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "ulc", ulc_df)
@log_start_end(log=logger)
def display_asc(limit: int = 5, export: str = "") -> None:
    """Display small cap stocks with earnings growth rates better than 25%. [Source: Yahoo Finance]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    asc_df = yahoofinance_model.get_asc()
    if not asc_df.empty:
        # Drop rows with missing values before rendering the table.
        print_rich_table(
            asc_df.head(limit).dropna(),
            headers=list(asc_df.columns),
            show_index=False,
            title="High Growth Small Caps",
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "asc", asc_df)
__docformat__ = "numpy"
from datetime import datetime
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.discovery import nasdaq_model
from openbb_terminal.decorators import check_api_key
# pylint: disable=E1123
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_KEY_QUANDL"])
def display_top_retail(limit: int = 3, export: str = ""):
    """Display the top 10 retail traded stocks for last days.

    Parameters
    ----------
    limit : int, optional
        Number of days to show by default 3
    export : str, optional
        Format to export data, by default ""
    """
    retails = nasdaq_model.get_retail_tickers()
    if retails.empty:
        return

    # Each date contributes ten tickers, so a window of `limit` days is
    # the first `limit * 10` rows; render one table per date.
    for day, day_df in retails.head(limit * 10).groupby("Date"):
        day_df = day_df.drop(columns=["Date"]).reset_index(drop=True)
        print_rich_table(
            day_df,
            headers=[col.title() for col in day_df.columns],
            show_index=False,
            title=f"[bold]{day} Top Retail:[/bold]",
        )
        console.print("")

    export_data(export, os.path.dirname(os.path.abspath(__file__)), "rtat", retails)
@log_start_end(log=logger)
def display_dividend_calendar(
    date: str = None,
    sortby: str = "Dividend",
    ascend: bool = False,
    limit: int = 10,
    export: str = "",
):
    """Display NASDAQ dividend calendar.

    Parameters
    ----------
    date: str
        Date to get dividend calendar for, format YYYY-MM-DD
    sortby: str
        Column to sort data for
    ascend: bool
        Flag to sort in ascending order
    limit: int
        Number of results to show
    export: str
        Format to export data
    """
    if date is None:
        date = datetime.today().strftime("%Y-%m-%d")

    calendar = nasdaq_model.get_dividend_cal(date)
    if calendar.empty:
        console.print(
            "No data found. Check that the date provided is a market day. If it is then try this function"
            " again as the request may have not gone through.\n"
        )
        return

    # Rename raw API field names to presentable column headers.
    column_names = {
        "symbol": "Symbol",
        "companyName": "Name",
        "dividend_Ex_Date": "Ex-Dividend Date",
        "payment_Date": "Payment Date",
        "record_Date": "Record Date",
        "dividend_Rate": "Dividend",
        "indicated_Annual_Dividend": "Annual Dividend",
        "announcement_Date": "Announcement Date",
    }
    calendar = calendar.drop(columns=["announcement_Date"])
    calendar.columns = calendar.columns.map(column_names)
    calendar = calendar.sort_values(by=sortby, ascending=ascend)

    print_rich_table(
        calendar.head(limit),
        headers=[col.title() for col in calendar.columns],
        title=f"[bold]Dividend Calendar for {date}[/bold]",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "divcal", calendar)
__docformat__ = "numpy"
import logging
import os
import re
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.stocks.discovery import fidelity_model
from openbb_terminal import rich_config
logger = logging.getLogger(__name__)
def lambda_buy_sell_ratio_color_red_green(val: str) -> str:
    """Add color tags to a "X% Buys, Y% Sells" ratio cell.

    The dominant side is colored: green on the buy percentage when buys
    are at least even with sells, otherwise red on the sell percentage.

    Parameters
    ----------
    val : str
        Buys/Sells ratio cell

    Returns
    -------
    str
        Buys/Sells ratio cell with color tags (unchanged if it does not
        match the expected pattern)
    """
    parsed = re.match(r"(\d+)% Buys, (\d+)% Sells", val, re.M | re.I)
    if parsed is None:
        return val
    buy_pct, sell_pct = (int(group) for group in parsed.groups())
    if buy_pct >= sell_pct:
        return f"[green]{buy_pct}%[/green] Buys, {sell_pct}% Sells"
    return f"{buy_pct}% Buys, [red]{sell_pct}%[/red] Sells"
def lambda_price_change_color_red_green(val: str) -> str:
    """Add color tags to a price-change cell: positive -> green, else red.

    Parameters
    ----------
    val : str
        Price change cell; the leading whitespace-delimited token must
        parse as a float

    Returns
    -------
    str
        Price change cell with color tags
    """
    change = float(val.split(" ")[0])
    # Note: zero change is rendered red, matching the original behavior.
    return f"[green]{val}[/green]" if change > 0 else f"[red]{val}[/red]"
@log_start_end(log=logger)
def orders_view(limit: int = 5, export: str = ""):
    """Print the last N orders by Fidelity customers. [Source: Fidelity]

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    order_header, orders = fidelity_model.get_orders()

    # Let rich handle wrapping; do not truncate cell contents.
    pd.set_option("display.max_colwidth", None)

    if rich_config.USE_COLOR:
        orders["Buy / Sell Ratio"] = orders["Buy / Sell Ratio"].apply(
            lambda_buy_sell_ratio_color_red_green
        )
        orders["Price Change"] = orders["Price Change"].apply(
            lambda_price_change_color_red_green
        )

    # Keep the first `limit` rows and drop the trailing (news) column.
    orders = orders.head(n=limit).iloc[:, :-1]
    print_rich_table(
        orders,
        headers=[col.title() for col in orders.columns],
        show_index=False,
        title=f"{order_header}:",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ford",
        orders,
    )
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.discovery import shortinterest_model
from openbb_terminal.stocks.discovery import yahoofinance_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def low_float(limit: int = 5, export: str = ""):
    """Print top N low float stocks from https://www.lowfloat.com

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # Skip the first row (site header artifact) before taking `limit` rows.
    float_df = shortinterest_model.get_low_float().iloc[1:].head(n=limit)
    print_rich_table(
        float_df,
        headers=list(float_df.columns),
        show_index=False,
        title="Top Float Stocks",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "lowfloat",
        float_df,
    )
@log_start_end(log=logger)
def hot_penny_stocks(limit: int = 10, export: str = "", source: str = "YahooFinance"):
    """Print top N hot penny stocks from https://www.pennystockflow.com

    Parameters
    ----------
    limit: int
        Number of stocks to display
    export : str
        Export dataframe data to csv,json,xlsx file
    source : str
        Where to get the data from. Choose from - YahooFinance or Shortinterest
    """
    if source == "YahooFinance":
        penny_df = yahoofinance_model.get_hotpenny()
    elif source == "Shortinterest":
        console.print("[red]Data from this source is often not penny stocks[/red]\n")
        penny_df = shortinterest_model.get_today_hot_penny_stocks()
    else:
        console.print("[red]Invalid source provided[/red]\n")
        return

    if penny_df.empty:
        console.print("[red]No data found.[/red]")
        return

    # Shortinterest scrape has no usable header row.
    print_rich_table(
        penny_df.head(limit),
        headers=list(penny_df.columns) if source != "Shortinterest" else None,
        show_index=False,
        title="Top Penny Stocks",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "hotpenny",
        penny_df,
    )
__docformat__ = "numpy"
import logging
from typing import Tuple
import pandas as pd
import requests
from bs4 import BeautifulSoup
from pandas.core.frame import DataFrame
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_orders() -> Tuple[str, DataFrame]:
    """Return Fidelity orders in a DataFrame.

    Returns
    -------
    Tuple[str, DataFrame]
        First value in the tuple is a Fidelity orders header
        Fidelity orders Dataframe with the following columns -
        Symbol, Buy / Sell Ratio, Price Change, Company, # Buy Orders, # Sell Orders
    """
    url_orders = (
        "https://eresearch.fidelity.com/eresearch/gotoBL/fidelityTopOrders.jhtml"
    )
    soup = BeautifulSoup(
        requests.get(url_orders, headers={"User-Agent": get_user_agent()}).text, "lxml"
    )

    rows = []
    row_cells = []
    cell_idx = 0
    # The table is flattened into a stream of <td> cells, seven per stock;
    # cell_idx cycles through each row's cell positions.
    table_cells = soup.findAll(
        "td",
        {"class": ["second", "third", "fourth", "fifth", "sixth", "seventh", "eight"]},
    )
    for cell in table_cells:
        position = cell_idx + 1
        if position % 3 == 0 or position % 4 == 0 or position % 6 == 0:
            # Numeric cells (price change, buy/sell order counts).
            if not cell:
                row_cells.append("")
            else:
                try:
                    row_cells.append(cell.contents[1])
                except IndexError:
                    row_cells.append("0")
        elif position % 5 == 0:
            # The ratio cell stores its value in a title="..." attribute.
            raw = str(cell)
            row_cells.append(
                raw[raw.find('title="') + len('title="') : raw.find('"/>')]
            )
        else:
            row_cells.append(cell.text.strip())
        cell_idx += 1
        # Seven cells collected -> flush the completed row.
        if (cell_idx + 1) % 8 == 0:
            rows.append(row_cells)
            row_cells = []
            cell_idx = 0

    df_orders = pd.DataFrame(
        rows,
        columns=[
            "Symbol",
            "Company",
            "Price Change",
            "# Buy Orders",
            "Buy / Sell Ratio",
            "# Sell Orders",
            "Latest News",
        ],
    )
    # Reorder for display.
    df_orders = df_orders[
        [
            "Symbol",
            "Buy / Sell Ratio",
            "Price Change",
            "Company",
            "# Buy Orders",
            "# Sell Orders",
            "Latest News",
        ]
    ]
    order_header = soup.findAll("span", {"class": "source"})[0].text.capitalize()
    return order_header, df_orders
__docformat__ = "numpy"
import logging
from typing import Dict, List
import pandas as pd
import requests
from bs4 import BeautifulSoup
from pandas.core.frame import DataFrame
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_earnings_html(url_next_earnings: str) -> str:
    """Wrap HTTP requests.get for testability.

    Parameters
    ----------
    url_next_earnings : str
        Next earnings URL

    Returns
    -------
    str
        HTML page of next earnings
    """
    response = requests.get(
        url_next_earnings, headers={"User-Agent": get_user_agent()}
    )
    return response.text
@log_start_end(log=logger)
def get_next_earnings(limit: int = 10) -> DataFrame:
    """Return a DataFrame with upcoming earnings.

    Parameters
    ----------
    limit : int
        Number of pages

    Returns
    -------
    DataFrame
        Upcoming earnings DataFrame indexed by date
    """
    rows = []
    page_url = "https://seekingalpha.com/earnings/earnings-calendar"
    for page in range(0, limit):
        soup = BeautifulSoup(get_earnings_html(page_url), "lxml")
        # Each NASDAQ table row: first three cells are ticker, name, date.
        for stock_row in soup.findAll("tr", {"data-exchange": "NASDAQ"}):
            rows.append([cell.text for cell in stock_row.contents[:3]])
        # Advance to the next calendar page for the following iteration.
        page_url = f"https://seekingalpha.com/earnings/earnings-calendar/{page + 1}"

    earnings_df = pd.DataFrame(rows, columns=["Ticker", "Name", "Date"])
    earnings_df["Date"] = pd.to_datetime(earnings_df["Date"])
    return earnings_df.set_index("Date")
@log_start_end(log=logger)
def get_articles_html(url_articles: str) -> str:
    """Wrap HTTP requests.get for testability.

    Parameters
    ----------
    url_articles : str
        Articles URL

    Returns
    -------
    str
        HTML page of articles
    """
    response = requests.get(url_articles, headers={"User-Agent": get_user_agent()})
    return response.text
@log_start_end(log=logger)
def get_trending_list(limit: int = 5) -> list:
    """Return a list of trending articles.

    Parameters
    ----------
    limit: int
        Number of articles

    Returns
    -------
    list
        Trending articles list
    """
    articles = []
    url_articles = "https://seekingalpha.com/news/trending_news"
    response = requests.get(url_articles, headers={"User-Agent": get_user_agent()})

    if response.status_code != 200:
        console.print("Invalid response\n")
    else:
        for item in response.json():
            uri = item["uri"]
            # Only keep entries that point at actual news articles.
            if not uri.startswith("/news/"):
                continue
            articles.append(
                {
                    "title": item["title"],
                    # Strip the fractional-seconds suffix from the timestamp.
                    "publishedAt": item["publish_on"][: item["publish_on"].rfind(".")],
                    "url": "https://seekingalpha.com" + uri,
                    # Article id is the leading number of the slug: /news/<id>-....
                    "id": uri.split("/")[2].split("-")[0],
                }
            )
    return articles[:limit]
@log_start_end(log=logger)
def get_article_data(article_id: int) -> dict:
    """Return a single article's data.

    Parameters
    ----------
    article_id : int
        Article ID

    Returns
    -------
    dict
        Article data
    """
    response = requests.get(
        f"https://seekingalpha.com/api/v3/news/{article_id}",
        headers={"User-Agent": get_user_agent()},
    )
    payload = response.json()
    attributes = payload["data"]["attributes"]

    # Keep list items on their own lines, then strip all remaining HTML.
    body = attributes["content"].replace("</li>", "</li>\n")
    body = BeautifulSoup(body, features="html.parser").get_text()

    return {
        "title": attributes["title"],
        "publishedAt": attributes["lastModified"],
        "url": "https://seekingalpha.com" + payload["data"]["links"]["self"],
        "content": body,
    }
@log_start_end(log=logger)
def get_news_html(news_type: str = "Top-News") -> dict:
    """Get news JSON payload. [Source: SeekingAlpha]

    Parameters
    ----------
    news_type : str
        From: Top-News, On-The-Move, Market-Pulse, Notable-Calls, Buybacks, Commodities, Crypto, Issuance, Global,
        Guidance, IPOs, SPACs, Politics, M-A, Consumer, Energy, Financials, Healthcare, MLPs, REITs, Technology

    Returns
    -------
    dict
        Decoded JSON payload of articles
    """
    # Category filter plus fixed paging (25 items, page 1); percent-encoded
    # query must stay byte-identical to what the API expects.
    sa_url = (
        f"http://seekingalpha.com/api/v3/news?filter%5Bcategory%5D=market-news%3A%3A{news_type}"
        "&filter%5Bsince%5D=0&filter%5Buntil%5D=0&include=author%2CprimaryTickers%2CsecondaryTickers"
        "&isMounting=true&page%5Bsize%5D=25&page%5Bnumber%5D=1"
    )
    response = requests.get(sa_url, headers={"User-Agent": get_user_agent()})
    return response.json()
@log_start_end(log=logger)
def get_news(news_type: str = "Top-News", limit: int = 5) -> List:
    """Get news. [Source: SeekingAlpha]

    Parameters
    ----------
    news_type : str
        From: Top-News, On-The-Move, Market-Pulse, Notable-Calls, Buybacks, Commodities, Crypto, Issuance, Global,
        Guidance, IPOs, SPACs, Politics, M-A, Consumer, Energy, Financials, Healthcare, MLPs, REITs, Technology
    limit : int
        Number of news to display

    Returns
    -------
    List[dict]
        List of dict news (at most `limit` entries)
    """
    news_articles: Dict = get_news_html(news_type)
    news_to_display = []
    # Fix: the previous `if idx > limit: break` returned limit + 1 items;
    # slicing to `limit` matches the documented count.
    for news in news_articles.get("data", [])[:limit]:
        news_to_display.append(
            {
                # Drop the trailing UTC-offset suffix and the "T" separator.
                "publishOn": news["attributes"]["publishOn"].replace("T", " ")[:-6],
                "id": news["id"],
                "title": news["attributes"]["title"],
                "url": news["links"]["canonical"],
            }
        )
    return news_to_display
__docformat__ = "numpy"
import argparse
import logging
from datetime import datetime
from typing import List
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_int_range,
check_non_negative,
check_positive,
valid_date,
valid_date_in_past,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.stocks.discovery import (
ark_view,
finnhub_view,
nasdaq_view,
seeking_alpha_view,
shortinterest_view,
yahoofinance_view,
finviz_view,
)
from openbb_terminal.stocks import stocks_helper
from openbb_terminal.rich_config import console, MenuText
# pylint:disable=C0302
logger = logging.getLogger(__name__)
class DiscoveryController(BaseController):
    """Discovery Controller class.

    Menu controller for /stocks/disc/: each `call_*` method builds an
    argparse parser for one discovery command and dispatches to the
    matching data-source view.
    """

    # Commands exposed in this menu
    CHOICES_COMMANDS = [
        "pipo",
        "fipo",
        "gainers",
        "losers",
        "ugs",
        "gtech",
        "active",
        "ulc",
        "asc",
        "arkord",
        "upcoming",
        "trending",
        "lowfloat",
        "hotpenny",
        "cnews",
        "rtat",
        "divcal",
        "heatmap",
    ]
    # Columns accepted by `arkord --sortby`
    arkord_sortby_choices = [
        "date",
        "volume",
        "open",
        "high",
        "close",
        "low",
        "total",
        "weight",
        "shares",
    ]
    # ARK funds accepted by `arkord --fund` ("" = no fund filter)
    arkord_fund_choices = ["ARKK", "ARKF", "ARKW", "ARKQ", "ARKG", "ARKX", ""]
    # `cnews` categories, lower-cased so CLI matching is case-insensitive.
    # NOTE(review): the `cnews` parser's default "Top-News" is NOT in this
    # lower-cased list — argparse does not validate defaults, so the default
    # passes through with its original casing; confirm the model accepts both.
    cnews_type_choices = [
        nt.lower()
        for nt in [
            "Top-News",
            "On-The-Move",
            "Market-Pulse",
            "Notable-Calls",
            "Buybacks",
            "Commodities",
            "Crypto",
            "Issuance",
            "Global",
            "Guidance",
            "IPOs",
            "SPACs",
            "Politics",
            "M-A",
            "Consumer",
            "Energy",
            "Financials",
            "Healthcare",
            "MLPs",
            "REITs",
            "Technology",
        ]
    ]
    PATH = "/stocks/disc/"
    # Columns of the NASDAQ dividend calendar (sort targets for `divcal`)
    dividend_columns = [
        "Name",
        "Symbol",
        "Ex-Dividend Date",
        "Payment Date",
        "Record Date",
        "Dividend",
        "Annual Dividend",
        "Announcement Date",
    ]
    # Timeframes supported by the finviz `heatmap` command
    heatmap_timeframes = ["day", "week", "month", "3month", "6month", "year", "ytd"]
    CHOICES_GENERATION = True

    def __init__(self, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)

        if session and obbff.USE_PROMPT_TOOLKIT:
            # Completion choices are auto-generated (CHOICES_GENERATION = True)
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)

    def print_help(self):
        """Print help"""
        mt = MenuText("stocks/disc/")
        mt.add_cmd("pipo", "Finnhub")
        mt.add_cmd("fipo", "Finnhub")
        mt.add_cmd("gainers", "Yahoo Finance")
        mt.add_cmd("losers", "Yahoo Finance")
        mt.add_cmd("ugs", "Yahoo Finance")
        mt.add_cmd("gtech", "Yahoo Finance")
        mt.add_cmd("active", "Yahoo Finance")
        mt.add_cmd("ulc", "Yahoo Finance")
        mt.add_cmd("asc", "Yahoo Finance")
        mt.add_cmd("arkord", "Cathies Ark")
        mt.add_cmd("upcoming", "Seeking Alpha")
        mt.add_cmd("trending", "Seeking Alpha")
        mt.add_cmd("cnews", "Seeking Alpha")
        mt.add_cmd("lowfloat", "Fidelity")
        mt.add_cmd("hotpenny", "Shortinterest")
        mt.add_cmd("rtat", "NASDAQ Data Link")
        mt.add_cmd("divcal", "NASDAQ Data Link")
        mt.add_cmd("heatmap", "Finviz")
        console.print(text=mt.menu_text, menu="Stocks - Discovery")

    # TODO Add flag for adding last price to the following table
    @log_start_end(log=logger)
    def call_divcal(self, other_args: List[str]):
        """Process divcal command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="divcal",
            description="""Get dividend calendar for selected date""",
        )
        parser.add_argument(
            "-d",
            "--date",
            default=datetime.now(),
            type=valid_date,
            dest="date",
            help="Date to get format for",
        )
        parser.add_argument(
            "-s",
            "--sort",
            default="dividend",
            type=str.lower,
            choices=stocks_helper.format_parse_choices(self.dividend_columns),
            help="Column to sort by",
            dest="sort",
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            dest="reverse",
            default=False,
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        # Allow "divcal 2023-01-05" shorthand: a leading bare value is
        # treated as the -d/--date argument.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-d")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED, limit=10
        )
        if ns_parser:
            # Map the CLI-friendly choice back to the real column name
            sort_col = stocks_helper.map_parse_choices(self.dividend_columns)[
                ns_parser.sort
            ]
            nasdaq_view.display_dividend_calendar(
                date=ns_parser.date.strftime("%Y-%m-%d"),
                sortby=sort_col,
                ascend=ns_parser.reverse,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_pipo(self, other_args: List[str]):
        """Process pipo command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="pipo",
            description="""
                Past IPOs dates. [Source: https://finnhub.io]
            """,
        )
        parser.add_argument(
            "-d",
            "--days",
            action="store",
            dest="days",
            type=check_non_negative,
            default=5,
            help="Number of past days to look for IPOs.",
        )
        parser.add_argument(
            "-s",
            "--start",
            type=valid_date_in_past,
            default=None,
            dest="start",
            help="""The starting date (format YYYY-MM-DD) to look for IPOs.
            When set, start date will override --days argument""",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_non_negative,
            default=20,
            help="Limit number of IPOs to display.",
        )
        # Allow "pipo N" shorthand for "-l N"
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            finnhub_view.past_ipo(
                num_days_behind=ns_parser.days,
                limit=ns_parser.limit,
                start_date=ns_parser.start,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_fipo(self, other_args: List[str]):
        """Process fipo command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="fipo",
            description="""
                Future IPOs dates. [Source: https://finnhub.io]
            """,
        )
        parser.add_argument(
            "-d",
            "--days",
            action="store",
            dest="days",
            type=check_non_negative,
            default=5,
            help="Number of days in the future to look for IPOs.",
        )
        parser.add_argument(
            "-s",
            "--end",
            type=valid_date,
            default=None,
            dest="end",
            help="""The end date (format YYYY-MM-DD) to look for IPOs, starting from today.
            When set, end date will override --days argument""",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_non_negative,
            default=20,
            help="Limit number of IPOs to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            finnhub_view.future_ipo(
                num_days_ahead=ns_parser.days,
                limit=ns_parser.limit,
                end_date=ns_parser.end,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_gainers(self, other_args: List[str]):
        """Process gainers command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="gainers",
            description="Print up to 25 top gainers. [Source: Yahoo Finance]",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_int_range(1, 25),
            default=5,
            help="Limit of stocks to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            yahoofinance_view.display_gainers(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_losers(self, other_args: List[str]):
        """Process losers command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="losers",
            description="Print up to 25 top losers. [Source: Yahoo Finance]",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_int_range(1, 25),
            default=5,
            help="Limit of stocks to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            yahoofinance_view.display_losers(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_ugs(self, other_args: List[str]):
        """Process ugs command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="ugs",
            description="""
                Print up to 25 undervalued stocks with revenue and earnings growth in excess of 25%.
                [Source: Yahoo Finance]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_int_range(1, 25),
            default=5,
            help="Limit of stocks to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            yahoofinance_view.display_ugs(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_gtech(self, other_args: List[str]):
        """Process gtech command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="gtech",
            description="Print up to 25 top tech stocks with revenue and earnings"
            + " growth in excess of 25%. [Source: Yahoo Finance]",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_int_range(1, 25),
            default=5,
            help="Limit of stocks to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            yahoofinance_view.display_gtech(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_active(self, other_args: List[str]):
        """Process active command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="active",
            description="""
                Print up to 25 top most actively traded intraday tickers. [Source: Yahoo Finance]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_int_range(1, 25),
            default=5,
            help="Limit of stocks to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            yahoofinance_view.display_active(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_ulc(self, other_args: List[str]):
        """Process ulc command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="ulc",
            description="""
                Print up to 25 potentially undervalued large cap stocks. [Source: Yahoo Finance]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_int_range(1, 25),
            default=5,
            help="Limit of stocks to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            yahoofinance_view.display_ulc(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_asc(self, other_args: List[str]):
        """Process asc command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="asc",
            description="""
                Print up to 25 small cap stocks with earnings growth rates better than 25%. [Source: Yahoo Finance]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_int_range(1, 25),
            default=5,
            help="Limit of stocks to display.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            yahoofinance_view.display_asc(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_arkord(self, other_args: List[str]):
        """Process arkord command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="arkord",
            description="""
                Orders by ARK Investment Management LLC - https://ark-funds.com/. [Source: https://cathiesark.com]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=10,
            help="Limit of stocks to display.",
        )
        parser.add_argument(
            "-s",
            "--sortby",
            dest="sort_col",
            choices=self.arkord_sortby_choices,
            type=str,
            help="Column to sort by",
            default="",
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            dest="reverse",
            default=False,
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        # -b and -c are mutually intended filters (buys vs sells); both
        # default to False, i.e. show all orders.
        parser.add_argument(
            "-b",
            "--buy_only",
            dest="buys_only",
            help="Flag to look at buys only",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "-c",
            "--sell_only",
            dest="sells_only",
            help="Flag to look at sells only",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "--fund",
            type=str,
            default="",
            help="Filter by fund",
            dest="fund",
            choices=self.arkord_fund_choices,
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            ark_view.ark_orders_view(
                limit=ns_parser.limit,
                sortby=ns_parser.sort_col,
                ascend=ns_parser.reverse,
                buys_only=ns_parser.buys_only,
                sells_only=ns_parser.sells_only,
                fund=ns_parser.fund,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_upcoming(self, other_args: List[str]):
        # TODO: switch to nasdaq
        """Process upcoming command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="upcoming",
            description="""Upcoming earnings release dates. [Source: Seeking Alpha]""",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=1,
            help="Limit of upcoming earnings release dates to display.",
        )
        parser.add_argument(
            "-p",
            "--pages",
            action="store",
            dest="n_pages",
            type=check_positive,
            default=10,
            help="Number of pages to read upcoming earnings from in Seeking Alpha website.",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            seeking_alpha_view.upcoming_earning_release_dates(
                num_pages=ns_parser.n_pages,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_trending(self, other_args: List[str]):
        """Process trending command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="trending",
            description="""Trending news articles. [Source: Seeking Alpha]""",
        )
        parser.add_argument(
            "-i",
            "--id",
            action="store",
            dest="n_id",
            type=check_positive,
            default=-1,
            help="article ID",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=5,
            help="limit of articles being printed",
        )
        parser.add_argument(
            "-d",
            "--date",
            action="store",
            dest="s_date",
            type=valid_date,
            default=datetime.now().strftime("%Y-%m-%d"),
            help="starting date of articles",
        )
        # Bare leading value is an article id ("trending 123" -> "-i 123")
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-i")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            # NOTE(review): ns_parser.s_date is parsed but never forwarded
            # to the view — confirm whether the -d option is still meant
            # to filter by date.
            seeking_alpha_view.news(
                article_id=ns_parser.n_id,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_lowfloat(self, other_args: List[str]):
        """Process lowfloat command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="lowfloat",
            description="""
                Print top stocks with lowest float. LowFloat.com provides a convenient
                sorted database of stocks which have a float of under 10 million shares. Additional key
                data such as the number of outstanding shares, short interest, and company industry is
                displayed. Data is presented for the Nasdaq Stock Market, the New York Stock Exchange,
                the American Stock Exchange, and the Over the Counter Bulletin Board. [Source: www.lowfloat.com]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=5,
            help="limit of stocks to display",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            shortinterest_view.low_float(
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_cnews(self, other_args: List[str]):
        """Process cnews command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="cnews",
            description="""Customized news. [Source: Seeking Alpha]""",
        )
        parser.add_argument(
            "-t",
            "--type",
            action="store",
            dest="s_type",
            choices=self.cnews_type_choices,
            default="Top-News",
            help="number of news to display",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=5,
            help="limit of news to display",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-t")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            seeking_alpha_view.display_news(
                news_type=ns_parser.s_type,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_hotpenny(self, other_args: List[str]):
        """Process hotpenny command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="hotpenny",
            description="Provides top penny stocks from various websites. [Source: Yfinance]",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=5,
            help="limit of stocks to display",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            # NOTE(review): ns_parser.source is not declared above — this
            # relies on parse_known_args_and_warn injecting a --source
            # argument; confirm that injection happens for this command.
            shortinterest_view.hot_penny_stocks(
                limit=ns_parser.limit,
                export=ns_parser.export,
                source=ns_parser.source,
            )

    @log_start_end(log=logger)
    def call_rtat(self, other_args: List[str]):
        """Process rtat command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rtat",
            description="""
                Tracking over $30B USD/day of individual investors trades,
                RTAT gives a daily view into retail activity and sentiment for over 9,500 US traded stocks,
                ADRs, and ETPs
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=3,
            help="limit of days to display",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            nasdaq_view.display_top_retail(
                limit=ns_parser.limit, export=ns_parser.export
            )

    @log_start_end(log=logger)
    def call_heatmap(self, other_args: List[str]):
        """Process heatmap command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="heatmap",
            description="""
                Get the SP 500 heatmap from finviz and display in interactive treemap
            """,
        )
        parser.add_argument(
            "-t",
            "--timeframe",
            default="day",
            choices=self.heatmap_timeframes,
            help="Timeframe to get heatmap data for",
            dest="timeframe",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            finviz_view.display_heatmap(ns_parser.timeframe, ns_parser.export)
__docformat__ = "numpy"
import logging
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks.discovery.disc_helpers import get_df
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_gainers() -> pd.DataFrame:
    """Get top gainers. [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Stock Gainers
    """
    url = "https://finance.yahoo.com/screener/predefined/day_gainers"
    # First table on the screener page holds the results.
    gainers = get_df(url)[0]
    # Drop all-empty columns, then blank remaining NaNs for display.
    gainers = gainers.dropna(how="all", axis=1).replace(float("NaN"), "")
    if gainers.empty:
        console.print("No gainers found.")
    return gainers
@log_start_end(log=logger)
def get_losers() -> pd.DataFrame:
    """Get top losers. [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Stock Losers
    """
    url = "https://finance.yahoo.com/screener/predefined/day_losers"
    # First table on the screener page holds the results.
    losers = get_df(url)[0]
    # Drop all-empty columns, then blank remaining NaNs for display.
    losers = losers.dropna(how="all", axis=1).replace(float("NaN"), "")
    if losers.empty:
        console.print("No losers found.")
    return losers
@log_start_end(log=logger)
def get_ugs() -> pd.DataFrame:
    """Get stocks with earnings growth rates better than 25% and relatively low PE and PEG ratios.
    [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Undervalued stocks
    """
    url = "https://finance.yahoo.com/screener/predefined/undervalued_growth_stocks"
    # First table on the screener page holds the results.
    undervalued = get_df(url)[0]
    undervalued = undervalued.dropna(how="all", axis=1).replace(float("NaN"), "")
    if undervalued.empty:
        console.print("No data found.")
    return undervalued
@log_start_end(log=logger)
def get_gtech() -> pd.DataFrame:
    """Get technology stocks with revenue and earnings growth in excess of 25%. [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Growth technology stocks
    """
    url = "https://finance.yahoo.com/screener/predefined/growth_technology_stocks"
    # First table on the screener page holds the results.
    growth_tech = get_df(url)[0]
    growth_tech = growth_tech.dropna(how="all", axis=1).replace(float("NaN"), "")
    if growth_tech.empty:
        console.print("No data found.")
    return growth_tech
@log_start_end(log=logger)
def get_active() -> pd.DataFrame:
    """Get stocks ordered in descending order by intraday trade volume. [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Most active stocks
    """
    url = "https://finance.yahoo.com/screener/predefined/most_actives"
    # First table on the screener page holds the results.
    most_active = get_df(url)[0]
    most_active = most_active.dropna(how="all", axis=1).replace(float("NaN"), "")
    if most_active.empty:
        console.print("No data found.")
    return most_active
@log_start_end(log=logger)
def get_ulc() -> pd.DataFrame:
    """Get Yahoo Finance potentially undervalued large cap stocks.
    [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Most undervalued large cap stocks
    """
    url = "https://finance.yahoo.com/screener/predefined/undervalued_large_caps"
    # First table on the screener page holds the results.
    large_caps = get_df(url)[0]
    large_caps = large_caps.dropna(how="all", axis=1).replace(float("NaN"), "")
    if large_caps.empty:
        console.print("No data found.")
    return large_caps
@log_start_end(log=logger)
def get_asc() -> pd.DataFrame:
    """Get Yahoo Finance small cap stocks with earnings growth rates better than 25%.
    [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Most aggressive small cap stocks
    """
    url = "https://finance.yahoo.com/screener/predefined/aggressive_small_caps"
    # First table on the screener page holds the results.
    small_caps = get_df(url)[0]
    small_caps = small_caps.dropna(how="all", axis=1).replace(float("NaN"), "")
    if small_caps.empty:
        console.print("No data found.")
    return small_caps
@log_start_end(log=logger)
def get_hotpenny() -> pd.DataFrame:
    """Get Yahoo Finance hot penny stocks. [Source: Yahoo Finance]

    Returns
    -------
    pd.DataFrame
        Hottest penny stocks
    """
    url = (
        "https://finance.yahoo.com/u/yahoo-finance/watchlists/most-active-penny-stocks/"
    )
    # The watchlist constituents are in the second table on the page.
    return get_df(url)[1]
__docformat__ = "numpy"
import logging
import os
from typing import List
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.discovery import seeking_alpha_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def upcoming_earning_release_dates(
    num_pages: int = 5, limit: int = 1, export: str = ""
):
    """Displays upcoming earnings release dates

    Parameters
    ----------
    num_pages: int
        Number of pages to scrap
    limit: int
        Number of upcoming earnings release dates
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # TODO: Check why there are repeated companies
    # TODO: Create a similar command that returns not only upcoming, but antecipated earnings
    # i.e. companies where expectation on their returns are high

    df_earnings = seeking_alpha_model.get_next_earnings(num_pages)
    if df_earnings.empty:
        console.print("No upcoming earnings release dates found")
        # BUG FIX: return early — previously execution fell through, and with
        # `export` set, pd.concat([]) below raised "No objects to concatenate".
        return

    pd.set_option("display.max_colwidth", None)
    l_earnings = []
    l_earnings_dates = []
    for n_days, earning_date in enumerate(df_earnings.index.unique()):
        if n_days > (limit - 1):
            break

        # TODO: Potentially extract Market Cap for each Ticker, and sort
        # by Market Cap. Then cut the number of tickers shown to 10 with
        # bigger market cap. Didier attempted this with yfinance, but
        # the computational time involved wasn't worth pursuing that solution.

        df_earn = (
            df_earnings[earning_date == df_earnings.index][["Ticker", "Name"]]
            .dropna()
            .drop_duplicates()
        )
        if export:
            l_earnings_dates.append(earning_date.date())
            l_earnings.append(df_earn)

        # Use the ticker as the display index (mutates the exported frame too,
        # matching previous behavior — the index is reset again before export)
        df_earn.index = df_earn["Ticker"].values
        df_earn.drop(columns=["Ticker"], inplace=True)

        print_rich_table(
            df_earn,
            show_index=True,
            headers=[f"Earnings on {earning_date.date()}"],
            title="Upcoming Earnings Releases",
        )

    if export:
        for item in l_earnings:
            item.reset_index(drop=True, inplace=True)
        # One column block per earnings date
        df_data = pd.concat(l_earnings, axis=1, ignore_index=True)
        df_data.columns = l_earnings_dates
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "upcoming",
            df_data,
        )
@log_start_end(log=logger)
def news(article_id: int = -1, limit: int = 5, export: str = ""):
    """Prints the latest news article list. [Source: Seeking Alpha]

    Parameters
    ----------
    article_id: int
        Article ID. If -1, none is selected
    limit: int
        Number of articles to display. Only used if article_id is -1.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    if article_id == -1:
        # No specific article requested: list the latest trending articles
        articles = seeking_alpha_model.get_trending_list(limit)
        if export:
            df_articles = pd.DataFrame(articles)
        for idx, article in enumerate(articles):
            published = article["publishedAt"].replace("T", " ").replace("Z", "")
            console.print(published, "-", article["id"], "-", article["title"])
            console.print(article["url"])
            console.print("\n")
            if idx >= limit - 1:
                break
    else:
        # Show the one article the user asked for
        article = seeking_alpha_model.get_article_data(article_id)
        if export:
            df_articles = pd.DataFrame(article)
        # Trim the timestamp down to minutes (drop seconds and offset)
        published = article["publishedAt"][: article["publishedAt"].rfind(":") - 3]
        console.print(published.replace("T", " "), " ", article["title"])
        console.print(article["url"])
        console.print("\n")
        console.print(article["content"])

    if export:
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "trending",
            df_articles,
        )
@log_start_end(log=logger)
def display_news(news_type: str = "Top-News", limit: int = 5, export: str = ""):
    """Display news. [Source: SeekingAlpha]

    Parameters
    ----------
    news_type : str
        From: Top-News, On-The-Move, Market-Pulse, Notable-Calls, Buybacks, Commodities, Crypto, Issuance, Global,
        Guidance, IPOs, SPACs, Politics, M-A, Consumer, Energy, Financials, Healthcare, MLPs, REITs, Technology
    limit : int
        Number of news to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    news_to_display: List = seeking_alpha_model.get_news(news_type, limit)
    # BUG FIX: previously checked `if not news:`, which referenced the
    # module-level `news` *function* (always truthy), so the empty-result
    # message was unreachable. Check the fetched list instead.
    if not news_to_display:
        console.print("No news found.", "\n")
    else:
        for news_element in news_to_display:
            console.print(
                news_element["publishOn"]
                + " - "
                + news_element["id"]
                + " - "
                + news_element["title"]
            )
            console.print(news_element["url"])
            console.print("\n")

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cnews : " + news_type,
        pd.DataFrame(news_to_display),
    )
__docformat__ = "numpy"
from datetime import datetime
import logging
import pandas as pd
import requests
import openbb_terminal.config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_KEY_QUANDL"])
def get_retail_tickers() -> pd.DataFrame:
    """Gets the top 10 retail stocks per day

    Returns
    -------
    pd.DataFrame
        Dataframe of tickers
    """
    response = requests.get(
        "https://data.nasdaq.com/api/v3/datatables/NDAQ/RTAT10/"
        f"?api_key={cfg.API_KEY_QUANDL}"
    )

    retail = pd.DataFrame()
    if response.status_code == 200:
        retail = pd.DataFrame(response.json()["datatable"]["data"])
        retail.columns = ["Date", "Ticker", "Activity", "Sentiment"]
    else:
        # Wrong API key (400), premium-only feature (403), or any other
        # failure: surface the API's own error message to the user.
        console.print(response.text)

    return retail
@log_start_end(log=logger)
def get_dividend_cal(date: str = None) -> pd.DataFrame:
    """Gets dividend calendar for given date. Date represents Ex-Dividend Date

    Parameters
    ----------
    date: str
        Date to get for in format YYYY-MM-DD (defaults to today)

    Returns
    -------
    pd.DataFrame
        Dataframe of dividend calendar
    """
    # TODO: HELP WANTED:
    # Nasdaq API doesn't like a lot of stuff. Your ISP or VPN, the specific user agent
    # that you might be using, etc. More exploration is required to make this feature
    # equally usable for all. In the time being we patch selection of the user agent and
    # add a timeout for cases when the URL doesn't respond.

    if date is None:
        date = datetime.today().strftime("%Y-%m-%d")

    agent = get_user_agent()
    # The Nasdaq API rejects this particular user agent — keep drawing
    # until we get any other one.
    while (
        agent
        == "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:82.1) Gecko/20100101 Firefox/82.1"
    ):
        agent = get_user_agent()

    try:
        response = requests.get(
            f"https://api.nasdaq.com/api/calendar/dividends?date={date}",
            headers={"User-Agent": agent},
            timeout=5,
        )
        calendar = pd.DataFrame()
        if response.status_code == 200:
            calendar = pd.DataFrame(response.json()["data"]["calendar"]["rows"])
        else:
            # Wrong API key (400), premium-only feature (403), or any other
            # failure: surface the API's own error message to the user.
            console.print(response.text)
    except requests.exceptions.ReadTimeout:
        logger.exception("Request timed out")
        return pd.DataFrame()
    return calendar
__docformat__ = "numpy"
import logging
import os
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
print_rich_table,
)
from openbb_terminal.stocks.technical_analysis import rsp_model
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_rsp(
    s_ticker: str = "",
    export: str = "",
    tickers_show: bool = False,
):
    """Display Relative Strength Percentile [Source: https://github.com/skyte/relative-strength]

    Parameters
    ----------
    s_ticker : str
        Stock ticker
    export : str
        Format of export file
    tickers_show : bool
        Whether to also list the tickers in the same industry as the stock
    """
    rsp_stock, rsp_industry, df_stock_p, df_industries_p = rsp_model.get_rsp(s_ticker)
    if rsp_stock.empty or rsp_industry.empty:
        console.print(f"[red]Ticker '{s_ticker}' not found.\n[/red]")
    else:
        # Split the ticker list out of the industry table before printing it
        industry_tickers = pd.DataFrame(rsp_industry["Tickers"])
        rsp_industry = rsp_industry.drop(columns=["Tickers"])

        print_rich_table(
            rsp_stock,
            headers=list(rsp_stock.columns),
            show_index=False,
            title="Relative Strength Percentile of Stock (relative to SPY)",
        )
        print_rich_table(
            rsp_industry,
            headers=list(rsp_industry.columns),
            show_index=False,
            title="Relative Strength Percentile of Industry the ticker is part of",
        )
        if tickers_show:
            print_rich_table(
                industry_tickers,
                headers=list(industry_tickers.columns),
                show_index=False,
                title="Tickers in same industry as chosen stock",
            )

        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
            "rsp_stock",
            df_stock_p,
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
            "rsp_industry",
            df_industries_p,
        )
__docformat__ = "numpy"
import logging
import pandas as pd
import requests
from tradingview_ta import TA_Handler
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# Maps tradingview_ta interval codes to the human-readable labels used as
# the index of the recommendation DataFrame.
INTERVALS = {
    "1m": "1 min",
    "5m": "5 min",
    "15m": "15 min",
    "1h": "1 hour",
    "4h": "4 hours",
    "1d": "1 day",
    "1W": "1 week",
    "1M": "1 month",
}

# Market "screener" values accepted by tradingview_ta's TA_Handler.
SCREENERS = [
    "australia",
    "brazil",
    "cfd",
    "crypto",
    "euronext",
    "forex",
    "france",
    "germany",
    "hongkong",
    "india",
    "indonesia",
    "malaysia",
    "philippines",
    "russia",
    "ksa",
    "rsa",
    "korea",
    "spain",
    "sweden",
    "taiwan",
    "thailand",
    "turkey",
    "uk",
    "america",
    "vietnam",
]
@log_start_end(log=logger)
def get_tradingview_recommendation(
    symbol: str, screener: str = "america", exchange: str = "", interval: str = ""
) -> pd.DataFrame:
    """Get tradingview recommendation based on technical indicators

    Parameters
    ----------
    symbol : str
        Ticker symbol to get the recommendation from tradingview based on technical indicators
    screener : str
        Screener based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
    exchange: str
        Exchange based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
    interval: str
        Interval time to check technical indicators and correspondent recommendation

    Returns
    -------
    df_recommendation: pd.DataFrame
        Dataframe of tradingview recommendations based on technical indicators,
        indexed by the human-readable interval (e.g. "1 day")
    """
    if not exchange:
        # Resolve the exchange via Alpha Vantage's company-overview endpoint
        s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={symbol}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
        result = requests.get(s_req, stream=True, timeout=10)
        data = result.json()
        # Empty payload, or a payload without "Exchange" (unknown ticker or an
        # API rate-limit note), means we cannot resolve the exchange
        if not data or "Exchange" not in data:
            return pd.DataFrame()
        exchange = data["Exchange"]

    if interval:
        intervals = [interval]
    else:
        # Default: every supported interval, widest first
        intervals = ["1M", "1W", "1d", "4h", "1h", "15m", "5m", "1m"]

    df_recommendation = pd.DataFrame()
    index_recommendation = []
    for an_interval in intervals:
        if exchange:
            stock_recommendation = TA_Handler(
                symbol=symbol,
                screener=screener,
                exchange=exchange,
                interval=an_interval,
            )
            d_recommendation = stock_recommendation.get_analysis().summary
            # BUGFIX: append the new row so row order matches the order in
            # which interval labels are collected below. Previously the new
            # row was prepended while labels were appended, so every row was
            # labelled with the wrong interval.
            df_recommendation = pd.concat(
                [
                    df_recommendation,
                    pd.DataFrame.from_dict(d_recommendation, orient="index").T,
                ],
                axis=0,
            )
            index_recommendation.append(INTERVALS[an_interval])

        df_recommendation.index = index_recommendation
        df_recommendation[["BUY", "NEUTRAL", "SELL"]] = df_recommendation[
            ["BUY", "NEUTRAL", "SELL"]
        ].astype(int)

    df_recommendation.index.name = "Interval"

    return df_recommendation
__docformat__ = "numpy"
import logging
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks.tradinghours import bursa_model
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_exchange(symbol: str):
    """Display current exchange trading hours.

    Parameters
    ----------
    symbol : str
        Exchange symbol
    """
    exchange_data = bursa_model.get_bursa(symbol)

    # Unknown symbol or empty result from the model: nothing to display
    if len(exchange_data) == 0 or exchange_data.empty:
        console.print(
            "[red]No exchange data loaded.\n"
            "Make sure you picked a valid exchange symbol.[/red]\n"
        )
        return

    print_rich_table(
        exchange_data,
        show_index=True,
        title=f"[bold]{exchange_data.loc['name']}[/bold]",
    )
@log_start_end(log=logger)
def display_open():
    """Display the exchanges that are currently open."""
    open_markets = bursa_model.get_open()

    if open_markets.empty:
        console.print("No exchange open.\n")
        return

    print_rich_table(
        open_markets,
        show_index=True,
        title="[bold]Open markets[/bold]",
    )
@log_start_end(log=logger)
def display_closed():
    """Display the exchanges that are currently closed."""
    closed_markets = bursa_model.get_closed()

    if closed_markets.empty:
        console.print("[red]No exchange data loaded.\n[/red]\n")
        return

    print_rich_table(
        closed_markets,
        show_index=True,
        title="[bold]Closed markets[/bold]",
    )
@log_start_end(log=logger)
def display_all():
    """Display all exchanges with their open/closed status."""
    markets = bursa_model.get_all()

    if markets.empty:
        console.print("[red]No exchange data loaded.\n[/red]\n")
        return

    print_rich_table(
        markets,
        show_index=True,
        title="[bold]World markets[/bold]",
    )
from datetime import datetime
import logging
import os
import pandas as pd
import pytz
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# pylint: disable=no-member
# pylint: disable=no-member
@log_start_end(log=logger)
def get_bursa(symbol: str) -> pd.DataFrame:
    """Get current exchange open hours.

    Parameters
    ----------
    symbol : str
        Exchange symbol (either the short name or the index symbol)

    Returns
    -------
    pd.DataFrame
        Exchange info with an extra "open" row telling whether it is trading
        right now; empty DataFrame when the symbol is unknown
    """
    bursa = all_bursa()
    symbol = symbol.upper()

    def _with_open_row(frame: pd.DataFrame) -> pd.DataFrame:
        # Append a single "open" row mirroring the frame's columns
        open_row = pd.DataFrame(
            [check_if_open(bursa, symbol)],
            index=["open"],
            columns=frame.columns.values,
        )
        return pd.concat([frame, open_row], axis=0)

    if symbol in bursa["short_name"].values:
        return _with_open_row(
            pd.DataFrame(bursa.loc[bursa["short_name"] == symbol]).transpose()
        )
    if symbol in bursa.index:
        return _with_open_row(pd.DataFrame(bursa.loc[symbol]))
    return pd.DataFrame()
@log_start_end(log=logger)
def get_open() -> pd.DataFrame:
    """Get open exchanges.

    Returns
    -------
    pd.DataFrame
        Currently open exchanges (name and short name)
    """
    bursa = all_bursa()
    # Evaluate the open flag for every exchange, then keep the open ones
    bursa["open"] = [check_if_open(bursa, exchange) for exchange in bursa.index]
    return bursa.loc[bursa["open"], ["name", "short_name"]]
@log_start_end(log=logger)
def get_closed() -> pd.DataFrame:
    """Get closed exchanges.

    Returns
    -------
    pd.DataFrame
        Currently closed exchanges (name and short name)
    """
    bursa = all_bursa()
    # Evaluate the open flag for every exchange, then keep the closed ones
    bursa["open"] = [check_if_open(bursa, exchange) for exchange in bursa.index]
    return bursa.loc[~bursa["open"], ["name", "short_name"]]
@log_start_end(log=logger)
def get_all() -> pd.DataFrame:
    """Get all exchanges.

    Returns
    -------
    pd.DataFrame
        All available exchanges with their open/closed status
    """
    bursa = all_bursa()
    # Attach the current open/closed status to every exchange
    bursa["open"] = [check_if_open(bursa, exchange) for exchange in bursa.index]
    return bursa[["name", "short_name", "open"]]
@log_start_end(log=logger)
def get_all_exchange_short_names() -> pd.DataFrame:
    """Get all exchanges short names.

    Returns
    -------
    pd.DataFrame
        All available exchanges short names
    """
    # The previous implementation computed an "open" flag for every exchange
    # (a timezone lookup per row) and then discarded it — only the short
    # names are returned, so skip that work entirely.
    return all_bursa()[["short_name"]]
@log_start_end(log=logger)
def all_bursa() -> pd.DataFrame:
    """Get all exchanges from the bundled JSON file.

    Returns
    -------
    pd.DataFrame
        All exchanges, indexed by exchange symbol
    """
    # Build the path from components instead of embedding "/" so it is
    # assembled with the platform's separator
    path = os.path.join(os.path.dirname(__file__), "data", "bursa_open_hours.json")
    return pd.read_json(path)
def check_if_open(bursa: pd.DataFrame, exchange: str) -> bool:
    """Check if a market is currently open.

    Parameters
    ----------
    bursa : pd.DataFrame
        DataFrame of all exchanges, indexed by symbol with a "short_name" column
    exchange : str
        Exchange symbol or short name (case-insensitive)

    Returns
    -------
    bool
        True if the market is currently open; False otherwise, including when
        the exchange is unknown (previously an unknown exchange raised a
        NameError because tz/exchange_df were left unbound)
    """
    exchange = exchange.upper()
    if exchange in bursa.index.values:
        exchange_df = bursa.loc[exchange]
    elif exchange in bursa["short_name"].values:
        exchange_df = bursa.loc[bursa["short_name"] == exchange].iloc[0].transpose()
    else:
        # Unknown exchange: report closed instead of crashing
        return False
    tz = exchange_df["timezone"]

    # Current wall-clock time in the exchange's timezone
    local_datetime = datetime.now(pytz.timezone(tz))

    market_open = datetime.strptime(exchange_df["market_open"], "%H:%M:%S")
    market_close = datetime.strptime(exchange_df["market_close"], "%H:%M:%S")
    after_market_open = local_datetime.time() >= market_open.time()
    before_market_close = local_datetime.time() <= market_close.time()

    try:
        # Lunch-break fields are missing/None for many exchanges; any parse
        # failure simply means "no lunch break"
        lunchbreak_start = datetime.strptime(
            exchange_df["lunchbreak_start"], "%H:%M:%S"
        )
        lunchbreak_end = datetime.strptime(exchange_df["lunchbreak_end"], "%H:%M:%S")
        after_lunch_start = local_datetime.time() >= lunchbreak_start.time()
        before_lunch_end = local_datetime.time() <= lunchbreak_end.time()
    except Exception:
        after_lunch_start = False
        before_lunch_end = False

    # Weekend: market closed regardless of the hours
    if local_datetime.weekday() >= 5:
        return False
    return (
        after_market_open
        and before_market_close
        and not (after_lunch_start and before_lunch_end)
    )
__docformat__ = "numpy"
# pylint:disable=too-many-arguments,unexpected-keyword-arg
import logging
import financedatabase as fd
import yfinance as yf
from tqdm import tqdm
from requests.exceptions import ReadTimeout
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_countries(industry: str = "", sector: str = "") -> list:
    """Get all countries in Yahoo Finance data based on sector or industry. [Source: Finance Database]

    Parameters
    ----------
    industry : str
        Filter retrieved countries by industry
    sector : str
        Filter retrieved countries by sector

    Returns
    -------
    list
        List of possible countries
    """
    # Industry takes priority: each industry maps to exactly one sector,
    # while a sector spans multiple industries
    if industry:
        industry_options = fd.show_options("equities", industry=True)
        return industry_options[industry]["Countries"]
    if sector:
        return fd.show_options("equities", sector=sector)["Countries"]
    # Unfiltered list, dropping empty entries
    all_countries = fd.show_options("equities", "countries")
    return [country for country in all_countries if country]
@log_start_end(log=logger)
def get_sectors(industry: str = "", country: str = "") -> list:
    """Get all sectors in Yahoo Finance data based on country or industry. [Source: Finance Database]

    Parameters
    ----------
    industry : str
        Filter retrieved sectors by industry
    country : str
        Filter retrieved sectors by country

    Returns
    -------
    list
        List of possible sectors
    """
    # Industry takes priority: each industry belongs to exactly one sector,
    # while a country spans multiple industries
    if industry:
        industry_options = fd.show_options("equities", industry=True)
        return [industry_options[industry]["Sector"]]
    if country:
        return fd.show_options("equities", country=country)["Sectors"]
    # Unfiltered list, dropping empty entries
    all_sectors = fd.show_options("equities", "sectors")
    return [sector for sector in all_sectors if sector]
@log_start_end(log=logger)
def get_industries(country: str = "", sector: str = "") -> list:
    """Get all industries in Yahoo Finance data based on country or sector. [Source: Finance Database]

    Parameters
    ----------
    country : str
        Filter retrieved industries by country
    sector : str
        Filter retrieved industries by sector

    Returns
    -------
    list
        List of possible industries
    """
    # Both filters at once narrows to their intersection
    if country and sector:
        return fd.show_options("equities", country=country, sector=sector)
    if country:
        return fd.show_options("equities", country=country)["Industries"]
    if sector:
        return fd.show_options("equities", sector=sector)["Industries"]
    # Unfiltered list, dropping empty entries
    all_industries = fd.show_options("equities", "industries")
    return [industry for industry in all_industries if industry]
@log_start_end(log=logger)
def get_marketcap() -> list:
    """Get all market cap division in Yahoo Finance data. [Source: Finance Database]

    Returns
    -------
    list
        List of possible market caps
    """
    # Fixed set of market-cap buckets supported by the SIA filters
    marketcap_buckets = ["Small Cap", "Mid Cap", "Large Cap"]
    return marketcap_buckets
@log_start_end(log=logger)
def filter_stocks(
    country: str = None,
    sector: str = None,
    industry: str = None,
    marketcap: str = "",
    exclude_exchanges: bool = True,
) -> list:
    """Filter stocks based on country, sector, industry, market cap and exclude exchanges.
    [Source: Finance Database]

    Parameters
    ----------
    country: str
        Search by country to find stocks matching the criteria.
    sector: str
        Search by sector to find stocks matching the criteria.
    industry: str
        Search by industry to find stocks matching the criteria.
    marketcap: str
        Select stocks based on the market cap.
    exclude_exchanges: bool
        When you wish to include different exchanges use this boolean.

    Returns
    -------
    list
        List of filtered stock tickers
    """
    # Forward only the filters that were actually provided
    kwargs = {
        key: value
        for key, value in (
            ("country", country),
            ("sector", sector),
            ("industry", industry),
        )
        if value
    }
    try:
        data = fd.select_equities(exclude_exchanges=exclude_exchanges, **kwargs)
        if marketcap:
            # Narrow the selection further by market-cap bucket
            data = fd.search_products(data, query=marketcap, search="market_cap")
        return list(data.keys())
    except ValueError as e:
        logger.exception(str(e))
        return []
    except ReadTimeout:
        console.print(
            "[red]Unable to retrieve company data from GitHub which limits the search"
            " capabilities. This tends to be due to access restrictions for GitHub.com,"
            " please check if you can access this website without a VPN.[/red]\n"
        )
        return []
@log_start_end(log=logger)
def get_stocks_data(
    country: str = "United States",
    sector: str = "Communication Services",
    industry: str = "Internet Content & Information",
    marketcap: str = "Mega Cap",
    exclude_exchanges: bool = True,
) -> dict:
    """Get stocks data based on country, sector, industry, market cap and exclude exchanges.
    [Source: Finance Database]

    Parameters
    ----------
    country: str
        Search by country to find stocks matching the criteria.
    sector: str
        Search by sector to find stocks matching the criteria.
    industry: str
        Search by industry to find stocks matching the criteria.
    marketcap: str
        Select stocks based on the market cap.
    exclude_exchanges: bool
        When you wish to include different exchanges use this boolean.

    Returns
    -------
    dict
        Dictionary mapping each filtered ticker to its raw Yahoo Finance payload
    """
    symbols = filter_stocks(country, sector, industry, marketcap, exclude_exchanges)

    # Fetch the raw Yahoo Finance JSON for each matching ticker, with a
    # progress bar since this is one request per symbol
    stocks_data = {}
    for symbol in tqdm(symbols):
        stocks_data[symbol] = yf.utils.get_json(
            f"https://finance.yahoo.com/quote/{symbol}"
        )
    return stocks_data
@log_start_end(log=logger)
def get_companies_per_sector_in_country(
    country: str = "United States",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
) -> dict:
    """Get number of companies per sector in a specific country (and specific market cap). [Source: Finance Database]

    Parameters
    ----------
    country: str
        Select country to get number of companies by each sector
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges

    Returns
    -------
    dict
        Dictionary of sectors and number of companies in a specific country
    """
    counts: dict = {}
    for sector in tqdm(get_sectors(country=country)):
        # Skip empty sector entries returned by the database
        if not sector:
            continue
        try:
            companies = fd.select_equities(
                country=country, sector=sector, exclude_exchanges=exclude_exchanges
            )
            if mktcap:
                companies = fd.search_products(
                    companies, query=mktcap + " Cap", search="market_cap"
                )
        except ValueError as e:
            # One bad sector should not abort the whole scan
            logger.exception(str(e))
            continue
        except ReadTimeout:
            console.print(
                "[red]Unable to retrieve company data from GitHub which limits the search"
                " capabilities. This tends to be due to access restrictions for GitHub.com,"
                " please check if you can access this website without a VPN.[/red]\n"
            )
            return {}
        counts[sector] = len(companies)
    return counts
@log_start_end(log=logger)
def get_companies_per_industry_in_country(
    country: str = "United States",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
) -> dict:
    """Get number of companies per industry in a specific country (and specific market cap).
    [Source: Finance Database]

    Parameters
    ----------
    country: str
        Select country to get number of companies by each industry
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges

    Returns
    -------
    dict
        Dictionary of industries and number of companies in a specific country
    """
    counts: dict = {}
    for industry in tqdm(get_industries(country=country)):
        # Skip empty industry entries returned by the database
        if not industry:
            continue
        try:
            companies = fd.select_equities(
                country=country,
                industry=industry,
                exclude_exchanges=exclude_exchanges,
            )
            if mktcap:
                companies = fd.search_products(
                    companies, query=mktcap + " Cap", search="market_cap"
                )
        except ValueError as e:
            # One bad industry should not abort the whole scan
            logger.exception(str(e))
            continue
        except ReadTimeout:
            console.print(
                "[red]Unable to retrieve company data from GitHub which limits the search"
                " capabilities. This tends to be due to access restrictions for GitHub.com,"
                " please check if you can access this website without a VPN.[/red]\n"
            )
            return {}
        counts[industry] = len(companies)
    return counts
@log_start_end(log=logger)
def get_companies_per_industry_in_sector(
    sector: str = "Technology", mktcap: str = "Large", exclude_exchanges: bool = True
) -> dict:
    """Get number of companies per industry in a specific sector (and specific market cap).
    [Source: Finance Database]

    Parameters
    ----------
    sector: str
        Select sector to get number of companies by each industry
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges

    Returns
    -------
    dict
        Dictionary of industries and number of companies in a specific sector
    """
    counts: dict = {}
    for industry in tqdm(get_industries(sector=sector)):
        # Skip empty industry entries returned by the database
        if not industry:
            continue
        try:
            companies = fd.select_equities(
                sector=sector,
                industry=industry,
                exclude_exchanges=exclude_exchanges,
            )
            if mktcap:
                companies = fd.search_products(
                    companies, query=mktcap + " Cap", search="market_cap"
                )
        except ValueError as e:
            # One bad industry should not abort the whole scan
            logger.exception(str(e))
            continue
        except ReadTimeout:
            console.print(
                "[red]Unable to retrieve company data from GitHub which limits the search"
                " capabilities. This tends to be due to access restrictions for GitHub.com,"
                " please check if you can access this website without a VPN.[/red]\n"
            )
            return {}
        counts[industry] = len(companies)
    return counts
@log_start_end(log=logger)
def get_companies_per_country_in_sector(
    sector: str = "Technology", mktcap: str = "Large", exclude_exchanges: bool = True
) -> dict:
    """Get number of companies per country in a specific sector (and specific market cap).
    [Source: Finance Database]

    Parameters
    ----------
    sector: str
        Select sector to get number of companies by each country
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges

    Returns
    -------
    dict
        Dictionary of countries and number of companies in a specific sector
    """
    counts: dict = {}
    for country in tqdm(get_countries(sector=sector)):
        # Skip empty country entries returned by the database
        if not country:
            continue
        try:
            companies = fd.select_equities(
                sector=sector,
                country=country,
                exclude_exchanges=exclude_exchanges,
            )
            if mktcap:
                companies = fd.search_products(
                    companies, query=mktcap + " Cap", search="market_cap"
                )
        except ValueError as e:
            # One bad country should not abort the whole scan
            logger.exception(str(e))
            continue
        except ReadTimeout:
            console.print(
                "[red]Unable to retrieve company data from GitHub which limits the search"
                " capabilities. This tends to be due to access restrictions for GitHub.com,"
                " please check if you can access this website without a VPN.[/red]\n"
            )
            return {}
        counts[country] = len(companies)
    return counts
@log_start_end(log=logger)
def get_companies_per_country_in_industry(
    industry: str = "Internet Content & Information",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
) -> dict:
    """Get number of companies per country in a specific industry (and specific market cap).
    [Source: Finance Database]

    Parameters
    ----------
    industry: str
        Select industry to get number of companies by each country
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges

    Returns
    -------
    dict
        Dictionary of countries and number of companies in a specific industry
    """
    counts: dict = {}
    for country in tqdm(get_countries(industry=industry)):
        # Skip empty country entries returned by the database
        if not country:
            continue
        try:
            companies = fd.select_equities(
                industry=industry,
                country=country,
                exclude_exchanges=exclude_exchanges,
            )
            if mktcap:
                companies = fd.search_products(
                    companies, query=mktcap + " Cap", search="market_cap"
                )
        except ValueError as e:
            # One bad country should not abort the whole scan
            logger.exception(str(e))
            continue
        except ReadTimeout:
            console.print(
                "[red]Unable to retrieve company data from GitHub which limits the search"
                " capabilities. This tends to be due to access restrictions for GitHub.com,"
                " please check if you can access this website without a VPN.[/red]\n"
            )
            return {}
        counts[country] = len(companies)
    return counts
__docformat__ = "numpy"
# pylint:disable=too-many-arguments, too-many-lines
import copy
import logging
import os
from typing import Dict, Optional, List, Tuple
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.sector_industry_analysis import stockanalysis_model
from openbb_terminal.stocks.sector_industry_analysis.financedatabase_model import (
filter_stocks,
)
from openbb_terminal.helpers_denomination import (
transform as transform_by_denomination,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_plots_financials(
    finance_key: str = "re",
    country: str = "United States",
    sector: str = "Communication Services",
    industry: str = "Internet Content & Information",
    period: str = "annual",
    period_length: int = 12,
    marketcap: str = "",
    exclude_exchanges: bool = True,
    currency: str = "USD",
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
    raw: bool = False,
    already_loaded_stocks_data=None,
) -> Tuple[Dict, List]:
    """Display financials bars comparing sectors, industry, analysis, countries, market cap and excluding exchanges.

    Parameters
    ----------
    finance_key: str
        Select finance key from StockAnalysis (e.g. re (Revenue), ce (Cash & Equivalents) and inv (Inventory)
    country: str
        Search by country to find stocks matching the criteria.
    sector: str
        Search by sector to find stocks matching the criteria.
    industry: str
        Search by industry to find stocks matching the criteria.
    period: str
        Collect either annual, quarterly or trailing financial statements.
    period_length: int
        Determines how far you wish to look to the past (default is 12 quarters or years)
    marketcap: str
        Select stocks based on the market cap.
    exclude_exchanges: bool
        When you wish to include different exchanges use this boolean.
    currency : str
        Choose in what currency you wish to convert each company's financial statement. Default is USD (US Dollars).
    limit: int
        Limit amount of companies displayed (default is 10)
    export: str
        Format to export data as
    raw: bool
        Output all raw data
    already_loaded_stocks_data: Dict
        Dictionary of filtered stocks data that has been loaded before
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Returns
    -------
    dict
        Dictionary of filtered stocks data
    list
        List of tickers filtered
    """
    if already_loaded_stocks_data is None:
        already_loaded_stocks_data = {}

    # Map the finance key (e.g. "re") to the statement it belongs to
    used_statement = [
        item
        for item, description in stockanalysis_model.SA_KEYS.items()
        if finance_key in description
    ][0]

    if used_statement in already_loaded_stocks_data:
        stocks_data = already_loaded_stocks_data
    else:
        company_tickers = filter_stocks(
            country, sector, industry, marketcap, exclude_exchanges
        )
        if len(company_tickers) <= 1:
            console.print("No information is available for the selected market cap. \n")
            return dict(), list()
        stocks_data = stockanalysis_model.get_stocks_data(
            company_tickers,
            finance_key,
            already_loaded_stocks_data,
            period,
            currency,
        )

    # Work on a copy so the cached statements are not truncated below
    stocks_data_statement = copy.deepcopy(stocks_data[used_statement])

    if not stocks_data_statement:
        console.print(
            "It appears the entire dataset is empty. This could be due to the source being unavailable. "
            "Please check whether https://stockanalysis.com/ is accessible. \n"
        )
        return dict(), list()

    company_tickers = list(stocks_data[used_statement].keys())

    # Truncate the history to the requested number of periods
    if len(stocks_data_statement[company_tickers[0]].columns) > period_length:
        console.print(
            f"Limiting the amount of periods to the last {period_length} periods."
        )
        for company in stocks_data_statement:
            stocks_data_statement[company] = stocks_data_statement[company][
                stocks_data_statement[company].columns[-period_length:]
            ]

    item_name = stockanalysis_model.SA_KEYS[used_statement][finance_key]

    # One column per company, one row per reporting date
    df = pd.DataFrame(
        np.nan,
        columns=stocks_data_statement.keys(),
        index=stocks_data_statement[company_tickers[0]].columns,
    )
    df.index.name = "Date"

    for company in stocks_data_statement:
        df[company] = stocks_data_statement[company].loc[item_name]

    if len(company_tickers) > limit:
        console.print(f"Limiting the amount of companies displayed to {limit}.")
        df = df[df.columns[:limit]]

    # Rescale values (e.g. to Thousands/Millions) for readability
    (df, foundDenomination) = transform_by_denomination(df)
    if currency:
        denomination = f"[{currency} "
    else:
        denomination = "["
    # BUGFIX: previously this compared the freshly-built prefix (which always
    # starts with "[" and can never equal "Units"), so the fallback branch was
    # dead and unscaled data produced titles like "[USD Units]". The check
    # belongs on foundDenomination.
    if foundDenomination != "Units":
        denomination += f"{foundDenomination}]"
    else:
        if currency:
            denomination = f"[{currency}]"
        else:
            denomination = ""

    if raw:
        print_rich_table(
            df.fillna("-"),
            headers=list(df.columns),
            show_index=True,
            title=f"{item_name} {denomination}",
        )
    else:
        # This plot has 1 axis
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return stocks_data, company_tickers

        for company in df.columns:
            ax.plot(df[company], ls="-", marker="o", label=company)

        ax.set_title(f"{item_name} {denomination}")
        ax.legend()
        theme.style_primary_axis(ax)

        if external_axes is None:
            theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        item_name,
        df,
    )

    return stocks_data, company_tickers
__docformat__ = "numpy"
# pylint:disable=too-many-arguments,too-many-lines
import logging
import os
from collections import OrderedDict
from typing import Dict, Optional, List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.sector_industry_analysis import financedatabase_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_bars_financials(
    finance_key: str = "financialData",
    finance_metric: str = "ebitda",
    country: str = "United States",
    sector: str = "Communication Services",
    industry: str = "Internet Content & Information",
    marketcap: str = "Mega Cap",
    exclude_exchanges: bool = True,
    limit: int = 10,
    export: str = "",
    raw: bool = False,
    already_loaded_stocks_data: Dict = None,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display financials bars comparing sectors, industry, analysis, countries, market cap and excluding exchanges.

    Parameters
    ----------
    finance_key: str
        Select finance key from Yahoo Finance(e.g. financialData, defaultKeyStatistics, summaryProfile)
    finance_metric: str
        Select finance metric from Yahoo Finance (e.g. operatingCashflow, revenueGrowth, ebitda, freeCashflow)
    country: str
        Search by country to find stocks matching the criteria.
    sector: str
        Search by sector to find stocks matching the criteria.
    industry: str
        Search by industry to find stocks matching the criteria.
    marketcap: str
        Select stocks based on the market cap from Mega Cap, Large Cap, Mid Cap, Small Cap, Micro Cap, Nano Cap
    exclude_exchanges: bool
        When you wish to include different exchanges use this boolean.
    limit: int
        Limit amount of companies displayed
    export: str
        Format to export data as
    raw: bool
        Output all raw data
    already_loaded_stocks_data: Dict
        Dictionary of filtered stocks data that has been loaded before
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Returns
    -------
    dict
        Dictionary of filtered stocks data
    list
        List of tickers filtered
    """
    # Reuse previously fetched Yahoo Finance payloads when available
    if already_loaded_stocks_data:
        stocks_data = already_loaded_stocks_data
    else:
        stocks_data = financedatabase_model.get_stocks_data(
            country, sector, industry, marketcap, exclude_exchanges
        )
    # Collect (metric value, ticker) per company long name, skipping companies
    # whose payload lacks the requested key/metric
    metric_data = {}
    for symbol in list(stocks_data.keys()):
        if finance_key in stocks_data[symbol] and "quoteType" in stocks_data[symbol]:
            stock_name = stocks_data[symbol]["quoteType"]["longName"]
            metric = (
                stocks_data[symbol][finance_key][finance_metric]
                if stocks_data[symbol][finance_key] is not None
                and finance_metric in stocks_data[symbol][finance_key]
                else None
            )
            if metric and stock_name:
                metric_data[stock_name] = (metric, symbol)
    if len(metric_data) > 1:
        # Sort companies by metric value, descending
        metric_data = dict(
            OrderedDict(
                sorted(metric_data.items(), key=lambda t: t[1][0], reverse=True)
            )
        )
        company_names = list()
        company_metrics = list()
        company_tickers = list()
        for name, metric in metric_data.items():
            company_names.append(name)
            company_metrics.append(metric[0])
            company_tickers.append(metric[1])
        # Turn the camelCase metric name into a spaced, capitalized label
        metric_finance_col = (
            "".join(
                " " + char if char.isupper() else char.strip()
                for char in finance_metric
            )
            .strip()
            .capitalize()
        )
        df_all = pd.DataFrame(
            {"Company": company_names, metric_finance_col: company_metrics}
        )
        if len(df_all) > limit:
            console.print(f"Limiting the amount of companies displayed to {limit}.")
        company_name = np.array(company_names)[:limit]
        company_metric = np.array(company_metrics)[:limit]
        company_ticker = np.array(company_tickers)[:limit]
        df = df_all.head(limit)
        if raw:
            print_rich_table(
                df, headers=list(df.columns), show_index=False, title="Bars Financials"
            )
        else:
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                # set returns statement to be compatible with others
                return dict(), list()
            # Scale values down by factors of 1000 until they fit; magnitude
            # counts the divisions applied (1 -> K, 2 -> M, ...)
            magnitude = 0
            while max(company_metric) > 1_000 or abs(min(company_metric)) > 1_000:
                company_metric = np.divide(company_metric, 1_000)
                magnitude += 1
            # check if the value is a percentage
            # NOTE(review): heuristic — unscaled values all within [0, 1] are
            # rendered as percentages, which may misfire for genuinely small
            # absolute metrics; confirm against the metrics actually used
            if (
                (magnitude == 0)
                and all(company_metric >= 0)
                and all(company_metric <= 1)
            ):
                unit = "%"
                company_metric = company_metric * 100
            else:
                unit = " KMBTP"[magnitude] if magnitude != 0 else ""
            # Plot in reverse so the largest value ends up as the top bar
            for name, metric, ticker in zip(
                company_name[::-1], company_metric[::-1], company_ticker[::-1]
            ):
                # Wrap very long company names onto two lines
                if len(name.split(" ")) > 6 and len(name) > 40:
                    name = f'{" ".join(name.split(" ")[:4])}\n{" ".join(name.split(" ")[4:])}'
                ax.barh(f"{name} ({ticker})", metric, label=ticker)
            # Same camelCase-to-words transform as metric_finance_col above
            metric_title = (
                "".join(
                    " " + char if char.isupper() else char.strip()
                    for char in finance_metric
                )
                .strip()
                .capitalize()
            )
            # Median of the displayed (scaled) values, drawn as a dashed line
            benchmark = np.median(company_metric)
            ax.axvline(x=benchmark, lw=3, ls="--", c="grey")
            title = f"The {metric_title.title()} (benchmark: {benchmark:.2f}{unit}) of "
            title += marketcap + " cap companies " if marketcap else "Companies "
            if industry:
                title += f"in {industry} industry "
            elif sector:
                title += f"in {sector} sector "
            if country:
                title += f"in {country}"
                title += " " if (industry or sector) else ""
            title += (
                "(excl. data from international exchanges)"
                if exclude_exchanges
                else "(incl. data from international exchanges)"
            )
            ax.set_title(title, wrap=True, fontsize=11)
            # Re-set tick positions before relabelling them with the unit suffix
            labels = ax.get_xticks().tolist()
            ax.set_xticks(labels)
            ax.set_xticklabels([f"{label:.2f}{unit}" for label in labels])
            theme.style_primary_axis(ax)
            if not external_axes:
                theme.visualize_output()
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            finance_metric,
            df_all,
        )
        return stocks_data, company_tickers
    if len(metric_data) == 1:
        console.print(
            f"Only 1 company found '{list(metric_data.keys())[0]}'. No barchart will be depicted.\n"
        )
        return stocks_data, [list(metric_data.values())[0][1]]
    console.print("No company found. No barchart will be depicted.\n")
    return dict(), list()
@log_start_end(log=logger)
def display_companies_per_sector_in_country(
    country: str = "United States",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
    export: str = "",
    raw: bool = False,
    max_sectors_to_display: int = 15,
    min_pct_to_display_sector: float = 0.015,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display number of companies per sector in a specific country (and market cap). [Source: Finance Database]

    Parameters
    ----------
    country: str
        Select country to get number of companies by each sector
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges
    export: str
        Format to export data as
    raw: bool
        Output all raw data
    max_sectors_to_display: int
        Maximum number of sectors to display
    min_pct_to_display_sector: float
        Minimum percentage to display sector
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    companies_per_sector = financedatabase_model.get_companies_per_sector_in_country(
        country, mktcap, exclude_exchanges
    )
    # Sort sectors by company count, descending.
    companies_per_sector = dict(
        OrderedDict(
            sorted(companies_per_sector.items(), key=lambda t: t[1], reverse=True)
        )
    )
    # Drop sectors with no companies (iterate over a copy while deleting).
    for key, value in companies_per_sector.copy().items():
        if value == 0:
            del companies_per_sector[key]
    if not companies_per_sector:
        console.print("No companies found with these parameters!\n")
        return
    df = pd.DataFrame.from_dict(companies_per_sector, orient="index")
    df.index.name = "Sector"
    df.columns = ["Number of companies"]
    df["Number of companies"] = df["Number of companies"].astype(int)
    title = mktcap + " cap companies " if mktcap else "Companies "
    title += f"in {country}\n"
    title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
    if raw:
        print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
    else:
        colors = theme.get_colors()
        if len(companies_per_sector) > 1:
            total_num_companies = sum(companies_per_sector.values())
            # Sectors below this absolute company count get folded into "Others".
            min_companies_to_represent = round(
                min_pct_to_display_sector * total_num_companies
            )
            filter_sectors_to_display = (
                np.array(list(companies_per_sector.values()))
                > min_companies_to_represent
            )
            if any(filter_sectors_to_display):
                if not all(filter_sectors_to_display):
                    # Counts are sorted descending, so the first sector below the
                    # threshold caps how many sectors are displayed.
                    num_sectors_to_display = np.where(~filter_sectors_to_display)[0][0]
                    if num_sectors_to_display < max_sectors_to_display:
                        max_sectors_to_display = num_sectors_to_display
            else:
                console.print(
                    "The minimum threshold percentage specified is too high, thus it will be ignored."
                )
            if len(companies_per_sector) > max_sectors_to_display:
                # Keep the top sectors and aggregate the tail into "Others".
                companies_per_sector_sliced = dict(
                    list(companies_per_sector.items())[: max_sectors_to_display - 1]
                )
                companies_per_sector_sliced["Others"] = sum(
                    dict(
                        list(companies_per_sector.items())[max_sectors_to_display - 1 :]
                    ).values()
                )
                legend, values = zip(*companies_per_sector_sliced.items())
            else:
                legend, values = zip(*companies_per_sector.items())
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                return
            # BUG FIX: draw on the selected axis. Previously `plt.pie` was used,
            # which ignores `external_axes` and draws on pyplot's current axes;
            # all sibling display functions in this module use `ax.pie`.
            ax.pie(
                values,
                labels=legend,
                colors=colors,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                startangle=45,
            )
            ax.set_title(title, fontsize=14)
            if not external_axes:
                theme.visualize_output()
        elif len(companies_per_sector) == 1:
            console.print(
                f"Only 1 sector found '{list(companies_per_sector.keys())[0]}'. No pie chart will be depicted."
            )
        else:
            console.print("No sector found. No pie chart will be depicted.")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cps",
        df,
    )
@log_start_end(log=logger)
def display_companies_per_industry_in_country(
    country: str = "United States",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
    export: str = "",
    raw: bool = False,
    max_industries_to_display: int = 15,
    min_pct_to_display_industry: float = 0.015,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display number of companies per industry in a specific country. [Source: Finance Database]

    Parameters
    ----------
    country: str
        Select country to get number of companies by each industry
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges
    export: str
        Format to export data as
    raw: bool
        Output all raw data
    max_industries_to_display: int
        Maximum number of industries to display
    min_pct_to_display_industry: float
        Minimum percentage to display industry
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    industry_counts = financedatabase_model.get_companies_per_industry_in_country(
        country, mktcap, exclude_exchanges
    )
    # Order industries by company count (descending) and drop empty ones.
    industry_counts = {
        industry: count
        for industry, count in sorted(
            industry_counts.items(), key=lambda pair: pair[1], reverse=True
        )
        if count != 0
    }
    if not industry_counts:
        console.print("No companies found with these parameters!\n")
        return
    df = pd.DataFrame.from_dict(industry_counts, orient="index")
    df.index.name = "Industry"
    df.columns = ["Number of companies"]
    df["Number of companies"] = df["Number of companies"].astype(int)
    if mktcap:
        title = mktcap + " cap companies "
    else:
        title = "Companies "
    title += f"in {country}\n"
    title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
    if raw:
        print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
    else:
        colors = theme.get_colors()
        if len(industry_counts) > 1:
            # Industries accounting for less than `min_pct_to_display_industry`
            # of all companies are not shown individually.
            threshold = round(
                min_pct_to_display_industry * sum(industry_counts.values())
            )
            above_threshold = np.array(list(industry_counts.values())) > threshold
            if any(above_threshold):
                if not all(above_threshold):
                    # Counts are sorted descending, so the first entry below the
                    # threshold caps how many industries are displayed.
                    first_below = np.where(~above_threshold)[0][0]
                    max_industries_to_display = min(
                        max_industries_to_display, first_below
                    )
            else:
                console.print(
                    "The minimum threshold percentage specified is too high, thus it will be ignored."
                )
            ordered_items = list(industry_counts.items())
            if len(ordered_items) > max_industries_to_display:
                # Show the top industries and fold the tail into "Others".
                shown = dict(ordered_items[: max_industries_to_display - 1])
                shown["Others"] = sum(
                    count
                    for _, count in ordered_items[max_industries_to_display - 1 :]
                )
                legend, values = zip(*shown.items())
            else:
                legend, values = zip(*ordered_items)
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                return
            ax.pie(
                values,
                labels=legend,
                colors=colors,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                startangle=45,
            )
            ax.set_title(title, fontsize=14)
            if not external_axes:
                theme.visualize_output()
        elif len(industry_counts) == 1:
            console.print(
                f"Only 1 industry found '{list(industry_counts.keys())[0]}'. No pie chart will be depicted."
            )
        else:
            console.print("No industry found. No pie chart will be depicted.")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cpic",
        df,
    )
@log_start_end(log=logger)
def display_companies_per_industry_in_sector(
    sector: str = "Technology",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
    export: str = "",
    raw: bool = False,
    max_industries_to_display: int = 15,
    min_pct_to_display_industry: float = 0.015,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display number of companies per industry in a specific sector. [Source: Finance Database]

    Parameters
    ----------
    sector: str
        Select sector to get number of companies by each industry
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges
    export: str
        Format to export data as
    raw: bool
        Output all raw data
    max_industries_to_display: int
        Maximum number of industries to display
    min_pct_to_display_industry: float
        Minimum percentage to display industry
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    companies_per_industry = financedatabase_model.get_companies_per_industry_in_sector(
        sector, mktcap, exclude_exchanges
    )
    # Sort industries by company count, descending.
    companies_per_industry = dict(
        OrderedDict(
            sorted(companies_per_industry.items(), key=lambda t: t[1], reverse=True)
        )
    )
    # Drop industries with no companies (iterate over a copy while deleting).
    for key, value in companies_per_industry.copy().items():
        if value == 0:
            del companies_per_industry[key]
    if not companies_per_industry:
        console.print("No companies found with these parameters!\n")
        return
    df = pd.DataFrame.from_dict(companies_per_industry, orient="index")
    df.index.name = "Industry"
    df.columns = ["Number of companies"]
    df["Number of companies"] = df["Number of companies"].astype(int)
    title = mktcap + " cap companies " if mktcap else "Companies "
    title += f"in {sector} sector\n"
    title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
    if raw:
        print_rich_table(
            df,
            headers=list(df.columns),
            show_index=True,
            title=title,
        )
    else:
        colors = theme.get_colors()
        if len(companies_per_industry) > 1:
            total_num_companies = sum(companies_per_industry.values())
            # Industries below this absolute company count get folded into "Others".
            min_companies_to_represent = round(
                min_pct_to_display_industry * total_num_companies
            )
            filter_industries_to_display = (
                np.array(list(companies_per_industry.values()))
                > min_companies_to_represent
            )
            if any(filter_industries_to_display):
                if not all(filter_industries_to_display):
                    # Counts are sorted descending, so the first industry below
                    # the threshold caps how many industries are displayed.
                    num_industries_to_display = np.where(~filter_industries_to_display)[
                        0
                    ][0]
                    if num_industries_to_display < max_industries_to_display:
                        max_industries_to_display = num_industries_to_display
            else:
                console.print(
                    "The minimum threshold percentage specified is too high, thus it will be ignored."
                )
            if len(companies_per_industry) > max_industries_to_display:
                # Keep the top industries and aggregate the tail into "Others".
                companies_per_industry_sliced = dict(
                    list(companies_per_industry.items())[
                        : max_industries_to_display - 1
                    ]
                )
                companies_per_industry_sliced["Others"] = sum(
                    dict(
                        list(companies_per_industry.items())[
                            max_industries_to_display - 1 :
                        ]
                    ).values()
                )
                legend, values = zip(*companies_per_industry_sliced.items())
            else:
                legend, values = zip(*companies_per_industry.items())
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                # Invalid external axes: abort (note: data is not exported).
                return
            ax.pie(
                values,
                labels=legend,
                colors=colors,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                startangle=45,
            )
            ax.set_title(title, fontsize=14)
            if not external_axes:
                theme.visualize_output()
        elif len(companies_per_industry) == 1:
            console.print(
                f"Only 1 industry found '{list(companies_per_industry.keys())[0]}'. No pie chart will be depicted."
            )
        else:
            console.print("No industry found. No pie chart will be depicted.")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cpis",
        df,
    )
@log_start_end(log=logger)
def display_companies_per_country_in_sector(
    sector: str = "Technology",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
    export: str = "",
    raw: bool = False,
    max_countries_to_display: int = 15,
    min_pct_to_display_country: float = 0.015,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display number of companies per country in a specific sector. [Source: Finance Database]

    Parameters
    ----------
    sector: str
        Select sector to get number of companies by each country
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges
    export: str
        Format to export data as
    raw: bool
        Output all raw data
    max_countries_to_display: int
        Maximum number of countries to display
    min_pct_to_display_country: float
        Minimum percentage to display country
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    companies_per_country = financedatabase_model.get_companies_per_country_in_sector(
        sector, mktcap, exclude_exchanges
    )
    # Sort countries by company count, descending.
    companies_per_country = dict(
        OrderedDict(
            sorted(companies_per_country.items(), key=lambda t: t[1], reverse=True)
        )
    )
    # Drop countries with no companies (iterate over a copy while deleting).
    for key, value in companies_per_country.copy().items():
        if value == 0:
            del companies_per_country[key]
    if not companies_per_country:
        console.print("No companies found with these parameters!\n")
        return
    df = pd.DataFrame.from_dict(companies_per_country, orient="index")
    df.index.name = "Country"
    df.columns = ["Number of companies"]
    df["Number of companies"] = df["Number of companies"].astype(int)
    title = mktcap + " cap companies " if mktcap else "Companies "
    title += f"in {sector} sector\n"
    title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
    if raw:
        print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
    else:
        colors = theme.get_colors()
        if len(companies_per_country) > 1:
            total_num_companies = sum(companies_per_country.values())
            # Countries below this absolute company count get folded into "Others".
            min_companies_to_represent = round(
                min_pct_to_display_country * total_num_companies
            )
            filter_countries_to_display = (
                np.array(list(companies_per_country.values()))
                > min_companies_to_represent
            )
            if any(filter_countries_to_display):
                if not all(filter_countries_to_display):
                    # Counts are sorted descending, so the first country below
                    # the threshold caps how many countries are displayed.
                    num_countries_to_display = np.where(~filter_countries_to_display)[
                        0
                    ][0]
                    if num_countries_to_display < max_countries_to_display:
                        max_countries_to_display = num_countries_to_display
            else:
                console.print(
                    "The minimum threshold percentage specified is too high, thus it will be ignored."
                )
            if len(companies_per_country) > max_countries_to_display:
                # Keep the top countries and aggregate the tail into "Others".
                companies_per_country_sliced = dict(
                    list(companies_per_country.items())[: max_countries_to_display - 1]
                )
                companies_per_country_sliced["Others"] = sum(
                    dict(
                        list(companies_per_country.items())[
                            max_countries_to_display - 1 :
                        ]
                    ).values()
                )
                legend, values = zip(*companies_per_country_sliced.items())
            else:
                legend, values = zip(*companies_per_country.items())
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                # Invalid external axes: abort (note: data is not exported).
                return
            ax.pie(
                values,
                labels=legend,
                colors=colors,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                startangle=45,
            )
            ax.set_title(title, fontsize=14)
            if not external_axes:
                theme.visualize_output()
        elif len(companies_per_country) == 1:
            console.print(
                f"Only 1 country found '{list(companies_per_country.keys())[0]}'. No pie chart will be depicted."
            )
        else:
            console.print("No country found. No pie chart will be depicted.")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cpcs",
        df,
    )
@log_start_end(log=logger)
def display_companies_per_country_in_industry(
    industry: str = "Internet Content & Information",
    mktcap: str = "Large",
    exclude_exchanges: bool = True,
    export: str = "",
    raw: bool = False,
    max_countries_to_display: int = 15,
    min_pct_to_display_country: float = 0.015,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display number of companies per country in a specific industry. [Source: Finance Database]

    Parameters
    ----------
    industry: str
        Select industry to get number of companies by each country
    mktcap: str
        Select market cap of companies to consider from Small, Mid and Large
    exclude_exchanges : bool
        Exclude international exchanges
    export: str
        Format to export data as
    raw: bool
        Output all raw data
    max_countries_to_display: int
        Maximum number of countries to display
    min_pct_to_display_country: float
        Minimum percentage to display country
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    companies_per_country = financedatabase_model.get_companies_per_country_in_industry(
        industry, mktcap, exclude_exchanges
    )
    # Sort countries by company count, descending.
    companies_per_country = dict(
        OrderedDict(
            sorted(companies_per_country.items(), key=lambda t: t[1], reverse=True)
        )
    )
    # Drop countries with no companies (iterate over a copy while deleting).
    for key, value in companies_per_country.copy().items():
        if value == 0:
            del companies_per_country[key]
    if not companies_per_country:
        console.print("No companies found with these parameters!\n")
        return
    df = pd.DataFrame.from_dict(companies_per_country, orient="index")
    df.index.name = "Country"
    df.columns = ["Number of companies"]
    df["Number of companies"] = df["Number of companies"].astype(int)
    title = mktcap + " cap companies " if mktcap else "Companies "
    title += f"per country in {industry} industry\n"
    title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
    if raw:
        print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
    else:
        colors = theme.get_colors()
        if len(companies_per_country) > 1:
            total_num_companies = sum(companies_per_country.values())
            # Countries below this absolute company count get folded into "Others".
            min_companies_to_represent = round(
                min_pct_to_display_country * total_num_companies
            )
            filter_countries_to_display = (
                np.array(list(companies_per_country.values()))
                > min_companies_to_represent
            )
            if any(filter_countries_to_display):
                if not all(filter_countries_to_display):
                    # Counts are sorted descending, so the first country below
                    # the threshold caps how many countries are displayed.
                    num_countries_to_display = np.where(~filter_countries_to_display)[
                        0
                    ][0]
                    if num_countries_to_display < max_countries_to_display:
                        max_countries_to_display = num_countries_to_display
            else:
                console.print(
                    "The minimum threshold percentage specified is too high, thus it will be ignored."
                )
            if len(companies_per_country) > max_countries_to_display:
                # Keep the top countries and aggregate the tail into "Others".
                companies_per_country_sliced = dict(
                    list(companies_per_country.items())[: max_countries_to_display - 1]
                )
                companies_per_country_sliced["Others"] = sum(
                    dict(
                        list(companies_per_country.items())[
                            max_countries_to_display - 1 :
                        ]
                    ).values()
                )
                legend, values = zip(*companies_per_country_sliced.items())
            else:
                legend, values = zip(*companies_per_country.items())
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                # Invalid external axes: abort (note: data is not exported).
                return
            ax.pie(
                values,
                labels=legend,
                colors=colors,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                startangle=45,
            )
            ax.set_title(title, fontsize=14)
            if not external_axes:
                theme.visualize_output()
        elif len(companies_per_country) == 1:
            console.print(
                f"Only 1 country found '{list(companies_per_country.keys())[0]}'. No pie chart will be depicted."
            )
        else:
            console.print("No country found. No pie chart will be depicted.")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cpci",
        df,
    )
__docformat__ = "numpy"
# pylint:disable=too-many-arguments,unexpected-keyword-arg
import logging
from typing import Dict, Any, List
import numpy as np
import pandas as pd
from tqdm import tqdm
import yfinance as yf
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis.dcf_model import create_dataframe
logger = logging.getLogger(__name__)
# Mapping of StockAnalysis financial-statement line items.
# Outer key = statement type (BS = balance sheet, IS = income statement,
# CF = cash flow statement); inner key = short "finance key" accepted by
# `get_stocks_data`; value = the human-readable line-item name.
SA_KEYS = {
    "BS": {
        "ce": "Cash & Equivalents",
        "sti": "Short-Term Investments",
        "cce": "Cash & Cash Equivalents",
        "rec": "Receivables",
        "inv": "Inventory",
        "oca": "Other Current Assets",
        "tca": "Total Current Assets",
        "ppe": "Property, Plant & Equipment",
        "lti": "Long-Term Investments",
        "gai": "Goodwill and Intangibles",
        "olta": "Other Long-Term Assets",
        "tlta": "Total Long-Term Assets",
        "ta": "Total Assets",
        "ap": "Accounts Payable",
        "dr": "Deferred Revenue",
        "cd": "Current Debt",
        "ocl": "Other Current Liabilities",
        "tcl": "Total Current Liabilities",
        "ltd": "Long-Term Debt",
        "oltl": "Other Long-Term Liabilities",
        "tltl": "Total Long-Term Liabilities",
        "tl": "Total Liabilities",
        "ret": "Retained Earnings",
        "ci": "Comprehensive Income",
        "se": "Shareholders' Equity",
        "tle": "Total Liabilities and Equity",
    },
    "IS": {
        "re": "Revenue",
        "cr": "Cost of Revenue",
        "gp": "Gross Profit",
        # NOTE(review): "Genera" looks like a typo for "General". This is a
        # runtime string; confirm it is not used as a lookup key elsewhere
        # before changing it.
        "sga": "Selling, Genera & Admin",
        "rd": "Research & Development",
        "ooe": "Other Operating Expenses",
        "oi": "Operating Income",
        "ie": "Interest Expense / Income",
        "oe": "Other Expense / Income",
        "it": "Income Tax",
        "ni": "Net Income",
        "pd": "Preferred Dividends",
    },
    "CF": {
        "ninc": "Net Income",
        "da": "Depreciation & Amortization",
        "sbc": "Share-Based Compensation",
        "ooa": "Other Operating Activities",
        "ocf": "Operating Cash Flow",
        "cex": "Capital Expenditures",
        "acq": "Acquisitions",
        "cii": "Change in Investments",
        "oia": "Other Investing Activities",
        "icf": "Investing Cash Flow",
        "dp": "Dividends Paid",
        # NOTE(review): "Insurance" looks like a typo for "Issuance"; same
        # caution as above before changing this runtime string.
        "si": "Share Insurance / Repurchase",
        "di": "Debt Issued / Paid",
        "ofa": "Other Financing Activities",
        "fcf": "Financing Cash Flow",
        "ncf": "Net Cash Flow",
    },
}
@log_start_end(log=logger)
def get_stocks_data(
    symbols: List[str] = None,
    finance_key: str = "ncf",
    stocks_data: dict = None,
    period: str = "annual",
    symbol: str = "USD",
):
    """Get stocks data based on a list of stocks and the finance key. The function searches for the
    correct financial statement automatically. [Source: StockAnalysis]

    Parameters
    ----------
    symbols: list
        A list of tickers that will be used to collect data for.
    finance_key: str
        The finance key used to search within the SA_KEYS for the correct name of item
        on the financial statement
    stocks_data : dict
        A dictionary that is empty on initialisation but filled once data is collected
        for the first time.
    period : str
        Whether you want annually, quarterly or trailing financial statements.
    symbol : str
        Choose in what currency you wish to convert each company's financial statement.
        Default is USD (US Dollars).

    Returns
    -------
    dict
        Dictionary of filtered stocks data separated by financial statement
    """
    if symbols is None:
        symbols = ["FB", "TSLA", "MSFT"]
    if stocks_data is None:
        stocks_data = {}
    no_data = []
    for ticker in tqdm(symbols):
        # Find the statement type (BS/IS/CF) that contains the requested key.
        for statement, description in SA_KEYS.items():
            if finance_key not in description:
                continue
            if statement not in stocks_data:
                stocks_data[statement] = {}
            used_statement = statement
            symbol_statement, rounding, currency_dcf = create_dataframe(
                ticker, statement, period.lower()
            )
            if symbol_statement.empty:
                no_data.append(ticker)
                continue
            symbol_statement_rounded = (
                change_type_dataframes(symbol_statement) * rounding
            )
            if symbol and symbol != currency_dcf:
                # BUG FIX: the FX pair is built from the statement currency and
                # the *target* currency (e.g. "EURUSD=X"). Previously the ticker
                # was interpolated here (e.g. "EURTSLA=X"), which is not a valid
                # Yahoo Finance currency pair.
                currency_data = yf.download(
                    f"{currency_dcf}{symbol}=X",
                    start=f"{symbol_statement_rounded.columns[0]}-01-01",
                    end=f"{symbol_statement_rounded.columns[-1]}-12-31",
                    progress=False,
                )["Adj Close"]
                for year in symbol_statement_rounded:
                    # Since fiscal years differ, take the median and not the last value
                    # of the year
                    symbol_statement_rounded[year] = (
                        symbol_statement_rounded[year]
                        * currency_data.loc[year].median()
                    )
            stocks_data[statement][ticker] = symbol_statement_rounded
    # NOTE(review): `used_statement` is only bound when `finance_key` matches a
    # statement; an unknown finance key raises NameError below (pre-existing).
    if period in ["Quarterly", "Trailing"]:
        # Normalise column labels to quarter periods (e.g. "2021Q3").
        for ticker in stocks_data[used_statement]:
            # BUG FIX: relabel the columns of the ticker being iterated;
            # previously this assigned to `stocks_data[used_statement][symbol]`,
            # i.e. keyed by the currency string, which is not a ticker in this
            # dictionary.
            stocks_data[used_statement][ticker].columns = (
                stocks_data[used_statement][ticker]
                .columns.map(lambda x: pd.Period(x, "Q"))
                .astype(str)
            )
    stocks_data[used_statement] = match_length_dataframes(stocks_data[used_statement])
    if no_data:
        console.print(
            f"No data available for {', '.join(str(ticker) for ticker in no_data)}"
        )
    return stocks_data
@log_start_end(log=logger)
def match_length_dataframes(dataframes: Dict[str, pd.DataFrame]) -> Dict[str, pd.DataFrame]:
    """
    All unique columns are collected and filled for each DataFrame to
    ensure equal length of columns.

    Parameters
    ----------
    dataframes : Dict[str, pd.DataFrame]
        Dict of dataframes to match length, keyed by ticker symbol.
        (BUG FIX: the previous annotation ``Dict[pd.DataFrame, Any]`` was
        wrong — DataFrames are not hashable and the keys are strings.)

    Returns
    -------
    dataframes : Dict[str, pd.DataFrame]
        Dict of DataFrames with equal column length
    """
    # Union of every column seen across all DataFrames; order is irrelevant
    # because each frame's columns are sorted below.
    all_columns = set()
    for dataframe in dataframes.values():
        all_columns.update(dataframe.columns)
    for ticker, dataframe in dataframes.items():
        # Add any missing columns as NaN, then sort so all frames line up.
        for column in all_columns:
            if column not in dataframe.columns:
                dataframe[column] = np.nan
        dataframes[ticker] = dataframe.sort_index(axis=1)
    return dataframes
def change_type_dataframes(data: pd.DataFrame) -> pd.DataFrame:
    """
    Convert comma-separated number strings to floats.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame whose cells hold numbers formatted as comma-separated
        strings (e.g. "1,234").

    Returns
    -------
    pd.DataFrame
        The same data with every cell converted to float.
    """
    # Normalise every cell to text, strip the thousands separators column by
    # column, then cast the whole frame to float in one go.
    as_text = data.astype(str)
    without_separators = as_text.apply(lambda column: column.str.replace(",", ""))
    return without_separators.astype(float)
import itertools
import logging
import os
from typing import List
import numpy as np
import pandas as pd
from openbb_terminal import rich_config
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
patch_pandas_text_adjustment,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.insider.openinsider_model import (
get_open_insider_data,
get_open_insider_link,
get_print_insider_data,
)
logger = logging.getLogger(__name__)
# Legend for the one-letter footnote flags that OpenInsider appends to each
# filing row; printed under the tables by the view functions below.
d_notes = {
    "A": "A: Amended filing",
    "D": "D: Derivative transaction in filing (usually option exercise)",
    "E": "E: Error detected in filing",
    "M": "M: Multiple transactions in filing; earliest reported transaction date & weighted average transaction price",
}
# Rich-markup descriptions for each trade type; colors match the per-row
# highlighting applied in `print_insider_filter`.
d_trade_types = {
    "S - Sale": "[red]S - Sale: Sale of securities on an exchange or to another person[/red]",
    "S - Sale+OE": "[yellow]S - Sale+OE: Sale of securities "
    "on an exchange or to another person (after option exercise)[/yellow]",
    "F - Tax": "[magenta]F - Tax: Payment of exercise price or "
    "tax liability using portion of securities received from the company[/magenta]",
    "P - Purchase": "[green]P - Purchase: Purchase of securities on "
    "an exchange or from another person[/green]",
}
def lambda_red_highlight(values) -> List[str]:
    """Wrap every value in rich ``[red]`` markup.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    -------
    List[str]
        colored dataframes values
    """
    return list(map("[red]{}[/red]".format, values))
def lambda_yellow_highlight(values) -> List[str]:
    """Wrap every value in rich ``[yellow]`` markup.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    -------
    List[str]
        colored dataframes values
    """
    return list(map("[yellow]{}[/yellow]".format, values))
def lambda_magenta_highlight(values) -> List[str]:
    """Magenta highlight

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    -------
    List[str]
        colored dataframes values
    """
    # Return annotation added for consistency with lambda_red_highlight /
    # lambda_yellow_highlight; behavior is unchanged.
    return [f"[magenta]{val}[/magenta]" for val in values]
def lambda_green_highlight(values) -> List[str]:
    """Green highlight

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    -------
    List[str]
        colored dataframes values
    """
    # Return annotation added for consistency with lambda_red_highlight /
    # lambda_yellow_highlight; behavior is unchanged.
    return [f"[green]{val}[/green]" for val in values]
@log_start_end(log=logger)
def print_insider_data(type_insider: str = "lcb", limit: int = 10, export: str = ""):
    """Print insider data

    Parameters
    ----------
    type_insider: str
        Insider type of data. Available types can be accessed through get_insider_types().
    limit: int
        Limit of data rows to display
    export: str
        Export data format
    """
    data = get_print_insider_data(type_insider)
    if data.empty:
        return
    print_rich_table(
        data.head(limit),
        headers=[col.title() for col in data.columns],
        show_index=False,
        title="Insider Data",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), type_insider, data)
    # Tables with 13 columns carry the "X" footnote-flag column; explain every
    # distinct flag that appears ("-" means no flags for that row).
    if data.shape[1] == 13:
        flag_lists = [list(flags) for flags in data["X"].values if flags != "-"]
        for flag in np.unique(list(itertools.chain(*flag_lists))):
            console.print(d_notes[flag])
@log_start_end(log=logger)
def print_insider_filter(
    preset: str,
    symbol: str,
    limit: int = 10,
    links: bool = False,
    export: str = "",
) -> None:
    """Print insider filter based on loaded preset. [Source: OpenInsider]

    Parameters
    ----------
    preset : str
        Loaded preset filter
    symbol : str
        Stock ticker symbol (takes precedence over the preset when provided)
    limit : int
        Limit of rows of data to display
    links : bool
        Flag to show hyperlinks
    export : str
        Format to export data
    """
    # A ticker symbol takes precedence over the preset-based screener link.
    if symbol:
        link = f"http://openinsider.com/screener?s={symbol}"
    else:
        link = get_open_insider_link(preset)
    if not link:
        return
    df_insider = get_open_insider_data(link, has_company_name=bool(not symbol))
    # Keep an unmodified copy: the footnote/trade-type legends printed below
    # need the columns that are dropped for display.
    df_insider_orig = df_insider.copy()
    if df_insider.empty:
        console.print("No insider data found\n")
        return
    if links:
        # Links mode: show only the hyperlink columns plus the filing date.
        df_insider = df_insider[
            ["Ticker Link", "Insider Link", "Filing Link", "Filing Date"]
        ].head(limit)
    else:
        df_insider = df_insider.drop(
            columns=["Filing Link", "Ticker Link", "Insider Link"]
        ).head(limit)
    if rich_config.USE_COLOR and not links:
        # Color whole rows according to their trade type using rich markup.
        new_df_insider = df_insider.copy()
        if not new_df_insider[new_df_insider["Trade Type"] == "S - Sale"].empty:
            new_df_insider[new_df_insider["Trade Type"] == "S - Sale"] = new_df_insider[
                new_df_insider["Trade Type"] == "S - Sale"
            ].apply(lambda_red_highlight)
        if not new_df_insider[new_df_insider["Trade Type"] == "S - Sale+OE"].empty:
            new_df_insider[
                new_df_insider["Trade Type"] == "S - Sale+OE"
            ] = new_df_insider[new_df_insider["Trade Type"] == "S - Sale+OE"].apply(
                lambda_yellow_highlight
            )
        if not new_df_insider[new_df_insider["Trade Type"] == "F - Tax"].empty:
            new_df_insider[new_df_insider["Trade Type"] == "F - Tax"] = new_df_insider[
                new_df_insider["Trade Type"] == "F - Tax"
            ].apply(lambda_magenta_highlight)
        if not new_df_insider[new_df_insider["Trade Type"] == "P - Purchase"].empty:
            new_df_insider[
                new_df_insider["Trade Type"] == "P - Purchase"
            ] = new_df_insider[new_df_insider["Trade Type"] == "P - Purchase"].apply(
                lambda_green_highlight
            )
        # NOTE(review): these pandas display options are set globally and never
        # restored afterwards — confirm that is intentional.
        patch_pandas_text_adjustment()
        pd.set_option("display.max_colwidth", 0)
        pd.set_option("display.max_rows", None)
        # needs to be done because table is too large :(
        new_df_insider = new_df_insider.drop(columns=["Filing Date", "Trade Type"])
    else:
        # needs to be done because table is too large :(
        new_df_insider = df_insider.drop(columns=["Filing Date"], axis=1)
    print_rich_table(
        new_df_insider,
        headers=[x.title() for x in new_df_insider.columns],
        title="Insider filtered",
    )
    if not links:
        # Print the legend for every footnote flag that appears in the data.
        l_chars = [list(chars) for chars in df_insider_orig["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        for char in l_uchars:
            console.print(d_notes[char])
        # NOTE(review): a trade type outside d_trade_types (S/F/P variants)
        # would raise KeyError here — confirm all OpenInsider types are covered.
        l_tradetype = df_insider_orig["Trade Type"].values
        l_utradetype = np.unique(l_tradetype)
        for tradetype in l_utradetype:
            console.print(d_trade_types[tradetype])
    if export:
        # Export is named after the command that produced it: per-ticker stats
        # vs. preset-based filter.
        if symbol:
            cmd = "stats"
        else:
            cmd = "filter"
        export_data(export, os.path.dirname(os.path.abspath(__file__)), cmd, df_insider)
__docformat__ = "numpy"
import logging
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_insider_activity(symbol: str) -> pd.DataFrame:
    """Get insider activity. [Source: Business Insider]

    Parameters
    ----------
    symbol : str
        Ticker symbol to get insider activity data from

    Returns
    -------
    df_insider : pd.DataFrame
        Insider activity data, indexed by transaction date (ascending)
    """
    url_market_business_insider = (
        f"https://markets.businessinsider.com/stocks/{symbol.lower()}-stock"
    )
    text_soup_market_business_insider = BeautifulSoup(
        requests.get(
            url_market_business_insider, headers={"User-Agent": get_user_agent()}
        ).text,
        "lxml",
    )
    d_insider = dict()
    l_insider_vals = list()
    # Every 6 consecutive centered table cells form one insider-activity row:
    # Date, Shares Traded, Shares Held, Price, Type, Option.
    for idx, insider_val in enumerate(
        text_soup_market_business_insider.findAll(
            "td", {"class": "table__td text-center"}
        )
    ):
        l_insider_vals.append(insider_val.text.strip())
        # Add value to dictionary
        if (idx + 1) % 6 == 0:
            # Check if we are still parsing insider trading activity:
            # date cells contain "/"; once the first cell of a group does not,
            # the insider table has ended and the group is discarded.
            if "/" not in l_insider_vals[0]:
                break
            d_insider[(idx + 1) // 6] = l_insider_vals
            l_insider_vals = list()
    df_insider = pd.DataFrame.from_dict(
        d_insider,
        orient="index",
        columns=["Date", "Shares Traded", "Shares Held", "Price", "Type", "Option"],
    )
    df_insider["Date"] = pd.to_datetime(df_insider["Date"])
    df_insider = df_insider.set_index("Date")
    df_insider = df_insider.sort_index(ascending=True)
    l_names = list()
    for s_name in text_soup_market_business_insider.findAll(
        "a", {"onclick": "silentTrackPI()"}
    ):
        l_names.append(s_name.text.strip())
    # NOTE(review): fragile scraping — this assumes the number of insider-name
    # links equals the number of parsed rows, AND that the page order still
    # matches the rows after the ascending sort above; verify both against the
    # live page before relying on the "Insider" column alignment.
    df_insider["Insider"] = l_names
    return df_insider
__docformat__ = "numpy"
import logging
import math
import os
from datetime import datetime, timedelta
from typing import List, Optional
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
get_next_stock_market_days,
is_valid_axes_count,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.insider import businessinsider_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# pylint: disable=R0912
@log_start_end(log=logger)
def insider_activity(
    data: pd.DataFrame,
    symbol: str,
    start_date: Optional[str] = None,
    interval: str = "1440min",
    limit: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display insider activity. [Source: Business Insider]

    Either prints a raw table of insider trades (``raw=True``) or plots the
    share price with insider buys/sells overlaid as vertical bars whose height
    is proportional to the number of shares traded on that day.

    Parameters
    ----------
    data: pd.DataFrame
        Stock dataframe
    symbol: str
        Due diligence ticker symbol
    start_date: Optional[str]
        Initial date (e.g., 2021-10-01). Defaults to 3 years back
    interval: str
        Stock data interval
    limit: int
        Number of latest days of inside activity
    raw: bool
        Print to console
    export: str
        Export dataframe data to csv,json,xlsx file
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if start_date is None:
        # Default lookback window of roughly three years
        start_date = (datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d")
    df_ins = businessinsider_model.get_insider_activity(symbol)
    if df_ins.empty:
        logger.warning("The insider activity on the ticker does not exist")
        console.print("[red]The insider activity on the ticker does not exist.\n[/red]")
    else:
        if start_date:
            # Date-indexed frame: label slicing trims trades before start_date
            df_insider = df_ins[start_date:].copy()  # type: ignore
        else:
            df_insider = df_ins.copy()
        if raw:
            df_insider.index = pd.to_datetime(df_insider.index).date
            print_rich_table(
                # Strip thousands separators / trailing ".00" for display only
                df_insider.sort_index(ascending=False)
                .head(n=limit)
                .applymap(lambda x: x.replace(".00", "").replace(",", "")),
                headers=list(df_insider.columns),
                show_index=True,
                title="Insider Activity",
            )
        else:
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                return
            if interval == "1440min":
                ax.plot(data.index, data["Adj Close"].values, lw=3)
            else:  # Intraday
                ax.plot(data.index, data["Close"].values, lw=3)
            ax.set_title(f"{symbol.upper()}'s Insider Trading Activity & Share Price")
            ax.set_ylabel("Share Price")
            # Signed trade size: buys positive, sells negative
            df_insider["Trade"] = df_insider.apply(
                lambda row: (1, -1)[row.Type == "Sell"]
                * float(row["Shares Traded"].replace(",", "")),
                axis=1,
            )
            ax.set_xlim(right=data.index[-1])
            min_price, max_price = ax.get_ylim()
            price_range = max_price - min_price
            # Largest single-day net buy and net sell volumes, used to scale
            # the insider bars to the price axis
            maxshares = (
                df_insider[df_insider["Type"] == "Buy"]
                .groupby(by=["Date"])
                .sum(numeric_only=True)["Trade"]
                .max()
            )
            minshares = (
                df_insider[df_insider["Type"] == "Sell"]
                .groupby(by=["Date"])
                .sum(numeric_only=True)["Trade"]
                .min()
            )
            # NaN here means there were no buys (or no sells) in the window
            if math.isnan(maxshares):
                shares_range = minshares
            elif math.isnan(minshares):
                shares_range = maxshares
            else:
                shares_range = maxshares - minshares
            # Price units per share traded: converts volumes to bar heights
            n_proportion = price_range / shares_range
            bar_1 = None
            for ind in (
                df_insider[df_insider["Type"] == "Sell"]
                .groupby(by=["Date"])
                .sum(numeric_only=True)
                .index
            ):
                if ind in data.index:
                    ind_dt = ind
                else:
                    # Trade date missing from price index (e.g. weekend):
                    # snap to the next trading day
                    ind_dt = get_next_stock_market_days(ind, 1)[0]
                n_stock_price = 0
                if interval == "1440min":
                    n_stock_price = data["Adj Close"][ind_dt]
                else:
                    n_stock_price = data["Close"][ind_dt]
                # Sell volumes are negative, so the bar extends downwards
                bar_1 = ax.vlines(
                    x=ind_dt,
                    ymin=n_stock_price
                    + n_proportion
                    * float(
                        df_insider[df_insider["Type"] == "Sell"]
                        .groupby(by=["Date"])
                        .sum(numeric_only=True)["Trade"][ind]
                    ),
                    ymax=n_stock_price,
                    colors=theme.down_color,
                    ls="-",
                    lw=5,
                )
            bar_2 = None
            for ind in (
                df_insider[df_insider["Type"] == "Buy"]
                .groupby(by=["Date"])
                .sum(numeric_only=True)
                .index
            ):
                if ind in data.index:
                    ind_dt = ind
                else:
                    ind_dt = get_next_stock_market_days(ind, 1)[0]
                n_stock_price = 0
                if interval == "1440min":
                    n_stock_price = data["Adj Close"][ind_dt]
                else:
                    n_stock_price = data["Close"][ind_dt]
                # Buy volumes are positive, so the bar extends upwards
                bar_2 = ax.vlines(
                    x=ind_dt,
                    ymin=n_stock_price,
                    ymax=n_stock_price
                    + n_proportion
                    * float(
                        df_insider[df_insider["Type"] == "Buy"]
                        .groupby(by=["Date"])
                        .sum(numeric_only=True)["Trade"][ind]
                    ),
                    colors=theme.up_color,
                    ls="-",
                    lw=5,
                )
            # Legend entries depend on which trade types actually occurred
            if bar_1 and bar_2:
                ax.legend(
                    handles=[bar_1, bar_2],
                    labels=["Insider Selling", "Insider Buying"],
                    loc="best",
                )
            elif bar_1:
                ax.legend(
                    handles=[bar_1],
                    labels=["Insider Selling"],
                    loc="best",
                )
            elif bar_2:
                ax.legend(
                    handles=[bar_2],
                    labels=["Insider Buying"],
                    loc="best",
                )
            theme.style_primary_axis(ax)
            if not external_axes:
                theme.visualize_output()
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "act",
            df_insider,
        )
__docformat__ = "numpy"
import pandas as pd
from openbb_terminal.stocks.insider import openinsider_model
def stats(symbol: str) -> pd.DataFrame:
    """Fetch the OpenInsider screener results for a single ticker.

    Parameters
    ----------
    symbol : str
        Ticker to get insider stats for

    Returns
    -------
    pd.DataFrame
        DataFrame of insider stats

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.stats("AAPL")
    """
    return openinsider_model.get_open_insider_data(
        f"http://openinsider.com/screener?s={symbol}", has_company_name=False
    )
def insider_filter(preset: str) -> pd.DataFrame:
    """Get insider trades based on preset filter

    The preset name is resolved to an OpenInsider screener URL and the
    resulting table is returned with company names included.

    Parameters
    ----------
    preset : str
        Name of preset filter

    Returns
    -------
    pd.DataFrame
        DataFrame of filtered insider data

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb

    In order to filter, we pass one of the predefined .ini filters from OpenBBUserData/presets/stocks/insider

    >>> filter = "Gold-Silver"
    >>> insider_trades = openbb.stocks.ins.filter(filter)
    """
    url = openinsider_model.get_open_insider_link(preset)
    return openinsider_model.get_open_insider_data(url, has_company_name=True)
def lcb() -> pd.DataFrame:
    """Fetch the latest cluster buys from the OpenInsider "lcb" screen.

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.lcb()
    """
    return openinsider_model.get_print_insider_data("lcb")


def lpsb() -> pd.DataFrame:
    """Fetch the latest penny stock buys from the OpenInsider "lpsb" screen.

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.lpsb()
    """
    return openinsider_model.get_print_insider_data("lpsb")


def lit() -> pd.DataFrame:
    """Fetch the latest insider trades from the OpenInsider "lit" screen.

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.lit()
    """
    return openinsider_model.get_print_insider_data("lit")


def lip() -> pd.DataFrame:
    """Fetch the latest insider purchases from the OpenInsider "lip" screen.

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.lip()
    """
    return openinsider_model.get_print_insider_data("lip")


def blip() -> pd.DataFrame:
    """Fetch the latest insider purchases above $25k ("blip" screen).

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.blip()
    """
    return openinsider_model.get_print_insider_data("blip")


def blop() -> pd.DataFrame:
    """Fetch the latest officer purchases above $25k ("blop" screen).

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.blop()
    """
    return openinsider_model.get_print_insider_data("blop")


def blcp() -> pd.DataFrame:
    """Fetch the latest CEO/CFO purchases above $25k ("blcp" screen).

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.blcp()
    """
    return openinsider_model.get_print_insider_data("blcp")


def lis() -> pd.DataFrame:
    """Fetch the latest insider sales from the OpenInsider "lis" screen.

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.lis()
    """
    return openinsider_model.get_print_insider_data("lis")


def blis() -> pd.DataFrame:
    """Fetch the latest insider sales above $100k ("blis" screen).

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.blis()
    """
    return openinsider_model.get_print_insider_data("blis")


def blos() -> pd.DataFrame:
    """Fetch the latest officer sales above $100k ("blos" screen).

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.blos()
    """
    return openinsider_model.get_print_insider_data("blos")


def blcs() -> pd.DataFrame:
    """Fetch the latest CEO/CFO sales above $100k ("blcs" screen).

    Returns
    -------
    pd.DataFrame
        Latest insider trades

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.ins.blcs()
    """
    return openinsider_model.get_print_insider_data("blcs")
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import mstl_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_mstl_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display MSTL Model

    Cleans the input data, fits an MSTL model, plots the historical backtest
    plus the forward forecast, and optionally plots the model residuals.

    Parameters
    ----------
    data: Union[pd.DataFrame, pd.Series]
        Data to forecast
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    external_axes: Optional[List[plt.Axes]]
        External axes to plot on
    """
    # MSTL takes no past covariates, hence the explicit None arguments below
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = mstl_model.get_mstl_data(
        data=data,
        target_column=target_column,
        seasonal_periods=seasonal_periods,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # An empty list signals the model failed to fit; nothing to plot
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="MSTL",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, None, ticker_series, forecast_horizon=forecast_horizon
        )
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import nhits_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_nhits_forecast(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: Optional[str] = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    num_stacks: int = 3,
    num_blocks: int = 1,
    num_layers: int = 2,
    layer_widths: int = 512,
    pooling_kernel_sizes: Optional[Tuple[Tuple[int]]] = None,
    n_freq_downsample: Optional[Tuple[Tuple[int]]] = None,
    dropout: float = 0.1,
    activation: str = "ReLU",
    max_pool_1d: bool = True,
    batch_size: int = 32,
    n_epochs: int = 100,
    learning_rate: float = 1e-3,
    model_save_name: str = "nhits_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display Nhits forecast

    Cleans the data, trains the NHiTS model, plots the historical backtest and
    the forward forecast, and optionally plots the model residuals.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: Optional[str]
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    num_stacks: int
        The number of stacks that make up the whole model.
    num_blocks: int
        The number of blocks making up every stack.
    num_layers: int
        The number of fully connected layers preceding the final forking layers in each block
        of every stack.
    layer_widths: int
        Determines the number of neurons that make up each fully connected layer in each
        block of every stack. If a list is passed, it must have a length equal to num_stacks
        and every entry in that list corresponds to the layer width of the corresponding stack.
        If an integer is passed, every stack will have blocks with FC layers of the same width.
    pooling_kernel_sizes: Optional[Tuple[Tuple[int]]]
        If set, this parameter must be a tuple of tuples, of size (num_stacks x num_blocks),
        specifying the kernel size for each block in each stack used for the input pooling
        layer. If left to None, some default values will be used based on input_chunk_length.
    n_freq_downsample: Optional[Tuple[Tuple[int]]]
        If set, this parameter must be a tuple of tuples, of size (num_stacks x num_blocks),
        specifying the downsampling factors before interpolation, for each block in each stack.
        If left to None, some default values will be used based on output_chunk_length.
    dropout: float
        The dropout probability to be used in fully connected layers.
    activation: str
        Supported activations: [[‘ReLU’,’RReLU’, ‘PReLU’, ‘Softplus’, ‘Tanh’, ‘SELU’, ‘LeakyReLU’, ‘Sigmoid’]
    max_pool_1d: bool
        Use max_pool_1d pooling. False uses AvgPool1d.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 100.
    learning_rate: float
        Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "nhits_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    external_axes: Optional[List[plt.Axes]]
        External axes to plot on
    """
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    # Ensure the output chunk length is consistent with n_predict / covariates
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = nhits_model.get_nhits_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        num_stacks=num_stacks,
        num_blocks=num_blocks,
        num_layers=num_layers,
        layer_widths=layer_widths,
        pooling_kernel_sizes=pooling_kernel_sizes,
        n_freq_downsample=n_freq_downsample,
        dropout=dropout,
        activation=activation,
        max_pool_1d=max_pool_1d,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        model_save_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
    )
    # An empty list signals the model failed to fit; nothing to plot
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="NHITS",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
        )
"""Probabilistic Exponential Smoothing Model"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from numpy import ndarray
import pandas as pd
from darts import TimeSeries
from darts.models import ExponentialSmoothing
from darts.utils.utils import ModelMode, SeasonalityMode
from darts.metrics import mape
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore", ConvergenceWarning)
TRENDS = ["N", "A", "M"]
SEASONS = ["N", "A", "M"]
PERIODS = [4, 5, 7]
DAMPEN = ["T", "F"]
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_expo_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    trend: str = "A",
    seasonal: str = "A",
    seasonal_periods: int = 7,
    dampen: str = "F",
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    List[TimeSeries],
    List[TimeSeries],
    List[TimeSeries],
    Optional[Union[float, ndarray]],
    ExponentialSmoothing,
]:
    """Performs Probabilistic Exponential Smoothing forecasting

    This is a wrapper around statsmodels Holt-Winters' Exponential Smoothing;
    we refer to this link for the original and more complete documentation of the parameters.

    https://unit8co.github.io/darts/generated_api/darts.models.forecasting.exponential_smoothing.html

    Parameters
    ----------
    data : Union[pd.Series, np.ndarray]
        Input data.
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    trend: str
        Trend component. One of [N, A, M]
        Defaults to ADDITIVE.
    seasonal: str
        Seasonal component. One of [N, A, M]
        Defaults to ADDITIVE.
    seasonal_periods: int
        Number of seasonal periods in a year (7 for daily data)
        If not set, inferred from frequency of the series.
    dampen: str
        Dampen the function
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[Union[float, ndarray]], ExponentialSmoothing]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] - precision,
        Fit Prob. Expo model object.
    """
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=False)

    # Translate the single-letter flags into darts enums; any value other
    # than "M"/"N" falls back to the additive default.
    trend_model = {"M": ModelMode.MULTIPLICATIVE, "N": ModelMode.NONE}.get(
        trend, ModelMode.ADDITIVE
    )
    seasonal_model = {
        "M": SeasonalityMode.MULTIPLICATIVE,
        "N": SeasonalityMode.NONE,
    }.get(seasonal, SeasonalityMode.ADDITIVE)
    damped = dampen != "F"

    def _build_model() -> ExponentialSmoothing:
        # The backtesting model and the final model share identical
        # hyper-parameters, so construct both from one place.
        return ExponentialSmoothing(
            trend=trend_model,
            seasonal=seasonal_model,
            seasonal_periods=int(seasonal_periods),
            damped=damped,
            random_state=42,
        )

    backtest_model = _build_model()
    try:
        # Historical backtesting over the entire series
        historical_fcast_es = backtest_model.historical_forecasts(
            ticker_series,
            start=float(start_window),
            forecast_horizon=int(forecast_horizon),
            verbose=True,
        )
    except Exception as e:  # noqa
        error = str(e)
        # lets translate this to something everyone understands
        if "with`overlap_end` set to `False`." in error:
            console.print(
                "[red]Dataset too small.[/red]"
                "[red] Please increase size to at least 100 data points.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None

    # Re-train on the full timeseries so the forward forecast uses all data
    best_model = _build_model()
    best_model.fit(ticker_series)
    probabilistic_forecast = best_model.predict(int(n_predict), num_samples=500)

    precision = mape(actual_series=ticker_series, pred_series=historical_fcast_es)
    console.print(f"Exponential smoothing obtains MAPE: {precision:.2f}% \n")

    return (
        ticker_series,
        historical_fcast_es,
        probabilistic_forecast,
        precision,
        best_model,
    )
__docformat__ = "numpy"
import logging
from typing import Optional, Tuple, Union, List
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import numpy as np
import pandas as pd
from darts import TimeSeries
from darts.models import Theta
from darts.utils.utils import SeasonalityMode
from darts.metrics import mape
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore", ConvergenceWarning)
SEASONS = ["N", "A", "M"]
PERIODS = [4, 5, 7]
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_theta_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    seasonal: str = "M",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[float],
    Optional[Theta],
]:
    """Performs Theta forecasting

    An implementation of the 4Theta method with configurable theta parameter.
    The best theta is selected by grid search on a validation split before the
    model is retrained on the full series for the final prediction.

    https://unit8co.github.io/darts/generated_api/darts.models.forecasting.theta.html

    Parameters
    ----------
    data : Union[pd.Series, np.ndarray]
        Input data.
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    seasonal: str
        Seasonal component. One of [N, A, M]
        Defaults to MULTIPLICATIVE.
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], float, float, Theta]
        Adjusted Data series,
        Historical forecast by best theta,
        list of Predictions,
        Mean average precision error,
        Best Theta,
        Theta Model.
    """
    use_scalers = False
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    train, val = ticker_series.split_before(start_window)
    # NOTE: `seasonal` is rebound from its string flag to the corresponding
    # darts SeasonalityMode enum for the rest of the function
    if seasonal == "A":
        seasonal = SeasonalityMode.ADDITIVE
    elif seasonal == "N":
        seasonal = SeasonalityMode.NONE
    else:  # Default
        seasonal = SeasonalityMode.MULTIPLICATIVE
    # Grid-search theta over [-10, 10], keeping the value with the lowest
    # MAPE on the validation split
    thetas = np.linspace(-10, 10, 50)
    best_mape = float("inf")
    best_theta = 0
    error = ""
    for theta in thetas:
        model = Theta(
            theta=theta,
            season_mode=seasonal,
            seasonality_period=seasonal_periods,
        )
        try:
            model.fit(train)
            pred_theta = model.predict(len(val))
            res = mape(val, pred_theta)
            if res < best_mape:
                best_mape = res
                best_theta = theta
        except Exception as e:  # noqa
            error = str(e)
    # best_theta == 0 means no candidate fit succeeded (0 is never produced
    # by the linspace grid above), so surface the last error and bail out
    if best_theta == 0:
        console.print(f"[red]{error}[/red]")
        return [], [], [], 0, 0, None
    best_theta_model = Theta(
        best_theta,
        season_mode=seasonal,
        seasonality_period=seasonal_periods,
    )
    # Training model based on historical backtesting
    historical_fcast_theta = best_theta_model.historical_forecasts(
        ticker_series,
        start=float(start_window),
        forecast_horizon=int(forecast_horizon),
        verbose=True,
    )
    best_theta_model_final = Theta(
        best_theta,
        season_mode=seasonal,
        seasonality_period=seasonal_periods,
    )
    # fit model on entire series for final prediction
    best_theta_model_final.fit(ticker_series)
    prediction = best_theta_model_final.predict(int(n_predict))
    precision = mape(
        actual_series=ticker_series, pred_series=historical_fcast_theta
    )  # mape = mean average precision error
    console.print(f"Theta Model obtains MAPE: {precision:.2f}% \n")
    return (
        ticker_series,
        historical_fcast_theta,
        prediction,
        precision,
        best_theta,
        best_theta_model,
    )
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import tft_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_tft_forecast(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: Optional[str] = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    hidden_size: int = 16,
    lstm_layers: int = 1,
    num_attention_heads: int = 4,
    full_attention: bool = False,
    dropout: float = 0.1,
    hidden_continuous_size: int = 8,
    n_epochs: int = 200,
    batch_size: int = 32,
    model_save_name: str = "tft_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display Temporal Fusion Transformer forecast

    Cleans the data, trains the TFT model, plots the historical backtest and
    the probabilistic forward forecast, and optionally plots the residuals.

    Parameters
    ----------
    data (Union[pd.Series, pd.DataFrame]):
        Input Data
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    dataset_name str
        The name of the ticker to be predicted
    n_predict (int, optional):
        Days to predict. Defaults to 5.
    train_split (float, optional):
        Train/val split. Defaults to 0.85.
    past_covariates (str, optional):
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon (int, optional):
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length (int, optional):
        Number of past time steps that are fed to the forecasting module at prediction time.
        Defaults to 14.
    output_chunk_length (int, optional):
        The length of the forecast of the model. Defaults to 5.
    hidden_size (int, optional):
        Hidden state size of the TFT. Defaults to 16.
    lstm_layers (int, optional):
        Number of layers for the Long Short Term Memory Encoder and Decoder. Defaults to 1.
    num_attention_heads (int, optional):
        Number of attention heads. Defaults to 4.
    full_attention (bool, optional):
        Whether to apply a multi-head attention query. Defaults to False.
    dropout (float, optional):
        Fraction of neurons affected by dropout. Defaults to 0.1.
    hidden_continuous_size (int, optional):
        Default hidden size for processing continuous variables. Defaults to 8.
    n_epochs (int, optional):
        Number of epochs to run during training. Defaults to 200.
    batch_size (int, optional):
        Number of samples to pass through network during a single epoch. Defaults to 32.
    model_save_name (str, optional):
        The name for the model. Defaults to tft_model
    force_reset (bool, optional):
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints (bool, optional):
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    external_axes: Optional[List[plt.Axes]]
        External axes to plot on
    """
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    # Ensure the output chunk length is consistent with n_predict / covariates
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = tft_model.get_tft_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        hidden_size=hidden_size,
        lstm_layers=lstm_layers,
        num_attention_heads=num_attention_heads,
        full_attention=full_attention,
        dropout=dropout,
        hidden_continuous_size=hidden_continuous_size,
        n_epochs=n_epochs,
        batch_size=batch_size,
        model_save_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
    )
    # An empty list signals the model failed to fit; nothing to plot
    if ticker_series == []:
        return
    # The TFT forecast is sampled, so it is plotted as a probabilistic band
    probabilistic = True
    helpers.plot_forecast(
        name="TFT",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
        )
"""Temporal Fusion Transformer Model"""
__docformat__ = "numpy"
import logging
from typing import Tuple, Union, List, Optional
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import pandas as pd
from darts import TimeSeries
from darts.models import TFTModel
from darts.utils.likelihood_models import QuantileRegression
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.core.config.paths import USER_FORECAST_MODELS_DIRECTORY
warnings.simplefilter("ignore", ConvergenceWarning)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_tft_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    past_covariates: Optional[str] = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    hidden_size: int = 16,
    lstm_layers: int = 1,
    num_attention_heads: int = 4,
    full_attention: bool = False,
    dropout: float = 0.1,
    hidden_continuous_size: int = 8,
    n_epochs: int = 200,
    batch_size: int = 32,
    model_save_name: str = "tft_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[TFTModel],
]:
    """Performs Temporal Fusion Transformer forecasting.

    The TFT applies multi-head attention queries on future inputs from mandatory future_covariates.
    Specifying future encoders with add_encoders (read below) can automatically generate future
    covariates and allows to use the model without having to pass any future_covariates to fit()
    and predict().

    https://unit8co.github.io/darts/generated_api/darts.models.forecasting.tft_model.html

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: (str, optional)
        Target column to forecast. Defaults to "close".
    n_predict: (int, optional)
        Days to predict. Defaults to 5.
    past_covariates: (str, optional)
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    train_split: (float, optional)
        Train/val split. Defaults to 0.85.
    forecast_horizon: (int, optional)
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: (int, optional)
        Number of past time steps that are fed to the forecasting module at prediction time.
        Defaults to 14.
    output_chunk_length: (int, optional)
        The length of the forecast of the model. Defaults to 5.
    hidden_size: (int, optional)
        Hidden state size of the TFT. Defaults to 16.
    lstm_layers: (int, optional)
        Number of layers for the Long Short Term Memory Encoder and Decoder. Defaults to 1.
    num_attention_heads: (int, optional)
        Number of attention heads. Defaults to 4.
    full_attention: (bool, optional)
        Whether to apply a multi-head attention query. Defaults to False.
    dropout: (float, optional)
        Fraction of neurons affected by dropout. Defaults to 0.1.
    hidden_continuous_size: (int, optional)
        Default hidden size for processing continuous variables. Defaults to 8.
    n_epochs: (int, optional)
        Number of epochs to run during training. Defaults to 200.
    batch_size: (int, optional)
        Number of samples to pass through network during a single epoch. Defaults to 32.
    model_save_name: (str, optional)
        The name for the model. Defaults to "tft_model".
    force_reset: (bool, optional)
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: (bool, optional)
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], Optional[TFTModel]]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] - precision,
        Fitted probabilistic TFT model object.
        Empty lists / None are returned when the train/val split is too short for the
        requested chunk lengths.
    """
    # The target series is scaled before training; predictions are inverse-transformed
    # downstream (helpers.get_prediction receives the scaler).
    use_scalers = True
    # The QuantileRegression likelihood below makes this a probabilistic model.
    probabilistic = True
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    train, val = ticker_series.split_before(train_split)
    valid = helpers.check_data_length(
        train, val, input_chunk_length, output_chunk_length
    )
    if not valid:
        return [], [], [], None, None
    (
        past_covariate_whole,
        past_covariate_train,
        past_covariate_val,
    ) = helpers.past_covs(past_covariates, data, train_split, use_scalers)
    # Quantile levels estimated by the QuantileRegression likelihood.
    quantiles = [
        0.01,
        0.05,
        0.1,
        0.15,
        0.2,
        0.25,
        0.3,
        0.4,
        0.5,
        0.6,
        0.7,
        0.75,
        0.8,
        0.85,
        0.9,
        0.95,
        0.99,
    ]
    tft_model = TFTModel(
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        hidden_size=hidden_size,
        lstm_layers=lstm_layers,
        num_attention_heads=num_attention_heads,
        full_attention=full_attention,
        dropout=dropout,
        hidden_continuous_size=hidden_continuous_size,
        model_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
        random_state=42,
        n_epochs=n_epochs,
        batch_size=batch_size,
        pl_trainer_kwargs=helpers.get_pl_kwargs(accelerator="cpu"),
        likelihood=QuantileRegression(
            quantiles=quantiles
        ),  # QuantileRegression is set per default
        add_relative_index=True,  # TODO There is a bug with this. Must fix. Should be false
        log_tensorboard=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    # fit model on train series for historical forecasting
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        helpers.fit_model(
            tft_model,
            train,
            val,
            past_covariate_train,
            past_covariate_val,
        )
    # Reload the best checkpoint produced during training rather than the final epoch.
    best_model = TFTModel.load_from_checkpoint(
        model_name=model_save_name, best=True, work_dir=USER_FORECAST_MODELS_DIRECTORY
    )
    helpers.print_tensorboard_logs(model_save_name, USER_FORECAST_MODELS_DIRECTORY)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "TFT",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        best_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
"""Nhits Model"""
__docformat__ = "numpy"
import logging
import warnings
from typing import Union, Optional, List, Tuple
import pandas as pd
from darts import TimeSeries
from darts.models.forecasting.nhits import NHiTSModel
from darts.utils.likelihood_models import GaussianLikelihood
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.core.config.paths import USER_FORECAST_MODELS_DIRECTORY
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_nhits_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    train_split: float = 0.85,
    past_covariates: Optional[str] = None,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    num_stacks: int = 3,
    num_blocks: int = 1,
    num_layers: int = 2,
    layer_widths: int = 512,
    pooling_kernel_sizes: Optional[Tuple[Tuple[int]]] = None,
    n_freq_downsample: Optional[Tuple[Tuple[int]]] = None,
    dropout: float = 0.1,
    activation: str = "ReLU",
    max_pool_1d: bool = True,
    batch_size: int = 32,
    n_epochs: int = 100,
    learning_rate: float = 1e-3,
    model_save_name: str = "nhits_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[NHiTSModel],
]:
    """Performs Nhits forecasting

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    num_stacks: int
        The number of stacks that make up the whole model.
    num_blocks: int
        The number of blocks making up every stack.
    num_layers: int
        The number of fully connected layers preceding the final forking layers in each block
        of every stack.
    layer_widths: int
        Determines the number of neurons that make up each fully connected layer in each
        block of every stack. If a list is passed, it must have a length equal to num_stacks
        and every entry in that list corresponds to the layer width of the corresponding stack.
        If an integer is passed, every stack will have blocks with FC layers of the same width.
    pooling_kernel_sizes: Optional[Tuple[Tuple[int]]]
        If set, this parameter must be a tuple of tuples, of size (num_stacks x num_blocks),
        specifying the kernel size for each block in each stack used for the input pooling
        layer. If left to None, some default values will be used based on input_chunk_length.
    n_freq_downsample: Optional[Tuple[Tuple[int]]]
        If set, this parameter must be a tuple of tuples, of size (num_stacks x num_blocks),
        specifying the downsampling factors before interpolation, for each block in each stack.
        If left to None, some default values will be used based on output_chunk_length.
    dropout: float
        The dropout probability to be used in fully connected layers.
    activation: str
        Supported activations: ['ReLU', 'RReLU', 'PReLU', 'Softplus', 'Tanh', 'SELU', 'LeakyReLU', 'Sigmoid']
    max_pool_1d: bool
        Use max_pool_1d pooling. False uses AvgPool1d.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 100.
    learning_rate: float
        Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "nhits_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset (all checkpoints will be
        discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training. Defaults to True.

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], Optional[NHiTSModel]]
        Adjusted Data series,
        Historical forecast by best NHiTS model,
        list of Predictions,
        Mean average precision error,
        Best NHiTS Model.
        Empty lists / None are returned when the train/val split is too short for the
        requested chunk lengths.
    """
    # TODO Check if torch GPU AVAILABLE
    # The target series is scaled before training; predictions are inverse-transformed
    # downstream (helpers.get_prediction receives the scaler).
    use_scalers = True
    # NOTE(review): a GaussianLikelihood is configured below, but predictions are
    # treated as point forecasts (probabilistic=False) — confirm this is intended.
    probabilistic = False
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    train, val = ticker_series.split_before(train_split)
    valid = helpers.check_data_length(
        train, val, input_chunk_length, output_chunk_length
    )
    if not valid:
        return [], [], [], None, None
    (
        past_covariate_whole,
        past_covariate_train,
        past_covariate_val,
    ) = helpers.past_covs(past_covariates, data, train_split, use_scalers)
    # Early Stopping
    nhits_model = NHiTSModel(
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        num_stacks=num_stacks,
        num_blocks=num_blocks,
        num_layers=num_layers,
        layer_widths=layer_widths,
        pooling_kernel_sizes=pooling_kernel_sizes,
        n_freq_downsample=n_freq_downsample,
        dropout=dropout,
        activation=activation,
        MaxPool1d=max_pool_1d,
        n_epochs=n_epochs,
        batch_size=batch_size,
        optimizer_kwargs={"lr": learning_rate},
        model_name=model_save_name,
        random_state=42,
        pl_trainer_kwargs=helpers.get_pl_kwargs(accelerator="cpu"),
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
        likelihood=GaussianLikelihood(),
        log_tensorboard=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    # fit model on train series for historical forecasting
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        helpers.fit_model(
            nhits_model,
            train,
            val,
            past_covariate_train,
            past_covariate_val,
        )
    # Reload the best checkpoint produced during training rather than the final epoch.
    best_model = NHiTSModel.load_from_checkpoint(
        model_name=model_save_name, best=True, work_dir=USER_FORECAST_MODELS_DIRECTORY
    )
    helpers.print_tensorboard_logs(model_save_name, USER_FORECAST_MODELS_DIRECTORY)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "NHITS",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        best_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import brnn_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_brnn_forecast(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: Optional[str] = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    model_type: str = "LSTM",
    n_rnn_layers: int = 1,
    dropout: float = 0.0,
    batch_size: int = 32,
    n_epochs: int = 100,
    learning_rate: float = 1e-3,
    model_save_name: str = "brnn_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display BRNN forecast

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    model_type: str
        Either a string specifying the RNN module type ("RNN", "LSTM" or "GRU"). Defaults to "LSTM".
    n_rnn_layers: int
        Number of layers in the RNN module. Defaults to 1.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.0.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 100.
    learning_rate: float
        Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "brnn_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export predictions in raw format. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Trim the dataset to the requested analysis window before modelling.
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = brnn_model.get_brnn_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        model_type=model_type,
        n_rnn_layers=n_rnn_layers,
        dropout=dropout,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        model_save_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
    )
    # The model helper returns empty lists when its data-length checks fail.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="BRNN",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
        )
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import rwd_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_rwd_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Random Walk with Drift Model

    Parameters
    ----------
    data: Union[pd.DataFrame, pd.Series]
        Data to forecast
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export predictions in raw format. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Trim the dataset to the requested analysis window before modelling.
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = rwd_model.get_rwd_data(
        data=data,
        target_column=target_column,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # The model helper returns empty lists when its data checks fail.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="RWD",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, None, ticker_series, forecast_horizon=forecast_horizon
        )
__docformat__ = "numpy"
import logging
import os
from typing import Dict, Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import seaborn as sns
from darts.utils.statistics import plot_acf
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
)
from openbb_terminal.helper_funcs import (
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import forecast_model
from openbb_terminal.config_terminal import theme
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def show_options(
    datasets: Dict[str, pd.DataFrame],
    dataset_name: str = None,
    export: str = "",
):
    """Show the column options of the loaded datasets.

    Parameters
    ----------
    datasets: dict
        The loaded in datasets
    dataset_name: str
        The name of the dataset you wish to show options for
    export: str
        Format to export image
    """
    # Nothing loaded yet: tell the user how to proceed and stop.
    if not datasets:
        console.print(
            "Please load in a dataset by using the 'load' command before using this feature."
        )
        return
    option_tables = forecast_model.get_options(datasets, dataset_name)
    for name, options_df in option_tables.items():
        print_rich_table(
            options_df,
            headers=list(options_df.columns),
            show_index=False,
            title=f"Options for dataset: '{name}'",
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            f"{name}_options",
            options_df.set_index("column"),
        )
@log_start_end(log=logger)
def display_plot(
    data: pd.DataFrame,
    columns: List[str],
    export: str = "",
    external_axes: Optional[List[plt.axes]] = None,
):
    """Plot the selected columns of a dataset.

    Parameters
    ----------
    data: pd.DataFrame
        The dataframe to plot
    columns: List[str]
        The columns to show
    export: str
        Format to export image
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Reuse the caller's axes when provided, otherwise create a fresh figure.
    ax = (
        external_axes[0]
        if external_axes is not None
        else plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)[1]
    )
    # Index by date unless the date column itself was requested for plotting,
    # otherwise an error will occur.
    if "date" in data.columns and "date" not in columns:
        data = data.set_index("date")
    for col in columns:
        ax.plot(data[col], label=col)
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    ax.legend()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "plot",
    )
@log_start_end(log=logger)
def display_seasonality(
    data: pd.DataFrame,
    column: str = "close",
    export: str = "",
    m: Optional[int] = None,
    max_lag: int = 24,
    alpha: float = 0.05,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Plot seasonality (autocorrelation) from a dataset

    Parameters
    ----------
    data: pd.DataFrame
        The dataframe to plot
    column: str
        The column of the dataframe to analyze
    export: str
        Format to export image
    m: Optional[int]
        Optionally, a time lag to highlight on the plot. Default is none.
    max_lag: int
        The maximal lag order to consider. Default is 24.
    alpha: float
        The confidence interval to display. Default is 0.05.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Nothing to plot on an empty dataframe.
    if not data.empty:
        _, series = helpers.get_series(data, column)
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]
        # TODO: Add darts check_seasonality here
        plot_acf(
            series, m=m, max_lag=max_lag, alpha=alpha, axis=ax, default_formatting=False
        )
        theme.style_primary_axis(ax)
        if external_axes is None:
            theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "plot")
@log_start_end(log=logger)
def display_corr(
    dataset: pd.DataFrame,
    export: str = "",
    external_axes: Optional[List[plt.axes]] = None,
):
    """Plot correlation coefficients for dataset features

    Parameters
    ----------
    dataset : pd.DataFrame
        The dataset for calculating correlation coefficients
    export: str
        Format to export image
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    # correlation
    correlation = forecast_model.corr_df(dataset)
    sns.heatmap(
        correlation,
        vmin=correlation.values.min(),
        vmax=1,
        square=True,
        linewidths=0.1,
        annot=True,
        annot_kws={"size": 8},
        cbar_kws=dict(use_gridspec=True, location="left"),
        # Draw on the selected axes. Without ax=, seaborn uses the current
        # axes, silently ignoring any external_axes passed by the caller.
        ax=ax,
    )
    ax.set_title("Correlation Matrix")
    # Shrink and rotate tick labels so long column names stay legible.
    for t in ax.get_yticklabels():
        t.set_fontsize(7)
        t.set_fontweight("bold")
        t.set_horizontalalignment("left")
    for t in ax.get_xticklabels():
        t.set_fontsize(7)
        t.set_fontweight("bold")
        t.set_rotation(60)
        t.set_horizontalalignment("right")
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "plot")
@log_start_end(log=logger)
def show_df(
    data: pd.DataFrame,
    limit: int = 15,
    limit_col: int = 10,
    name: str = "",
    export: str = "",
):
    """Show a preview of a loaded dataset as a rich table.

    Parameters
    ----------
    data: pd.DataFrame
        The dataframe to show
    limit: int
        The number of rows to display. Defaults to 15.
    limit_col: int
        The maximum number of columns to display. Defaults to 10.
    name: str
        The name of the dataset, used in the title and export file name
    export: str
        Format to export data
    """
    console.print(
        f"[green]{name} dataset has shape (row, column): {data.shape}\n[/green]"
    )
    if len(data.columns) > limit_col:
        console.print(
            f"[red]Dataframe has more than {limit_col} columns."
            " If you have extra screen space, consider increasing the `limit_col`,"
            " else export to see all of the data.[/red]\n"
        )
        # Truncate the visible columns; the export below receives the truncated frame too.
        data = data.iloc[:, :limit_col]
    print_rich_table(
        data.head(limit),
        headers=list(data.columns),
        show_index=True,
        title=f"Dataset {name} | Showing {limit} of {len(data)} rows",
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), f"{name}_show", data
    )
@log_start_end(log=logger)
def describe_df(data: pd.DataFrame, name: str = "", export: str = ""):
    """Show descriptive statistics for a dataset.

    Parameters
    ----------
    data: pd.DataFrame
        The dataframe to describe
    name: str
        The name of the dataset, used in the title and export file name
    export: str
        Format to export data
    """
    new_df = forecast_model.describe_df(data)
    print_rich_table(
        new_df,
        # Use the computed stats table's own columns instead of recomputing describe().
        headers=list(new_df.columns),
        show_index=True,
        title=f"Showing Descriptive Statistics for Dataset {name}",
    )
    # Pass the stats table to export_data — previously no dataframe was passed,
    # so the export contained no data.
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), f"{name}_show", new_df
    )
@log_start_end(log=logger)
def export_df(data: pd.DataFrame, export: str, name: str = "") -> None:
    """Export a loaded dataset to file.

    Parameters
    ----------
    data: pd.DataFrame
        The dataframe to export
    export: str
        Format to export data
    name: str
        The name of the dataset, used for the export file name
    """
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), f"{name}_show", data
    )
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import seasonalnaive_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_seasonalnaive_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display SeasonalNaive Model

    Parameters
    ----------
    data: Union[pd.DataFrame, pd.Series]
        Data to forecast
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export predictions in raw format. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Trim the dataset to the requested analysis window before modelling.
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = seasonalnaive_model.get_seasonalnaive_data(
        data=data,
        target_column=target_column,
        seasonal_periods=seasonal_periods,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # The model helper returns empty lists when its data checks fail.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="SeasonalNaive",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, None, ticker_series, forecast_horizon=forecast_horizon
        )
__docformat__ = "numpy"
import logging
from typing import Union, List, Optional
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import linregr_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_linear_regression(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: str = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    output_chunk_length: int = 5,
    lags: Union[int, List[int]] = 14,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    explainability_raw: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Linear Regression Forecasting

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    train_split: float
        Train/val split. Defaults to 0.85.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    lags: Union[int, List[int]]
        lagged target values to predict the next time step
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the
        same as the previous day's closing price. Defaults to False.
    explainability_raw: bool
        Whether to show the raw explainability (SHAP) data. Defaults to False.
    export_pred_raw: bool
        Whether to export predictions in raw format. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Trim the dataset to the requested analysis window before modelling.
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = linregr_model.get_linear_regression_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        output_chunk_length=output_chunk_length,
        lags=lags,
    )
    # Guard against an empty result from the model helper, consistent with the
    # sibling view functions (BRNN/RWD/SeasonalNaive/TFT) which all bail out here.
    if ticker_series == []:
        return
    probabilistic = True
    helpers.plot_forecast(
        name="LINREGR",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
        )
    # SHAP
    helpers.plot_explainability(_model, explainability_raw)
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import trans_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_trans_forecast(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: str = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    d_model: int = 64,
    nhead: int = 4,
    num_encoder_layers: int = 3,
    num_decoder_layers: int = 3,
    dim_feedforward: int = 512,
    activation: str = "relu",
    dropout: float = 0.0,
    batch_size: int = 32,
    n_epochs: int = 300,
    learning_rate: float = 1e-3,
    model_save_name: str = "trans_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Transformer forecast

    Fits a Transformer model via ``trans_model.get_trans_data`` and plots the
    historical backtest plus the forward forecast through the shared helpers.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    train_split: float
        Train/val split. Defaults to 0.85.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    d_model: int
        The number of expected features in the encoder/decoder inputs. Defaults to 64.
    nhead: int
        The number of heads in the multi-head attention mechanism. Defaults to 4.
    num_encoder_layers: int
        The number of encoder layers in the encoder. Defaults to 3.
    num_decoder_layers: int
        The number of decoder layers in the decoder. Defaults to 3.
    dim_feedforward: int
        The dimension of the feedforward network model. Defaults to 512.
    activation: str
        The activation function of encoder/decoder intermediate layer, 'relu' or 'gelu'. Defaults to 'relu'.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.0.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 300.
    learning_rate: float
        Learning rate for the optimizer. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "trans_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export raw predicted values (forwarded to helpers.plot_forecast).
        Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Trim the series to the requested date window and validate columns.
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    # Keep output chunk length consistent with n_predict / covariate usage.
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = trans_model.get_trans_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        d_model=d_model,
        nhead=nhead,
        num_encoder_layers=num_encoder_layers,
        num_decoder_layers=num_decoder_layers,
        dim_feedforward=dim_feedforward,
        activation=activation,
        dropout=dropout,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        model_save_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
    )
    # An empty series signals that model training/prediction failed upstream.
    if ticker_series == []:
        return
    # Plot the forecast as a point (non-probabilistic) forecast.
    probabilistic = False
    helpers.plot_forecast(
        name="TRANS",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
        )
"""Regression Model"""
__docformat__ = "numpy"
import logging
from typing import Tuple, Union, List
import warnings
import pandas as pd
from darts import TimeSeries
from darts.models import RegressionModel
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_regression_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    past_covariates: str = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    output_chunk_length: int = 5,
    lags: Union[int, List[int]] = 14,
) -> Tuple[
    List[TimeSeries], List[TimeSeries], List[TimeSeries], float, RegressionModel
]:
    """Perform Regression Forecasting

    Fits a darts RegressionModel on the target series (optionally with lagged
    past covariates) and delegates backtesting/prediction to helpers.get_prediction.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    n_predict: int
        Days to predict. Defaults to 5.
    target_column: str
        Target column to forecast. Defaults to "close".
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    lags: Union[int, List[int]]
        lagged target values to predict the next time step
    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], float, RegressionModel]
        Adjusted Data series,
        Historical forecast by best RNN model,
        list of Predictions,
        Mean average precision error,
        Fitted Regression Model instance.
    """
    # No scaling and no likelihood: this is a plain point-forecast model.
    use_scalers = False
    probabilistic = False
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    past_covariate_whole, _, _ = helpers.past_covs(
        past_covariates, data, train_split, use_scalers
    )
    # Only lag the covariates when covariates were actually requested.
    if past_covariates is not None:
        lags_past_covariates = lags
    else:
        lags_past_covariates = None
    reg_model = RegressionModel(
        output_chunk_length=output_chunk_length,
        lags=lags,
        lags_past_covariates=lags_past_covariates,
    )
    # darts emits FutureWarnings during fit; silence them for a clean console.
    with warnings.catch_warnings():
        warnings.simplefilter(action="ignore", category=FutureWarning)
        reg_model.fit(ticker_series, past_covariate_whole)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "Regression",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        reg_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
"""Linear Regression Model"""
__docformat__ = "numpy"
import logging
from typing import Tuple, Union, List, Optional
import warnings
import pandas as pd
from darts import TimeSeries
from darts.models import LinearRegressionModel
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_linear_regression_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    past_covariates: str = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    output_chunk_length: int = 5,
    lags: Union[int, List[int]] = 14,
    random_state: Optional[int] = None,
) -> Tuple[
    List[TimeSeries],
    List[TimeSeries],
    List[TimeSeries],
    float,
    LinearRegressionModel,
]:
    """Perform Linear Regression Forecasting

    Fits a darts LinearRegressionModel with a quantile likelihood (so the
    forecast carries confidence intervals) and delegates backtesting and
    prediction to helpers.get_prediction.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    lags: Union[int, List[int]]
        lagged target values to predict the next time step
    random_state: Optional[int]
        The state for the model
    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], float, LinearRegressionModel]
        Adjusted Data series,
        Historical forecast by best RNN model,
        list of Predictions,
        Mean average precision error,
        Fitted Linear Regression Model instance.
    """
    use_scalers = False
    # The quantile likelihood makes this a probabilistic forecast.
    probabilistic = True
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    past_covariate_whole, _, _ = helpers.past_covs(
        past_covariates, data, train_split, use_scalers
    )
    # Only lag the covariates when covariates were actually requested.
    if past_covariates is not None:
        lags_past_covariates = lags
    else:
        lags_past_covariates = None
    lin_reg_model = LinearRegressionModel(
        output_chunk_length=output_chunk_length,
        lags=lags,
        lags_past_covariates=lags_past_covariates,
        likelihood="quantile",
        quantiles=[0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95],
        random_state=random_state,
    )
    # darts emits FutureWarnings during fit; silence them for a clean console.
    with warnings.catch_warnings():
        warnings.simplefilter(action="ignore", category=FutureWarning)
        if past_covariates is not None:
            lin_reg_model.fit(
                series=ticker_series, past_covariates=past_covariate_whole
            )
        else:
            lin_reg_model.fit(series=ticker_series)
    # Showing historical backtesting without retraining model (too slow).
    # Fix: the display name was previously "Logistic Regression", which
    # mislabelled this linear (quantile) regression model in the output.
    return helpers.get_prediction(
        "Linear Regression",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        lin_reg_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
"""Seasonal Naive Model"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
import warnings
import pandas as pd
from darts import TimeSeries
from statsforecast.models import SeasonalNaive
from statsforecast.core import StatsForecast
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
# pylint: disable=E1123
@log_start_end(log=logger)
def get_seasonalnaive_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[StatsForecast],
]:
    """Performs Seasonal Naive forecasting

    This is a wrapper around StatsForecast Seasonal Naive;
    we refer to this link for the original and more complete documentation of the parameters.
    https://nixtla.github.io/statsforecast/models.html#seasonalnaive

    Parameters
    ----------
    data : Union[pd.Series, pd.DataFrame]
        Input data.
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year (7 for daily data)
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], StatsForecast]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] - precision,
        Fit SeasonalNaive model object.
        On failure, ([], [], [], None, None) is returned instead.
    """
    use_scalers = False
    # statsforecast expects long-format data with unique_id/ds/y columns,
    # so convert the darts series into that layout first.
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    freq = ticker_series.freq_str
    ticker_series = ticker_series.pd_dataframe().reset_index()
    ticker_series.columns = ["ds", "y"]
    ticker_series.insert(0, "unique_id", target_column)
    try:
        # Model Init
        model = SeasonalNaive(
            season_length=int(seasonal_periods),
        )
        fcst = StatsForecast(df=ticker_series, models=[model], freq=freq, verbose=True)
    except Exception as e: # noqa
        error = str(e)
        # Older statsforecast releases do not accept these keyword arguments.
        if "got an unexpected keyword argument" in error:
            console.print(
                "[red]Please update statsforecast to version 1.1.3 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None
    # Historical backtesting
    last_training_point = int((len(ticker_series) - 1) * start_window)
    historical_fcast = fcst.cross_validation(
        h=int(forecast_horizon),
        test_size=len(ticker_series) - last_training_point,
        n_windows=None,
        input_size=min(10 * forecast_horizon, len(ticker_series)),
    )
    # train new model on entire timeseries to provide best current forecast
    # we have the historical fcast, now lets predict.
    forecast = fcst.forecast(int(n_predict))
    y_true = historical_fcast["y"].values
    y_hat = historical_fcast["SeasonalNaive"].values
    precision = helpers.mean_absolute_percentage_error(y_true, y_hat)
    console.print(f"SeasonalNaive obtains MAPE: {precision:.2f}% \n")
    # transform outputs back into darts series so they are compatible with
    # the shared plotting helpers
    use_scalers = False
    _, ticker_series = helpers.get_series(
        ticker_series.rename(columns={"y": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, forecast = helpers.get_series(
        forecast.rename(columns={"SeasonalNaive": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    # Keep only the first row per date from the backtest windows.
    _, historical_fcast = helpers.get_series(
        historical_fcast.groupby("ds")
        .head(1)
        .rename(columns={"SeasonalNaive": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    return (
        ticker_series,
        historical_fcast,
        forecast,
        float(precision),
        fcst,
    )
"""RNN Model"""
__docformat__ = "numpy"
import logging
import warnings
from typing import Union, Optional, List, Tuple
import pandas as pd
from darts import TimeSeries
from darts.models import BlockRNNModel
from darts.models.forecasting.torch_forecasting_model import GlobalForecastingModel
from darts.utils.likelihood_models import GaussianLikelihood
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.core.config.paths import USER_FORECAST_MODELS_DIRECTORY
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_brnn_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    train_split: float = 0.85,
    past_covariates: str = None,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    model_type: str = "LSTM",
    n_rnn_layers: int = 1,
    dropout: float = 0.0,
    batch_size: int = 32,
    n_epochs: int = 100,
    learning_rate: float = 1e-3,
    model_save_name: str = "brnn_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[GlobalForecastingModel],
]:
    """Performs Block RNN forecasting

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    model_type: str
        Either a string specifying the RNN module type ("RNN", "LSTM" or "GRU"). Defaults to "LSTM".
    n_rnn_layers: int
        Number of layers in the RNN module. Defaults to 1.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.0.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 100.
    learning_rate: float
        Learning rate for the optimizer. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "brnn_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset (all checkpoints will be
        discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training. Defaults to True.
    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], GlobalForecastingModel]
        Adjusted Data series,
        Historical forecast by best RNN model,
        list of Predictions,
        Mean average precision error,
        Best BRNN Model.
        On insufficient data, ([], [], [], None, None) is returned instead.
    """
    # TODO Check if torch GPU AVAILABLE
    use_scalers = True
    probabilistic = False
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    train, val = ticker_series.split_before(train_split)
    # Make sure both splits are long enough for the requested chunk lengths.
    valid = helpers.check_data_length(
        train, val, input_chunk_length, output_chunk_length
    )
    if not valid:
        return [], [], [], None, None
    (
        past_covariate_whole,
        past_covariate_train,
        past_covariate_val,
    ) = helpers.past_covs(past_covariates, data, train_split, use_scalers)
    # NOTE(review): previously labelled "Early Stopping", but no early-stopping
    # callback is configured on this model here -- TODO confirm intent.
    brnn_model = BlockRNNModel(
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        model=model_type,
        n_rnn_layers=n_rnn_layers,
        dropout=dropout,
        batch_size=batch_size,
        n_epochs=n_epochs,
        optimizer_kwargs={"lr": learning_rate},
        model_name=model_save_name,
        random_state=42,
        pl_trainer_kwargs=helpers.get_pl_kwargs(accelerator="cpu"),
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
        likelihood=GaussianLikelihood(),
        log_tensorboard=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    # fit model on train series for historical forecasting
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        helpers.fit_model(
            brnn_model,
            train,
            val,
            past_covariate_train,
            past_covariate_val,
        )
    # Reload the checkpoint that performed best during training.
    best_model = BlockRNNModel.load_from_checkpoint(
        model_name=model_save_name,
        best=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    helpers.print_tensorboard_logs(model_save_name, USER_FORECAST_MODELS_DIRECTORY)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "Block RNN",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        best_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
"""Multiple Seasonalities and Trend using Loess (MSTL) Model"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
import warnings
from darts import TimeSeries
import pandas as pd
from statsforecast.core import StatsForecast
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
# pylint: disable=E1123
@log_start_end(log=logger)
def get_mstl_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[StatsForecast],
]:
    """Performs MSTL forecasting

    This is a wrapper around StatsForecast MSTL;
    we refer to this link for the original and more complete documentation of the parameters.
    https://nixtla.github.io/statsforecast/models.html#mstl

    Parameters
    ----------
    data : Union[pd.Series, pd.DataFrame]
        Input data.
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year (7 for daily data)
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], StatsForecast]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] - precision,
        Fit MSTL model object.
        On failure, ([], [], [], None, None) is returned instead.
    """
    use_scalers = False
    # statsforecast expects long-format data with unique_id/ds/y columns,
    # so convert the darts series into that layout first.
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    freq = ticker_series.freq_str
    ticker_series = ticker_series.pd_dataframe().reset_index()
    ticker_series.columns = ["ds", "y"]
    ticker_series.insert(0, "unique_id", target_column)
    # check MSLT availability
    try:
        from statsforecast.models import MSTL  # pylint: disable=import-outside-toplevel
    except Exception as e:
        error = str(e)
        # MSTL only exists in statsforecast >= 1.2.0.
        if "cannot import name" in error:
            console.print(
                "[red]Please update statsforecast to version 1.2.0 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None
    try:
        # Model Init
        model = MSTL(
            season_length=int(seasonal_periods),
        )
        fcst = StatsForecast(df=ticker_series, models=[model], freq=freq, verbose=True)
    except Exception as e:  # noqa
        error = str(e)
        # Older statsforecast releases do not accept these keyword arguments.
        if "got an unexpected keyword argument" in error:
            console.print(
                "[red]Please update statsforecast to version 1.1.3 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None
    # Historical backtesting
    last_training_point = int((len(ticker_series) - 1) * start_window)
    historical_fcast = fcst.cross_validation(
        h=int(forecast_horizon),
        test_size=len(ticker_series) - last_training_point,
        n_windows=None,
        input_size=min(10 * forecast_horizon, len(ticker_series)),
    )
    # train new model on entire timeseries to provide best current forecast
    # we have the historical fcast, now lets predict.
    forecast = fcst.forecast(int(n_predict))
    y_true = historical_fcast["y"].values
    y_hat = historical_fcast["MSTL"].values
    precision = helpers.mean_absolute_percentage_error(y_true, y_hat)
    console.print(f"MSTL obtains MAPE: {precision:.2f}% \n")
    # transform outputs back into darts series so they are compatible with
    # the shared plotting helpers
    use_scalers = False
    _, ticker_series = helpers.get_series(
        ticker_series.rename(columns={"y": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, forecast = helpers.get_series(
        forecast.rename(columns={"MSTL": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    # Keep only the first row per date from the backtest windows.
    _, historical_fcast = helpers.get_series(
        historical_fcast.groupby("ds").head(1).rename(columns={"MSTL": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    return (
        ticker_series,
        historical_fcast,
        forecast,
        float(precision),
        fcst,
    )
"""Random Walk with Drift Model"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
import warnings
import numpy as np
import pandas as pd
from statsforecast.models import RandomWalkWithDrift
from statsforecast.core import StatsForecast
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
# pylint: disable=E1123
@log_start_end(log=logger)
def get_rwd_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    List[np.ndarray], List[np.ndarray], List[np.ndarray], Optional[float], StatsForecast
]:
    """Performs Random Walk with Drift forecasting

    This is a wrapper around StatsForecast RandomWalkWithDrift;
    we refer to this link for the original and more complete documentation of the parameters.
    https://nixtla.github.io/statsforecast/models.html#randomwalkwithdrift

    Parameters
    ----------
    data : Union[pd.Series, pd.DataFrame]
        Input data.
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    Returns
    -------
    Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], Optional[float], StatsForecast]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] precision,
        Fit RWD model object.
        NOTE(review): the first three elements are produced by helpers.get_series,
        so they appear to be darts series rather than raw ndarrays -- confirm
        the annotation against helpers.get_series.
        On failure, ([], [], [], None, None) is returned instead.
    """
    use_scalers = False
    # statsforecast expects long-format data with unique_id/ds/y columns,
    # so convert the darts series into that layout first.
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    freq = ticker_series.freq_str
    ticker_series = ticker_series.pd_dataframe().reset_index()
    ticker_series.columns = ["ds", "y"]
    ticker_series.insert(0, "unique_id", target_column)
    try:
        # Model Init
        model = RandomWalkWithDrift()
        fcst = StatsForecast(df=ticker_series, models=[model], freq=freq, verbose=True)
    except Exception as e:  # noqa
        error = str(e)
        # Older statsforecast releases do not accept these keyword arguments.
        if "got an unexpected keyword argument" in error:
            console.print(
                "[red]Please update statsforecast to version 1.1.3 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None
    # Historical backtesting
    last_training_point = int((len(ticker_series) - 1) * start_window)
    historical_fcast = fcst.cross_validation(
        h=int(forecast_horizon),
        test_size=len(ticker_series) - last_training_point,
        n_windows=None,
        input_size=min(10 * forecast_horizon, len(ticker_series)),
    )
    # train new model on entire timeseries to provide best current forecast
    # we have the historical fcast, now lets predict.
    forecast = fcst.forecast(int(n_predict))
    y_true = historical_fcast["y"].values
    y_hat = historical_fcast["RWD"].values
    precision = helpers.mean_absolute_percentage_error(y_true, y_hat)
    console.print(f"RWD obtains MAPE: {precision:.2f}% \n")
    # transform outputs back into series compatible with the shared
    # plotting helpers
    use_scalers = False
    _, ticker_series = helpers.get_series(
        ticker_series.rename(columns={"y": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, forecast = helpers.get_series(
        forecast.rename(columns={"RWD": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    # Keep only the first row per date from the backtest windows.
    _, historical_fcast = helpers.get_series(
        historical_fcast.groupby("ds").head(1).rename(columns={"RWD": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    return (
        ticker_series,
        historical_fcast,
        forecast,
        float(precision),
        fcst,
    )
__docformat__ = "numpy"
import logging
from typing import Union, List, Optional
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import regr_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_regression(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: str = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    output_chunk_length: int = 5,
    lags: Union[int, List[int]] = 14,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    explainability_raw: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Regression Forecasting

    Fits a regression model via ``regr_model.get_regression_data`` and plots
    the historical backtest, forward forecast, and optional diagnostics.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    lags: Union[int, List[int]]
        lagged target values to predict the next time step
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    explainability_raw: bool
        Whether to show raw explainability data (forwarded to
        helpers.plot_explainability). Defaults to False.
    export_pred_raw: bool
        Whether to export raw predicted values (forwarded to
        helpers.plot_forecast). Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Trim the series to the requested date window and validate columns.
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    # Keep output chunk length consistent with n_predict / covariate usage.
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = regr_model.get_regression_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        output_chunk_length=output_chunk_length,
        lags=lags,
    )
    # Plot the forecast as a point (non-probabilistic) forecast.
    probabilistic = False
    helpers.plot_forecast(
        name="REGR",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
        )
    # SHAP
    helpers.plot_explainability(_model, explainability_raw)
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import rnn_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_rnn_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    model_type: str = "LSTM",
    hidden_dim: int = 20,
    dropout: float = 0.0,
    batch_size: int = 32,
    n_epochs: int = 100,
    learning_rate: float = 1e-3,
    model_save_name: str = "rnn_model",
    training_length: int = 20,
    input_chunk_size: int = 14,
    force_reset: bool = True,
    save_checkpoints: bool = True,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display RNN forecast

    Cleans and validates the data, fits the RNN model via
    ``rnn_model.get_rnn_data`` and plots the historical backtest plus the
    forward forecast; optionally shows model residuals.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    model_type: str
        Either a string specifying the RNN module type ("RNN", "LSTM" or "GRU"). Defaults to "LSTM".
    hidden_dim: int
        Size for feature maps for each hidden RNN layer. Defaults to 20.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.0.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 100.
    learning_rate: float
        Learning rate. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "rnn_model".
    training_length: int
        Training length passed through to the underlying RNN model. Defaults to 20.
    input_chunk_size: int
        Input chunk size passed through to the underlying RNN model. Defaults to 14.
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training. Defaults to True.
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export the raw predicted values. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # The RNN model takes no past covariates, hence the explicit None.
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = rnn_model.get_rnn_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        model_type=model_type,
        hidden_dim=hidden_dim,
        dropout=dropout,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        model_save_name=model_save_name,
        training_length=training_length,
        input_chunk_size=input_chunk_size,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
    )
    # The model layer returns empty lists when the series failed validation.
    # NOTE(review): `== []` relies on the failure path returning a plain list;
    # a truthiness check (`if not ticker_series`) may be safer — confirm against
    # darts TimeSeries equality semantics before changing.
    if ticker_series == []:
        return
    past_covariates = None
    # The plotting helper treats the RNN forecast as probabilistic.
    probabilistic = True
    helpers.plot_forecast(
        name="RNN",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/rnn_view.py | 0.93456 | 0.381335 | rnn_view.py | pypi |
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import nbeats_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_nbeats_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: Optional[str] = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    num_stacks: int = 10,
    num_blocks: int = 3,
    num_layers: int = 4,
    layer_widths: int = 512,
    n_epochs: int = 300,
    learning_rate: float = 1e-3,
    batch_size: int = 800,
    model_save_name: str = "nbeats_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display NBEATS forecast

    Cleans and validates the data, fits the NBEATS model via
    ``nbeats_model.get_NBEATS_data`` and plots the historical backtest plus
    the forward forecast; optionally shows model residuals.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: Optional[str]
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    num_stacks: int
        The number of stacks that make up the whole model. Defaults to 10.
    num_blocks: int
        The number of blocks making up every stack. Defaults to 3.
    num_layers: int
        The number of fully connected layers preceding the final forking layers in each block
        of every stack. Defaults to 4.
    layer_widths: int
        Determines the number of neurons that make up each fully connected layer in each block
        of every stack. Defaults to 512.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults
        to 800.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 300.
    learning_rate: float
        Learning rate. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "nbeats_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset (all
        checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export the raw predicted values. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    # Cap/adjust the output chunk length against the prediction length.
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = nbeats_model.get_NBEATS_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        num_stacks=num_stacks,
        num_blocks=num_blocks,
        num_layers=num_layers,
        layer_widths=layer_widths,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        model_save_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
    )
    # The model layer returns empty lists when the series failed validation.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="NBEATS",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/nbeats_view.py | 0.9296 | 0.431165 | nbeats_view.py | pypi |
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import autoces_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_autoces_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Automatic Complex Exponential Smoothing Model

    Cleans and validates the data, fits the model via
    ``autoces_model.get_autoces_data`` and plots the historical backtest plus
    the forward forecast; optionally shows model residuals.

    Parameters
    ----------
    data : Union[pd.Series, np.array]
        Data to forecast
    dataset_name: str
        The name of the ticker to be predicted
    target_column: str
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export the raw predicted values. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # This model takes no past covariates, hence the explicit None.
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = autoces_model.get_autoces_data(
        data=data,
        target_column=target_column,
        seasonal_periods=seasonal_periods,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # The model layer returns empty lists when fitting failed or data was invalid.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="AutoCES",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, None, ticker_series, forecast_horizon=forecast_horizon
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/autoces_view.py | 0.946113 | 0.422624 | autoces_view.py | pypi |
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import theta_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_theta_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    seasonal: str = "M",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Theta forecast

    Cleans and validates the data, fits the Theta model via
    ``theta_model.get_theta_data`` (which also searches for the best theta
    parameter) and plots the historical backtest plus the forward forecast.

    Parameters
    ----------
    data : Union[pd.Series, np.array]
        Data to forecast
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    seasonal: str
        Seasonal component. One of [N, A, M]
        Defaults to "M" (multiplicative).
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export the raw predicted values. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # This model takes no past covariates, hence the explicit None.
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        best_theta,
        _model,
    ) = theta_model.get_theta_data(
        data=data,
        seasonal=seasonal,
        seasonal_periods=seasonal_periods,
        n_predict=n_predict,
        target_column=target_column,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # The model layer returns empty lists when fitting failed or data was invalid.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name=f"THETA_{best_theta:.2f}",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        # TODO: Figure out why residuals do not work with Theta
        console.print(
            "[red]Theta residual is currently not supported. Please stay tuned![/red]"
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/theta_view.py | 0.893603 | 0.406391 | theta_view.py | pypi |
"""Transformer Model"""
__docformat__ = "numpy"
import logging
import warnings
from typing import Tuple, Union, List, Optional
import pandas as pd
from darts import TimeSeries
from darts.models import TransformerModel
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.core.config.paths import USER_FORECAST_MODELS_DIRECTORY
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_trans_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    train_split: float = 0.85,
    past_covariates: Optional[str] = None,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    d_model: int = 64,
    nhead: int = 4,
    num_encoder_layers: int = 3,
    num_decoder_layers: int = 3,
    dim_feedforward: int = 512,
    activation: str = "relu",
    dropout: float = 0.0,
    batch_size: int = 32,
    n_epochs: int = 300,
    learning_rate: float = 1e-3,
    model_save_name: str = "trans_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[TransformerModel],
]:
    """Performs Transformer forecasting

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    n_predict: int
        Days to predict. Defaults to 5.
    target_column: str
        Target column to forecast. Defaults to "close".
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: Optional[str]
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    d_model: int
        The number of expected features in the encoder/decoder inputs. Defaults to 64.
    nhead: int
        The number of heads in the multi-head attention mechanism. Defaults to 4.
    num_encoder_layers: int
        The number of encoder layers in the encoder. Defaults to 3.
    num_decoder_layers: int
        The number of decoder layers in the encoder. Defaults to 3.
    dim_feedforward: int
        The dimension of the feedforward network model. Defaults to 512.
    activation: str
        The activation function of encoder/decoder intermediate layer, ‘relu’ or ‘gelu’. Defaults to 'relu'.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.0.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 300.
    learning_rate: float
        Learning rate. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "trans_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset (all checkpoints will be
        discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training. Defaults to True.

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], TransformerModel]
        Adjusted Data series,
        Historical forecast by best Transformer model,
        list of Predictions,
        Mean average precision error,
        Best Transformer Model.
    """
    # TODO Check if torch GPU AVAILABLE
    use_scalers = True
    # Transformer forecasts here are deterministic (no sampled quantiles).
    probabilistic = False
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    train, val = ticker_series.split_before(train_split)
    # Make sure both splits are long enough for the chosen chunk lengths.
    valid = helpers.check_data_length(
        train, val, input_chunk_length, output_chunk_length
    )
    if not valid:
        return [], [], [], None, None
    (
        past_covariate_whole,
        past_covariate_train,
        past_covariate_val,
    ) = helpers.past_covs(past_covariates, data, train_split, use_scalers)
    trans_model = TransformerModel(
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        d_model=d_model,
        nhead=nhead,
        num_encoder_layers=num_encoder_layers,
        num_decoder_layers=num_decoder_layers,
        dim_feedforward=dim_feedforward,
        activation=activation,
        dropout=dropout,
        batch_size=batch_size,
        n_epochs=n_epochs,
        optimizer_kwargs={"lr": learning_rate},
        model_name=model_save_name,
        random_state=42,
        pl_trainer_kwargs=helpers.get_pl_kwargs(accelerator="cpu"),
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
        log_tensorboard=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    # fit model on train series for historical forecasting
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        helpers.fit_model(
            trans_model,
            train,
            val,
            past_covariate_train,
            past_covariate_val,
        )
    # Reload the checkpoint with the best validation loss from training.
    best_model = TransformerModel.load_from_checkpoint(
        model_name=model_save_name, best=True, work_dir=USER_FORECAST_MODELS_DIRECTORY
    )
    helpers.print_tensorboard_logs(model_save_name, USER_FORECAST_MODELS_DIRECTORY)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "Transformer",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        best_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/trans_model.py | 0.914058 | 0.441312 | trans_model.py | pypi |
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import autoselect_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_autoselect_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Automatic Statistical Forecasting Model

    Cleans and validates the data, runs ``autoselect_model.get_autoselect_data``
    (which picks the best statistical model) and plots the historical backtest
    plus the forward forecast; optionally shows model residuals.

    Parameters
    ----------
    data : Union[pd.Series, np.array]
        Data to forecast
    dataset_name: str
        The name of the ticker to be predicted
    target_column: str
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export the raw predicted values. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # This model takes no past covariates, hence the explicit None.
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
        best_model,
    ) = autoselect_model.get_autoselect_data(
        data=data,
        target_column=target_column,
        seasonal_periods=seasonal_periods,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # The model layer returns empty lists when fitting failed or data was invalid.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name=f"Best Model: {best_model}",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, None, ticker_series, forecast_horizon=forecast_horizon
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/autoselect_view.py | 0.9439 | 0.410815 | autoselect_view.py | pypi |
"""Automatic ETS (Error, Trend, and Seasonality) Model"""
__docformat__ = "numpy"
import logging
from typing import List, Optional, Union, Tuple
import warnings
import pandas as pd
from darts import TimeSeries
from statsforecast.models import ETS
from statsforecast.core import StatsForecast
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
# pylint: disable=E1123
@log_start_end(log=logger)
def get_autoets_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[StatsForecast],
]:
    """Performs Automatic ETS forecasting

    This is a wrapper around StatsForecast ETS;
    we refer to this link for the original and more complete documentation of the parameters.

        https://nixtla.github.io/statsforecast/models.html#ets

    Parameters
    ----------
    data : Union[pd.Series, np.ndarray]
        Input data.
    target_column: str
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year (7 for daily data)
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], StatsForecast]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] - precision,
        StatsForecast object wrapping the fitted ETS model.
    """
    use_scalers = False
    # statsforecast preprocessing
    # when including more time series
    # the preprocessing is similar
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    freq = ticker_series.freq_str
    # StatsForecast expects a long dataframe with columns (unique_id, ds, y).
    ticker_series = ticker_series.pd_dataframe().reset_index()
    ticker_series.columns = ["ds", "y"]
    ticker_series.insert(0, "unique_id", target_column)
    try:
        # Model Init
        model_ets = ETS(
            season_length=int(seasonal_periods),
        )
        fcst = StatsForecast(
            df=ticker_series, models=[model_ets], freq=freq, verbose=True
        )
    except Exception as e:  # noqa
        error = str(e)
        # Older statsforecast versions do not accept these keyword arguments.
        if "got an unexpected keyword argument" in error:
            console.print(
                "[red]Please update statsforecast to version 1.1.3 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None
    # Historical backtesting
    last_training_point = int((len(ticker_series) - 1) * start_window)
    historical_fcast_ets = fcst.cross_validation(
        h=int(forecast_horizon),
        test_size=len(ticker_series) - last_training_point,
        n_windows=None,
        input_size=min(10 * forecast_horizon, len(ticker_series)),
    )
    # train new model on entire timeseries to provide best current forecast
    # we have the historical fcast, now lets predict.
    forecast = fcst.forecast(int(n_predict))
    y_true = historical_fcast_ets["y"].values
    y_hat = historical_fcast_ets["ETS"].values
    precision = helpers.mean_absolute_percentage_error(y_true, y_hat)
    console.print(f"AutoETS obtains MAPE: {precision:.2f}% \n")
    # transform outputs to make them compatible with
    # plots
    use_scalers = False
    _, ticker_series = helpers.get_series(
        ticker_series.rename(columns={"y": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, forecast = helpers.get_series(
        forecast.rename(columns={"ETS": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    # Keep only the first forecast per timestamp from the cross-validation output.
    _, historical_fcast_ets = helpers.get_series(
        historical_fcast_ets.groupby("ds")
        .head(1)
        .rename(columns={"ETS": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    return (
        ticker_series,
        historical_fcast_ets,
        forecast,
        float(precision),
        fcst,
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/autoets_model.py | 0.867948 | 0.493714 | autoets_model.py | pypi |
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import autoarima_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_autoarima_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Automatic ARIMA model.

    Cleans and validates the data, fits the model via
    ``autoarima_model.get_autoarima_data`` and plots the historical backtest
    plus the forward forecast; optionally shows model residuals.

    Parameters
    ----------
    data : Union[pd.Series, np.array]
        Data to forecast
    dataset_name: str
        The name of the ticker to be predicted
    target_column: str
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    export_pred_raw: bool
        Whether to export the raw predicted values. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # This model takes no past covariates, hence the explicit None.
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = autoarima_model.get_autoarima_data(
        data=data,
        target_column=target_column,
        seasonal_periods=seasonal_periods,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # The model layer returns empty lists when fitting failed or data was invalid.
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="AutoARIMA",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, None, ticker_series, forecast_horizon=forecast_horizon
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/autoarima_view.py | 0.944587 | 0.411643 | autoarima_view.py | pypi |
"""Automatic Comples Exponential Smoothing Model"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
import warnings
import pandas as pd
from darts import TimeSeries
from statsforecast.models import AutoCES
from statsforecast.core import StatsForecast
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
# pylint: disable=E1123
@log_start_end(log=logger)
def get_autoces_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[StatsForecast],
]:
    """Perform Automatic Complex Exponential Smoothing forecasting

    This is a wrapper around StatsForecast AutoCES;
    we refer to this link for the original and more complete documentation of the parameters.

    https://nixtla.github.io/statsforecast/models.html#autoces

    Parameters
    ----------
    data : Union[pd.Series, pd.DataFrame]
        Input data.
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year (7 for daily data)
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], StatsForecast]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] - precision,
        Fit CES model object.
    """
    use_scalers = False
    # statsforecast expects a long dataframe with (unique_id, ds, y) columns,
    # so convert the darts TimeSeries back into that layout.
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    freq = ticker_series.freq_str
    ticker_series = ticker_series.pd_dataframe().reset_index()
    ticker_series.columns = ["ds", "y"]
    ticker_series.insert(0, "unique_id", target_column)
    try:
        # Model Init
        model = AutoCES(
            season_length=int(seasonal_periods),
        )
        fcst = StatsForecast(df=ticker_series, models=[model], freq=freq, verbose=True)
    except Exception as e:  # noqa
        # Older statsforecast releases reject these keyword arguments;
        # surface an actionable upgrade hint instead of a raw traceback.
        error = str(e)
        if "got an unexpected keyword argument" in error:
            console.print(
                "[red]Please update statsforecast to version 1.1.3 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None
    # Historical backtesting over the tail of the series after start_window
    last_training_point = int((len(ticker_series) - 1) * start_window)
    historical_fcast = fcst.cross_validation(
        h=int(forecast_horizon),
        test_size=len(ticker_series) - last_training_point,
        n_windows=None,
        input_size=min(10 * forecast_horizon, len(ticker_series)),
    )
    # train new model on entire timeseries to provide best current forecast
    # we have the historical fcast, now lets predict.
    forecast = fcst.forecast(int(n_predict))
    y_true = historical_fcast["y"].values
    y_hat = historical_fcast["CES"].values
    precision = helpers.mean_absolute_percentage_error(y_true, y_hat)
    console.print(f"AutoCES obtains MAPE: {precision:.2f}% \n")
    # transform outputs back into darts TimeSeries so they are
    # compatible with the shared plotting helpers
    use_scalers = False
    _, ticker_series = helpers.get_series(
        ticker_series.rename(columns={"y": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, forecast = helpers.get_series(
        forecast.rename(columns={"CES": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, historical_fcast = helpers.get_series(
        historical_fcast.groupby("ds").head(1).rename(columns={"CES": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    return (
        ticker_series,
        historical_fcast,
        forecast,
        float(precision),
        fcst,
    )
"""NBEATS Model"""
__docformat__ = "numpy"
import logging
import warnings
from typing import Tuple, Union, List, Optional
import pandas as pd
from darts import TimeSeries
from darts.models import NBEATSModel
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.core.config.paths import USER_FORECAST_MODELS_DIRECTORY
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_NBEATS_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    past_covariates: Optional[str] = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    num_stacks: int = 10,
    num_blocks: int = 3,
    num_layers: int = 4,
    layer_widths: int = 512,
    batch_size: int = 800,
    n_epochs: int = 300,
    learning_rate: float = 1e-3,
    model_save_name: str = "nbeats_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[NBEATSModel],
]:
    """Perform NBEATS Forecasting

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: Optional[str]
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    num_stacks: int
        The number of stacks that make up the whole model. Defaults to 10.
    num_blocks: int
        The number of blocks making up every stack. Defaults to 3.
    num_layers: int
        The number of fully connected layers preceding the final forking layers in each block
        of every stack. Defaults to 4.
    layer_widths: int
        Determines the number of neurons that make up each fully connected layer in each block
        of every stack. Defaults to 512.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 800.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 300.
    learning_rate: float
        Learning rate for the optimizer. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "nbeats_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], NBEATSModel]
        Adjusted Data series,
        Historical forecast by best NBEATS model,
        list of Predictions,
        Mean average precision error,
        Best NBEATS Model.
    """
    use_scalers = True
    probabilistic = False
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    train, val = ticker_series.split_before(train_split)
    # bail out early if the series is too short for the requested chunk lengths
    valid = helpers.check_data_length(
        train, val, input_chunk_length, output_chunk_length
    )
    if not valid:
        return [], [], [], None, None
    (
        past_covariate_whole,
        past_covariate_train,
        past_covariate_val,
    ) = helpers.past_covs(past_covariates, data, train_split, use_scalers)
    nbeats_model = NBEATSModel(
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        generic_architecture=True,
        num_stacks=num_stacks,
        num_blocks=num_blocks,
        num_layers=num_layers,
        layer_widths=layer_widths,
        n_epochs=n_epochs,
        nr_epochs_val_period=1,
        batch_size=batch_size,
        optimizer_kwargs={"lr": learning_rate},
        model_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
        random_state=42,
        pl_trainer_kwargs=helpers.get_pl_kwargs(accelerator="cpu"),
        log_tensorboard=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    # fit model on train series for historical forecasting
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        helpers.fit_model(
            nbeats_model,
            train,
            val,
            past_covariate_train,
            past_covariate_val,
        )
    # reload the checkpoint with the best validation loss seen during training
    best_model = NBEATSModel.load_from_checkpoint(
        model_name=model_save_name, best=True, work_dir=USER_FORECAST_MODELS_DIRECTORY
    )
    helpers.print_tensorboard_logs(model_save_name, USER_FORECAST_MODELS_DIRECTORY)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "NBEATS",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        best_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
"""RNN Model"""
__docformat__ = "numpy"
import logging
import warnings
from typing import Tuple, Union, List, Optional
import pandas as pd
from darts import TimeSeries
from darts.models import RNNModel
from darts.utils.likelihood_models import GaussianLikelihood
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.core.config.paths import USER_FORECAST_MODELS_DIRECTORY
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_rnn_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    model_type: str = "LSTM",
    hidden_dim: int = 20,
    dropout: float = 0.0,
    batch_size: int = 32,
    n_epochs: int = 100,
    learning_rate: float = 1e-3,
    model_save_name: str = "rnn_model",
    training_length: int = 20,
    input_chunk_size: int = 14,
    force_reset: bool = True,
    save_checkpoints: bool = True,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[RNNModel],
]:
    """Perform RNN forecasting

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    model_type: str
        Either a string specifying the RNN module type ("RNN", "LSTM" or "GRU"). Defaults to "LSTM".
    hidden_dim: int
        Size for feature maps for each hidden RNN layer. Defaults to 20.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.0.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 100.
    learning_rate: float
        Learning rate for the optimizer. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "rnn_model".
    training_length: int
        The length of both input (target and covariates) and output (target) time series
        used during training. Defaults to 20.
    input_chunk_size: int
        Number of past time steps that are fed to the forecasting module at prediction time.
        Defaults to 14.
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], RNNModel]
        Adjusted Data series,
        Historical forecast by best RNN model,
        list of Predictions,
        Mean average precision error,
        Best RNN Model
    """
    # TODO Check if torch GPU AVAILABLE
    use_scalers = True
    # RNNModel here uses a GaussianLikelihood, so predictions are probabilistic
    probabilistic = True
    # RNNModel does not support past covariates; keep placeholders for the
    # shared helpers.get_prediction signature
    past_covariates = None
    past_covariate_whole = None
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    train, val = ticker_series.split_before(train_split)
    valid = helpers.check_data_length(train, val, input_chunk_size, 0)
    if not valid:
        return [], [], [], None, None
    rnn_model = RNNModel(
        model=model_type,
        hidden_dim=hidden_dim,
        dropout=dropout,
        batch_size=batch_size,
        n_epochs=n_epochs,
        optimizer_kwargs={"lr": learning_rate},
        model_name=model_save_name,
        random_state=42,
        training_length=training_length,
        input_chunk_length=input_chunk_size,
        pl_trainer_kwargs=helpers.get_pl_kwargs(accelerator="cpu"),
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
        likelihood=GaussianLikelihood(),
        log_tensorboard=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    # fit model on train series for historical forecasting
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        helpers.fit_model(rnn_model, train, val)
    # reload the checkpoint with the best validation loss seen during training
    best_model = RNNModel.load_from_checkpoint(
        model_name=model_save_name, best=True, work_dir=USER_FORECAST_MODELS_DIRECTORY
    )
    helpers.print_tensorboard_logs(model_save_name, USER_FORECAST_MODELS_DIRECTORY)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "RNN",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        best_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import autoets_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_autoets_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Automatic ETS (Error, Trend, Seasonality) Model

    Parameters
    ----------
    data : Union[pd.DataFrame, pd.Series]
        Data to forecast
    dataset_name: str
        The name of the ticker to be predicted
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # trim/validate the input before handing it to the model layer
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = autoets_model.get_autoets_data(
        data=data,
        target_column=target_column,
        seasonal_periods=seasonal_periods,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # the model layer returns empty lists on failure
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="AutoETS",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, None, ticker_series, forecast_horizon=forecast_horizon
        )
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import tcn_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_tcn_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    n_predict: int = 5,
    past_covariates: Optional[str] = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    dropout: float = 0.1,
    num_filters: int = 3,
    weight_norm: bool = True,
    dilation_base: int = 2,
    n_epochs: int = 300,
    learning_rate: float = 1e-3,
    batch_size: int = 32,
    model_save_name: str = "tcn_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display TCN forecast

    Parameters
    ----------
    data: Union[pd.DataFrame, pd.Series]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    dataset_name: str
        The name of the ticker to be predicted
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: Optional[str]
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.1.
    num_filters: int
        The number of filters in a convolutional layer of the TCN. Defaults to 3.
    weight_norm: bool
        Boolean value indicating whether to use weight normalization. Defaults to True.
    dilation_base: int
        The base of the exponent that will determine the dilation on every level. Defaults to 2.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 300.
    learning_rate: float
        Learning rate for the optimizer. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "tcn_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training. Defaults to True.
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # trim/validate the input before handing it to the model layer
    data = helpers.clean_data(
        data, start_date, end_date, target_column, past_covariates
    )
    if not helpers.check_data(data, target_column, past_covariates):
        return
    # clamp output_chunk_length so it stays consistent with n_predict/covariates
    output_chunk_length = helpers.check_output(
        output_chunk_length, n_predict, bool(past_covariates)
    )
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = tcn_model.get_tcn_data(
        data=data,
        n_predict=n_predict,
        target_column=target_column,
        past_covariates=past_covariates,
        train_split=train_split,
        forecast_horizon=forecast_horizon,
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        dropout=dropout,
        num_filters=num_filters,
        weight_norm=weight_norm,
        dilation_base=dilation_base,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        model_save_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
    )
    # the model layer returns empty lists on failure
    if ticker_series == []:
        return
    probabilistic = False
    helpers.plot_forecast(
        name="TCN",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=past_covariates,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        helpers.plot_residuals(
            _model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
        )
"""Automatic Statistical Forecast"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
import warnings
from darts import TimeSeries
import pandas as pd
from statsforecast.core import StatsForecast
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console, USE_COLOR
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
# pylint: disable=E1123,E1137
def precision_format(best_model: str, index: str, val: float) -> str:
    """Format a MAPE value as a percentage string.

    The entry belonging to the best model is wrapped in rich color
    markup when terminal colors are enabled.
    """
    plain = f"{val:.2f}%"
    if index == best_model and USE_COLOR:
        return f"[#00AAFF]{plain} [/#00AAFF]"
    return plain
@log_start_end(log=logger)
def get_autoselect_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[StatsForecast],
    Optional[Union[int, str]],
]:
    """Performs Automatic Statistical forecasting

    This is a wrapper around StatsForecast models;
    we refer to this link for the original and more complete documentation of the parameters.

    https://nixtla.github.io/statsforecast/models.html

    Parameters
    ----------
    data : Union[pd.Series, pd.DataFrame]
        Input data.
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year (7 for daily data)
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical

    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], StatsForecast, Union[int, str]]
        list[np.ndarray] - Adjusted Data series
        list[np.ndarray] - List of historical fcast values
        list[np.ndarray] - List of predicted fcast values
        Optional[float] - precision
        StatsForecast - Fit model object of the best model.
        Union[int, str] - Best model
    """
    use_scalers = False
    # statsforecast expects a long dataframe with (unique_id, ds, y) columns,
    # so convert the darts TimeSeries back into that layout.
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    freq = ticker_series.freq_str
    ticker_series = ticker_series.pd_dataframe().reset_index()
    ticker_series.columns = ["ds", "y"]
    ticker_series.insert(0, "unique_id", target_column)
    # check statsforecast dependency; these models only exist in newer releases
    try:
        from statsforecast.models import (  # pylint: disable=import-outside-toplevel
            AutoARIMA,
            ETS,
            AutoCES,
            MSTL,
            Naive,
            SeasonalNaive,
            SeasonalWindowAverage,
            RandomWalkWithDrift,
        )
    except Exception as e:
        error = str(e)
        if "cannot import name" in error:
            console.print(
                "[red]Please update statsforecast to version 1.2.0 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None, None
    try:
        # Model Init - candidate models competing on backtest MAPE
        season_length = int(seasonal_periods)
        models = [
            AutoARIMA(season_length=season_length),
            ETS(season_length=season_length),
            AutoCES(season_length=season_length),
            MSTL(season_length=season_length),
            SeasonalNaive(season_length=season_length),
            SeasonalWindowAverage(
                season_length=season_length, window_size=season_length
            ),
            RandomWalkWithDrift(),
        ]
        fcst = StatsForecast(
            df=ticker_series,
            models=models,
            freq=freq,
            verbose=True,
            fallback_model=Naive(),
        )
    except Exception as e:  # noqa
        error = str(e)
        if "got an unexpected keyword argument" in error:
            console.print(
                "[red]Please update statsforecast to version 1.1.3 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None, None
    # Historical backtesting over the tail of the series after start_window
    last_training_point = int((len(ticker_series) - 1) * start_window)
    historical_fcast = fcst.cross_validation(
        h=int(forecast_horizon),
        test_size=len(ticker_series) - last_training_point,
        n_windows=None,
        input_size=min(10 * forecast_horizon, len(ticker_series)),
    )
    # change name to AutoETS and AutoCES
    cols = [
        c if (c not in ["ETS", "CES"]) else f"Auto{c}" for c in historical_fcast.columns
    ]
    historical_fcast.columns = cols
    # train new model on entire timeseries to provide best current forecast
    # we have the historical fcast, now lets predict.
    forecast = fcst.forecast(int(n_predict))
    # change name to AutoETS and AutoCES
    cols = [c if (c not in ["ETS", "CES"]) else f"Auto{c}" for c in forecast.columns]
    forecast.columns = cols
    # Backtesting evaluation: MAPE of every candidate against the held-out data
    y_true = historical_fcast["y"].values
    model_names = historical_fcast.drop(columns=["ds", "cutoff", "y"]).columns
    precision_per_model = [
        helpers.mean_absolute_percentage_error(y_true, historical_fcast[model].values)
        for model in model_names
    ]
    precision: pd.DataFrame = pd.DataFrame(
        {"precision": precision_per_model}, index=model_names
    )
    precision = precision.sort_values(by="precision")
    # select best model (lowest MAPE)
    best_precision: float = precision["precision"].min()
    best_model = precision["precision"].idxmin()
    # print results
    # NOTE: Series.iteritems() was removed in pandas 2.0 - use items() instead
    precision["precision"] = [  # pylint: disable=unsupported-assignment-operation
        precision_format(best_model, index, val)
        for index, val in precision["precision"].items()
    ]
    console.print("\n")
    helpers.print_rich_table(
        precision,
        show_index=True,
        index_name="Model",
        headers=["MAPE"],
        title=f"Performance per model.\nBest model: [#00AAFF]{best_model}[/#00AAFF]",
    )
    # transform outputs back into darts TimeSeries so they are
    # compatible with the shared plotting helpers
    use_scalers = False
    _, ticker_series = helpers.get_series(
        ticker_series.rename(columns={"y": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, forecast = helpers.get_series(
        forecast.rename(columns={best_model: target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, historical_fcast = helpers.get_series(
        historical_fcast.groupby("ds")
        .head(1)
        .rename(columns={best_model: target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    return (ticker_series, historical_fcast, forecast, best_precision, fcst, best_model)
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import expo_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_expo_forecast(
    data: Union[pd.DataFrame, pd.Series],
    target_column: str = "close",
    dataset_name: str = "",
    trend: str = "A",
    seasonal: str = "A",
    seasonal_periods: int = 7,
    dampen: str = "F",
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
    export: str = "",
    residuals: bool = False,
    forecast_only: bool = False,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    naive: bool = False,
    export_pred_raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
):
    """Display Probabilistic Exponential Smoothing forecast

    Parameters
    ----------
    data : Union[pd.DataFrame, pd.Series]
        Data to forecast
    dataset_name: str
        The name of the ticker to be predicted
    target_column: Optional[str]:
        Target column to forecast. Defaults to "close".
    trend: str
        Trend component. One of [N, A, M]
        Defaults to ADDITIVE.
    seasonal: str
        Seasonal component. One of [N, A, M]
        Defaults to ADDITIVE.
    seasonal_periods: int
        Number of seasonal periods in a year
        If not set, inferred from frequency of the series.
    dampen: str
        Dampen the function
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    export: str
        Format to export data
    residuals: bool
        Whether to show residuals for the model. Defaults to False.
        Note: not currently supported for this model.
    forecast_only: bool
        Whether to only show dates in the forecasting range. Defaults to False.
    start_date: Optional[datetime]
        The starting date to perform analysis, data before this is trimmed. Defaults to None.
    end_date: Optional[datetime]
        The ending date to perform analysis, data after this is trimmed. Defaults to None.
    naive: bool
        Whether to show the naive baseline. This just assumes the closing price will be the same
        as the previous day's closing price. Defaults to False.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # trim/validate the input before handing it to the model layer
    data = helpers.clean_data(data, start_date, end_date, target_column, None)
    if not helpers.check_data(data, target_column, None):
        return
    (
        ticker_series,
        historical_fcast,
        predicted_values,
        precision,
        _model,
    ) = expo_model.get_expo_data(
        data=data,
        target_column=target_column,
        trend=trend,
        seasonal=seasonal,
        seasonal_periods=seasonal_periods,
        dampen=dampen,
        n_predict=n_predict,
        start_window=start_window,
        forecast_horizon=forecast_horizon,
    )
    # the model layer returns empty lists on failure
    if ticker_series == []:
        return
    probabilistic = True
    helpers.plot_forecast(
        name="PES",
        target_col=target_column,
        historical_fcast=historical_fcast,
        predicted_values=predicted_values,
        ticker_series=ticker_series,
        ticker_name=dataset_name,
        data=data,
        n_predict=n_predict,
        forecast_horizon=forecast_horizon,
        past_covariates=None,
        precision=precision,
        probabilistic=probabilistic,
        export=export,
        forecast_only=forecast_only,
        naive=naive,
        export_pred_raw=export_pred_raw,
        external_axes=external_axes,
    )
    if residuals:
        # residual plotting is not implemented for this model yet
        console.print("[red]Expo model does not support residuals at this time[/red]\n")
"""TCN Model"""
__docformat__ = "numpy"
import logging
from typing import Tuple, Union, List, Optional
import warnings
import pandas as pd
from darts import TimeSeries
from darts.models import TCNModel
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
from openbb_terminal.core.config.paths import USER_FORECAST_MODELS_DIRECTORY
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_tcn_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    n_predict: int = 5,
    past_covariates: str = None,
    train_split: float = 0.85,
    forecast_horizon: int = 5,
    input_chunk_length: int = 14,
    output_chunk_length: int = 5,
    dropout: float = 0.1,
    num_filters: int = 3,
    weight_norm: bool = True,
    dilation_base: int = 2,
    n_epochs: int = 300,
    learning_rate: float = 1e-3,
    batch_size: int = 32,
    model_save_name: str = "tcn_model",
    force_reset: bool = True,
    save_checkpoints: bool = True,
) -> Tuple[
    List[Optional[TimeSeries]],
    List[Optional[TimeSeries]],
    List[Optional[TimeSeries]],
    Optional[float],
    Optional[TCNModel],
]:
    """Perform TCN forecasting
    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame]
        Input Data
    target_column: str
        Target column to forecast. Defaults to "close".
    n_predict: int
        Days to predict. Defaults to 5.
    train_split: float
        Train/val split. Defaults to 0.85.
    past_covariates: str
        Multiple secondary columns to factor in when forecasting. Defaults to None.
    forecast_horizon: int
        Forecast horizon when performing historical forecasting. Defaults to 5.
    input_chunk_length: int
        Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
    output_chunk_length: int
        The length of the forecast of the model. Defaults to 5.
    dropout: float
        Fraction of neurons affected by Dropout. Defaults to 0.1.
    num_filters: int
        The number of filters in a convolutional layer of the TCN. Defaults to 3.
    weight_norm: bool
        Boolean value indicating whether to use weight normalization. Defaults to True.
    dilation_base: int
        The base of the exponent that will determine the dilation on every level. Defaults to 2.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 300.
    learning_rate: float
        Learning rate of the optimizer. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "tcn_model".
    force_reset: bool
        If set to True, any previously-existing model with the same name will be reset
        (all checkpoints will be discarded). Defaults to True.
    save_checkpoints: bool
        Whether or not to automatically save the untrained model and checkpoints from training.
        Defaults to True.
    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], Optional[float], Optional[TCNModel]]
        Adjusted Data series,
        Historical forecast by best TCN model,
        list of Predictions,
        Mean average precision error,
        Best TCN Model.
    """
    # TODO Check if torch GPU AVAILABLE
    use_scalers = True
    # The TCN is trained as a deterministic model here, so no probabilistic bands.
    probabilistic = False
    scaler, ticker_series = helpers.get_series(
        data, target_column, is_scaler=use_scalers
    )
    train, val = ticker_series.split_before(train_split)
    # Bail out early if train/val are too short for the requested chunk lengths.
    valid = helpers.check_data_length(
        train, val, input_chunk_length, output_chunk_length
    )
    if not valid:
        return [], [], [], None, None
    (
        past_covariate_whole,
        past_covariate_train,
        past_covariate_val,
    ) = helpers.past_covs(past_covariates, data, train_split, use_scalers)
    tcn_model = TCNModel(
        input_chunk_length=input_chunk_length,
        output_chunk_length=output_chunk_length,
        dropout=dropout,
        dilation_base=dilation_base,
        weight_norm=weight_norm,
        num_filters=num_filters,
        n_epochs=n_epochs,
        nr_epochs_val_period=1,
        batch_size=batch_size,
        optimizer_kwargs={"lr": learning_rate},
        model_name=model_save_name,
        force_reset=force_reset,
        save_checkpoints=save_checkpoints,
        random_state=42,
        pl_trainer_kwargs=helpers.get_pl_kwargs(accelerator="cpu"),
        log_tensorboard=True,
        work_dir=USER_FORECAST_MODELS_DIRECTORY,
    )
    # fit model on train series for historical forecasting
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        helpers.fit_model(
            tcn_model,
            train,
            val,
            past_covariate_train,
            past_covariate_val,
        )
    # Reload the checkpoint with the best validation score seen during training.
    best_model = TCNModel.load_from_checkpoint(
        model_name=model_save_name, best=True, work_dir=USER_FORECAST_MODELS_DIRECTORY
    )
    helpers.print_tensorboard_logs(model_save_name, USER_FORECAST_MODELS_DIRECTORY)
    # Showing historical backtesting without retraining model (too slow)
    return helpers.get_prediction(
        "TCN",
        probabilistic,
        use_scalers,
        scaler,
        past_covariates,
        best_model,
        ticker_series,
        past_covariate_whole,
        train_split,
        forecast_horizon,
        n_predict,
    )
```
from datetime import datetime, timedelta
from openbb_terminal import api
import os
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
import ipywidgets as widgets
import plotly.graph_objs as go
from IPython.display import display
%matplotlib inline
matplotlib_inline.backend_inline.set_matplotlib_formats("svg")
api.theme.applyMPLstyle()
df = api.forecast.load("weather.csv")
df = api.forecast.ema(df, target_column="MaxTemp")
api.forecast.expo(df, target_column="MaxTemp")
api.forecast.brnn(
df, target_column="MaxTemp", past_covariates="MinTemp,Rainfall,Evaporation"
)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/forecast.ipynb | 0.462959 | 0.221793 | forecast.ipynb | pypi |
"""Automatic ARIMA Model"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List, Tuple
import warnings
import pandas as pd
from darts import TimeSeries
from statsforecast.models import AutoARIMA
from statsforecast.core import StatsForecast
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forecast import helpers
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
# pylint: disable=E1123
@log_start_end(log=logger)
def get_autoarima_data(
    data: Union[pd.Series, pd.DataFrame],
    target_column: str = "close",
    seasonal_periods: int = 7,
    n_predict: int = 5,
    start_window: float = 0.85,
    forecast_horizon: int = 5,
) -> Tuple[
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[List[TimeSeries]],
    Optional[float],
    Optional[StatsForecast],
]:
    """Performs Automatic ARIMA forecasting
    This is a wrapper around StatsForecast AutoARIMA;
    we refer to this link for the original and more complete documentation of the parameters.
    https://nixtla.github.io/statsforecast/models.html#autoarima
    Parameters
    ----------
    data : Union[pd.Series, pd.DataFrame]
        Input data.
    target_column: Optional[str]
        Target column to forecast. Defaults to "close".
    seasonal_periods: int
        Number of seasonal periods in a year (7 for daily data)
        If not set, inferred from frequency of the series.
    n_predict: int
        Number of days to forecast
    start_window: float
        Size of sliding window from start of timeseries and onwards
    forecast_horizon: int
        Number of days to forecast when backtesting and retraining historical
    Returns
    -------
    Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries], float, StatsForecast]
        Adjusted Data series,
        List of historical fcast values,
        List of predicted fcast values,
        Optional[float] - precision,
        Fit AutoARIMA model object.
    """
    use_scalers = False
    # statsforecast preprocessing
    # when including more time series
    # the preprocessing is similar
    _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)
    freq = ticker_series.freq_str
    # StatsForecast expects a long dataframe with (unique_id, ds, y) columns.
    ticker_series = ticker_series.pd_dataframe().reset_index()
    ticker_series.columns = ["ds", "y"]
    ticker_series.insert(0, "unique_id", target_column)
    try:
        # Model Init
        model = AutoARIMA(season_length=int(seasonal_periods))
        fcst = StatsForecast(df=ticker_series, models=[model], freq=freq, verbose=True)
    except Exception as e:  # noqa
        error = str(e)
        # Older statsforecast versions do not accept these constructor kwargs.
        if "got an unexpected keyword argument" in error:
            console.print(
                "[red]Please update statsforecast to version 1.1.3 or higher.[/red]"
            )
        else:
            console.print(f"[red]{error}[/red]")
        return [], [], [], None, None
    # Historical backtesting
    last_training_point = int((len(ticker_series) - 1) * start_window)
    historical_fcast = fcst.cross_validation(
        h=int(forecast_horizon),
        test_size=len(ticker_series) - last_training_point,
        n_windows=None,
        input_size=min(10 * forecast_horizon, len(ticker_series)),
    )
    # train new model on entire timeseries to provide best current forecast
    # we have the historical fcast, now lets predict.
    forecast = fcst.forecast(int(n_predict))
    y_true = historical_fcast["y"].values
    y_hat = historical_fcast["AutoARIMA"].values
    precision = helpers.mean_absolute_percentage_error(y_true, y_hat)
    console.print(f"AutoARIMA obtains MAPE: {precision:.2f}% \n")
    # transform outputs to make them compatible with
    # plots
    use_scalers = False
    _, ticker_series = helpers.get_series(
        ticker_series.rename(columns={"y": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    _, forecast = helpers.get_series(
        forecast.rename(columns={"AutoARIMA": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    # Keep only the first forecast per timestamp from the overlapping CV windows.
    _, historical_fcast = helpers.get_series(
        historical_fcast.groupby("ds")
        .head(1)
        .rename(columns={"AutoARIMA": target_column}),
        target_column,
        is_scaler=use_scalers,
        time_col="ds",
    )
    return (
        ticker_series,
        historical_fcast,
        forecast,
        float(precision),
        fcst,
    )
__docformat__ = "numpy"
import logging
# pylint: disable=R1732, R0912
import os
from pathlib import Path
from threading import Thread
import webbrowser
from ast import literal_eval
from datetime import datetime
from typing import Any, Dict, List, Union
from ipykernel.kernelapp import IPKernelApp
import papermill as pm
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.core.config.paths import (
MISCELLANEOUS_DIRECTORY,
USER_PORTFOLIO_DATA_DIRECTORY,
USER_REPORTS_DIRECTORY,
USER_CUSTOM_REPORTS_DIRECTORY,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.forex.forex_controller import FX_TICKERS
logger = logging.getLogger(__name__)
# Location of this module; report templates ship next to it.
CURRENT_LOCATION = Path(__file__)
REPORTS_FOLDER = CURRENT_LOCATION.parent / "templates"
# User-provided custom report notebooks, keyed by file name.
USER_REPORTS = {
    filepath.name: filepath
    for file_type in ["ipynb"]
    for filepath in USER_CUSTOM_REPORTS_DIRECTORY.rglob(f"*.{file_type}")
}
# TODO: Trim available choices to avoid errors in notebooks.
# Ticker universes for argument autocompletion, loaded from bundled CSVs
# (first column of each file holds the symbols).
etf_data_path = CURRENT_LOCATION.parent / "data" / "etf_tickers.csv"
ETF_TICKERS = pd.read_csv(etf_data_path).iloc[:, 0].to_list()
crypto_data_path = CURRENT_LOCATION.parent / "data" / "crypto_tickers.csv"
CRYPTO_TICKERS = pd.read_csv(crypto_data_path).iloc[:, 0].to_list()
stocks_data_path = CURRENT_LOCATION.parent / "data" / "stocks_tickers.csv"
STOCKS_TICKERS = pd.read_csv(stocks_data_path).iloc[:, 0].to_list()
# Portfolio holdings files from the user data directory...
PORTFOLIO_HOLDINGS_FILES = {
    filepath.name: filepath
    for file_type in ["xlsx", "csv"]
    for filepath in (USER_PORTFOLIO_DATA_DIRECTORY / "holdings").rglob(f"*.{file_type}")
}
# ...plus the bundled example holdings.
PORTFOLIO_HOLDINGS_FILES.update(
    {
        filepath.name: filepath
        for file_type in ["xlsx", "csv"]
        for filepath in (
            MISCELLANEOUS_DIRECTORY / "portfolio_examples" / "holdings"
        ).rglob(f"*.{file_type}")
    }
)
# Autocompletion tree: report name -> {cli flag -> {choice: None}}.
REPORT_CHOICES = {
    "etf": {
        "--symbol": {c: None for c in ETF_TICKERS},
    },
    "forex": {
        "--symbol": {c: None for c in FX_TICKERS},
    },
    "portfolio": {
        "--transactions": {c: None for c in PORTFOLIO_HOLDINGS_FILES},
    },
    "economy": None,
    "equity": {
        "--symbol": {c: None for c in STOCKS_TICKERS},
    },
    "crypto": {
        "--symbol": {c: None for c in CRYPTO_TICKERS},
    },
    "forecast": {
        "--symbol": {c: None for c in STOCKS_TICKERS + ETF_TICKERS},
    },
}
@log_start_end(log=logger)
def get_arg_choices(report_name: str, arg_name: str) -> Union[List[str], None]:
    """Get argument choices from autocompletion for forex and portfolio reports.

    Parameters
    ----------
    report_name: str
        Name of report chosen.
    arg_name: str
        Argument to limit choices.

    Returns
    -------
    Union[List[str], None]
        List with argument choices from autocompletion, or None when unavailable.
    """
    if report_name not in ("forex", "portfolio"):
        return None
    flag = "--" + arg_name
    report_args = REPORT_CHOICES[report_name]  # type: ignore
    if flag not in report_args:
        return None
    return list(report_args[flag].keys())
@log_start_end(log=logger)
def get_reports_available(
    folder: Path = REPORTS_FOLDER, warn: bool = True
) -> List[str]:
    """Get Jupyter notebooks available in folder.

    Parameters
    ----------
    folder: Path
        Path to folder.
    warn: bool
        Whether to print a warning listing notebooks with invalid names.

    Returns
    -------
    List[str]
        List with names of notebooks available (without the .ipynb extension).
    """
    bad_format = []
    available = []
    for notebook in os.listdir(folder):
        if notebook.endswith(".ipynb"):
            # Names containing spaces cannot be used as report identifiers.
            if " " in notebook:
                bad_format.append(notebook)
            else:
                available.append(notebook[:-6])
    if bad_format and warn:
        s = ", ".join(bad_format)
        # Bug fix: the message previously claimed '_' was forbidden, but the
        # check above rejects names containing spaces.
        console.print(
            f"[red]Spaces are not allowed in the following names: {s}.[/red]"
        )
    return available
@log_start_end(log=logger)
def extract_parameters(input_path: str) -> Dict[str, str]:
    """Extract required parameters from notebook content.

    Parses the raw .ipynb JSON text for the cell tagged "parameters" and
    returns the ``name=value`` assignments found in it.

    Parameters
    ----------
    input_path: str
        Path of report to be rendered.

    Returns
    -------
    Dict[str, str]
        Dictionary with parameters names and values; empty if no
        parameters cell is found.
    """
    input_path = add_ipynb_extension(input_path)
    with open(input_path, encoding="utf-8") as file:
        notebook_content = file.read()
    # Look for the metadata cell to understand if there are parameters required by the report
    metadata_cell = """"metadata": {\n "tags": [\n "parameters"\n ]\n },\n "outputs":"""
    # Locate position of the data of interest and get parameters
    if notebook_content.find(metadata_cell) >= 0:
        position = notebook_content.find(metadata_cell)
    else:
        return {}
    metadata = notebook_content[position:]  # noqa: E203
    # Slice the raw "source": [...] list of the parameters cell out of the JSON text.
    cell_start = 'source": '
    cell_end = "]"
    start_position = metadata.find(cell_start)
    params = metadata[
        start_position : metadata.find(cell_end, start_position) + 1  # noqa: E203
    ]
    # Make sure that the parameters provided are relevant
    if "parameters" in notebook_content:
        # Each non-comment, non-blank source line is expected to look like
        # name = "value"; split on '=' for names and values.
        parameters_names = [
            param.split("=")[0][:-1]
            for param in literal_eval(params.strip('source": '))
            if param[0] not in ["#", "\n"]
        ]
        parameters_values = [
            param.split("=")[1][2:-2]
            for param in literal_eval(params.strip('source": '))
            if param[0] not in ["#", "\n"]
        ]
    # report_name is injected later by render_report, so it is not user-facing.
    if "report_name" in parameters_names:
        parameters_names.remove("report_name")
    parameters_dict = dict(zip(parameters_names, parameters_values))
    return parameters_dict
@log_start_end(log=logger)
def render_report(input_path: str, args_dict: Dict[str, str]):
    """Report rendering end to end.

    Workflow:
        1. Update parameters to use in notebook with received arguments
        2. Create output path
        3. Update parameters with output_path
        4. Validate and execute notebook in a new thread.

    Parameters
    ----------
    input_path: str
        Path of report to be rendered.
    args_dict: Dict[str, str]
        Dictionary with received arguments dictionary.
    """
    try:
        parameters_dict = update_parameters(input_path, args_dict)
        output_path = create_output_path(input_path, parameters_dict)
        # The output path doubles as the notebook's report_name parameter.
        parameters_dict["report_name"] = output_path
        if parameters_dict:
            # NOTE(review): the thread is joined immediately, so execution is
            # effectively synchronous — presumably kept for isolation; confirm.
            t = Thread(
                target=execute_notebook, args=(input_path, parameters_dict, output_path)
            )
            t.start()
            t.join()
    except Exception as e:
        console.print(f"[red]Cannot execute notebook - {e}")
@log_start_end(log=logger)
def update_parameters(input_path: str, args_dict: Dict[str, str]) -> Dict[str, Any]:
    """Update the notebook's parameter dictionary with received arguments.

    Parameters
    ----------
    input_path: str
        Path of report to be rendered.
    args_dict: Dict[str, str]
        Dictionary with received arguments dictionary.

    Returns
    -------
    Dict[str, Any]
        Dictionary with report parameters; unknown argument names are
        reported and ignored.
    """
    parameters_dict = extract_parameters(input_path)
    for name, value in args_dict.items():
        if name not in parameters_dict:
            console.print(f"[red]'{name}' not found in notebook parameters.[/red]")
            continue
        parameters_dict[name] = value
    return parameters_dict
@log_start_end(log=logger)
def create_output_path(input_path: str, parameters_dict: Dict[str, Any]) -> str:
    """Create path to save rendered report, thus the output path.

    Parameters
    ----------
    input_path: str
        Path of report to be rendered.
    parameters_dict: Dict[str, Any]
        Dictionary with report parameters.

    Returns
    -------
    str
        Path of rendered report, e.g.
        ``<USER_REPORTS_DIRECTORY>/20230101_120000_equity_AAPL``.
    """
    report_name = os.path.split(input_path)[-1]
    # Append all parameter values to the file name so runs are distinguishable.
    param_values = list(parameters_dict.values())
    args_to_output = f"_{'_'.join(param_values)}" if "_".join(param_values) else ""
    report_output_name = (
        f"{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        + "_"
        + f"{report_name}{args_to_output}"
    )
    output_path = str(USER_REPORTS_DIRECTORY / report_output_name)
    # Dots are replaced everywhere (including any extension) so the caller
    # can safely append ".ipynb"/".html" later.
    output_path = output_path.replace(".", "_")
    return output_path
@log_start_end(log=logger)
def execute_notebook(input_path, parameters, output_path):
    """Execute the input path's notebook with the parameters provided.
    Then, save it in the output path.

    Parameters
    ----------
    input_path: str
        Path of report to be rendered.
    parameters: Dict[str, Any]
        Dictionary with report parameters.
    output_path: str
        Path of rendered report (without extension).
    """
    input_path = add_ipynb_extension(input_path)
    try:
        result = pm.execute_notebook(
            input_path=input_path,
            output_path=output_path + ".ipynb",
            parameters=parameters,
        )
        # papermill records any in-notebook exception in the result metadata.
        if not result["metadata"]["papermill"]["exception"]:
            console.print(f"\n[green]Notebook:[/green] {output_path}.ipynb")
            if obbff.OPEN_REPORT_AS_HTML:
                # The notebook itself is expected to write the .html next to
                # the current working directory; open it in the browser.
                report_output_path = os.path.join(
                    os.path.abspath(os.path.join(".")), output_path + ".html"
                )
                webbrowser.open(f"file://{report_output_path}")
                console.print(f"\n[green]Report:[/green] {report_output_path}\n")
            else:
                console.print("\n")
        else:
            console.print("[red]\nReport .html couldn't be created.\n[/red]")
    except pm.PapermillExecutionError as e:
        console.print(
            f"[red]\nAn error was encountered in cell [{e.exec_count}], check the notebook:[/red]\n"
            f"{output_path}.ipynb\n"
        )
@log_start_end(log=logger)
def add_ipynb_extension(path: str) -> str:
    """Ensure a path ends with the .ipynb extension.

    Parameters
    ----------
    path: str
        Path to notebook file.

    Returns
    -------
    str
        Path with .ipynb appended when it was missing.
    """
    return path if path.endswith(".ipynb") else path + ".ipynb"
@log_start_end(log=logger)
def check_ipynb(path: str) -> str:
    """Check if there is .ipynb extension in path.
    This is useful to the controller type check.

    Parameters
    ----------
    path: str
        Path to notebook file.

    Returns
    -------
    str
        The path if it ends with .ipynb, else an empty string.
    """
    if not path.endswith(".ipynb"):
        console.print("[red]Please provide a .ipynb file.[/red]\n")
        return ""
    return path
def ipykernel_launcher(module_file: str, module_hist_file: str) -> None:
    """This function mocks 'ipykernel_launcher.py' launching a Jupyter notebook kernel.
    It is useful when running python commands inside a frozen application like our
    installer distribution, where sys.executable[0] is not the path to python
    interpreter, rather it is the path to the application executable.
    Problem:
    'papermill' was trying to execute the following command on a subprocess:
    $ .../bin/python -m ipykernel_launcher -f ... --HistoryManager.hist_file ...
    'papermill' was using '.../bin/python' because it is looks for the sys.executable[0],
    which most of the time leads to the python interpreter. In our frozen app,
    sys.executable[0] leads to 'OpenBB Terminal/.OpenBB/OpenBBTerminal', which in turn
    executes 'terminal.py'.
    This means that the command was being executed in 'terminal.py'. Consequently,
    one gets the following error message:
    $ terminal: error: unrecognized arguments: -m ipykernel_launcher -f ... --HistoryManager.hist_file ...
    Solution:
    Parse 'papermill' command in the 'terminal_controller', which is what follows
    'terminal.py' and here receive the parsed 'papermill' command arguments and
    route them to IPKernelApp as if this is 'ipykernel_launcher' module
    - the kernel is launched.
    Source: https://pyinstaller.org/en/stable/runtime-information.html#using-sys-executable-and-sys-argv-0

    Parameters
    ----------
    module_file: str
        Specified connection file.
    module_hist_file: str
        History manager file.
    """
    # Forward the parsed arguments to IPKernelApp exactly as the real
    # 'ipykernel_launcher' module invocation would.
    IPKernelApp.launch_instance(
        argv=[
            "-f",
            module_file,
            "--HistoryManager.hist_file",
            module_hist_file,
        ]
    )
import os
from typing import List
import base64
from jinja2 import Template
def price_card_stylesheet():
    """Load the default price-card CSS stylesheet from file.

    Returns
    -------
    str
        Contents of ``widgets/style.css``.
    """
    # Explicit encoding avoids platform-dependent decoding (e.g. cp1252 on Windows).
    with open(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "style.css"),
        encoding="utf-8",
    ) as f:
        style = f.read()
    return style
def price_card(ticker: str, price: str, price_color: str = "neutral_color") -> str:
    """Prepare a styled HTML element of a 128 by 128 price card.

    Parameters
    ----------
    ticker : str
        Instrument ticker for the price card
    price : str
        Instrument price as a string
    price_color : str, optional
        The color of the price. Accepts "up_color", "down_color" and default "neutral_color"

    Returns
    -------
    str
        HTML code as string
    """
    # Explicit encoding avoids platform-dependent decoding of the template.
    with open(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "card.j2"),
        encoding="utf-8",
    ) as f:
        template = Template(f.read())
    card = template.render(ticker=ticker, price=price, price_color=price_color)
    return card
def html_report_stylesheet():
    """Load the default report CSS stylesheet from file.

    Returns
    -------
    str
        Contents of ``widgets/report.css``.
    """
    # Explicit encoding avoids platform-dependent decoding (e.g. cp1252 on Windows).
    with open(
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "widgets", "report.css"
        ),
        encoding="utf-8",
    ) as f:
        style = f.read()
    return style
def html_report(title: str = "", stylesheet: str = "", body: str = "") -> str:
    """Prepare a styled HTML page element.

    Parameters
    ----------
    title : str, optional
        Contents of the title tag, by default ""
    stylesheet : str, optional
        Contents of the stylesheet tag, by default ""
    body : str, optional
        Contents of the body tag, by default ""

    Returns
    -------
    str
        HTML code as string
    """
    # Explicit encoding avoids platform-dependent decoding of the template.
    with open(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "report.j2"),
        encoding="utf-8",
    ) as f:
        template = Template(f.read())
    # The closing </html> is appended here because header() opens the tag.
    return template.render(title=title, stylesheet=stylesheet, body=body + "</html>")
def h(level: int, text: str) -> str:
    """Wrap text into an HTML `h` tag.

    Parameters
    ----------
    level : int
        HTML `h` level tag
    text : str
        Contents for `h` level tag

    Returns
    -------
    str
        HTML code as string
    """
    # f-strings already stringify; the previous explicit str(level) was redundant.
    return f"<h{level}>{text}</h{level}>"
def p(text: str, style: str = "") -> str:
    """Wrap text into an HTML `p` tag with an optional inline style.

    Parameters
    ----------
    text : str
        Contents for HTML `p` tag
    style: str
        Inline CSS applied to the paragraph

    Returns
    -------
    str
        HTML code as string
    """
    opening_tag = f'<p style="{style}">'
    return opening_tag + text + "</p>"
def row(elements: List) -> str:
    """HTML code elements to add in a single row.

    Parameters
    ----------
    elements : List
        List of HTML code elements to add in a row

    Returns
    -------
    str
        HTML code as string
    """
    # Explicit encoding avoids platform-dependent decoding of the template.
    with open(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "row.j2"),
        encoding="utf-8",
    ) as f:
        template = Template(f.read())
    return template.render(elements=elements)
def kpi(thresholds: List[float], sentences: List[str], value: float) -> str:
    """Add new key performance indicator to main page of report.

    Supports either one threshold with two sentences (bad/good) or two
    thresholds with three sentences (bad/middling/good).

    Parameters
    ----------
    thresholds : List[float]
        List of thresholds to take into account
    sentences : List[str]
        List of sentences to take into account. len(sentences) = len(thresholds)+1
    value : float
        Current value for the KPI in question

    Returns
    -------
    str
        HTML code as string; empty string on a malformed thresholds/sentences combination
    """
    single = len(thresholds) == 1 and len(sentences) == 2
    double = len(thresholds) == 2 and len(sentences) == 3
    if single:
        if value < thresholds[0]:
            return f'<p style="color:red">❌ {sentences[0]}. {value} < {thresholds[0]} </p>'
        return f'<p style="color:green">✅ {sentences[1]}. {value} > {thresholds[0]} </p>'
    if double:
        low, high = thresholds
        if value < low:
            return f'<p style="color:red">❌ {sentences[0]}. {value} < {low} </p>'
        if value > high:
            return f'<p style="color:green">✅ {sentences[2]}. {value} > {high} </p>'
        return f'<p style="color:orange">🟡 {sentences[1]}. {low} < {value} < {high} </p>'
    print("Error. KPI condition is not correctly set")
    return ""
def add_tab(
    title: str, htmlcode: str, comment_cell: bool = True, commment_text: str = ""
) -> str:
    """Add new tab section for the report. By default adds an opinion editable box at the start.

    Parameters
    ----------
    title : str
        Title associated with this tab / section
    htmlcode : str
        All HTML code contain within this section
    comment_cell : bool
        Whether to prepend an editable comment box
    commment_text : str
        Initial text for the comment box (note: parameter name keeps its
        historical spelling for backward compatibility)

    Returns
    -------
    str
        HTML code as string
    """
    parts = [f'<div id="{title}" class="tabcontent"></br>']
    if comment_cell:
        comment = commment_text or "No comment."
        parts.append(f"""<p style="border:3px; border-style:solid;
        border-color:#000000; padding: 1em; width: 1050px;" contentEditable="true">
        {comment}
        </p>""")
    parts.append(f"{htmlcode}</div>")
    return "".join(parts)
def tab_clickable_and_save_evt() -> str:
    """Adds javascript code within HTML at the bottom that allows the interactivity with tabs
    and saving the edited report to disk.

    Returns
    -------
    str
        javascript code in HTML to process interactive tabs
    """
    # The returned script is static: menu() switches tabs, saveReport()
    # downloads the current DOM as an HTML file, and
    # readCommentsAndUpdateValues() persists edits to <input> elements so
    # they survive the save. The SUMMARY tab is shown on load.
    return """
    <script>
    function menu(evt, menu_name) {
        var i, tabcontent, tablinks;
        tabcontent = document.getElementsByClassName("tabcontent");
        for (i = 0; i < tabcontent.length; i++) {
            tabcontent[i].style.display = "none";
        }
        tablinks = document.getElementsByClassName("tablinks");
        for (i = 0; i < tablinks.length; i++) {
            tablinks[i].className = tablinks[i].className.replace(" active", "");
            tablinks[i].style.backgroundColor = "white";
            tablinks[i].style.color = "black";
        }
        document.getElementById(menu_name).style.display = "block";
        evt.currentTarget.className += " active";
        evt.currentTarget.style.backgroundColor = "black";
        evt.currentTarget.style.color = "white";
    }
    function saveReport() {
        const markup = document.documentElement.innerHTML;
        var bl = new Blob([markup], { type: "text/html" });
        var a = document.createElement("a");
        a.href = URL.createObjectURL(bl);
        a.download = "openbb_report.html";
        a.hidden = true;
        document.body.appendChild(a);
        a.innerHTML = "Download";
        a.click();
    }
    function readCommentsAndUpdateValues() {
        var inputs, index;
        inputs = document.getElementsByTagName('input');
        for (index = 0; index < inputs.length; ++index) {
            const elem = inputs[index];
            elem.addEventListener('input', (e) => {
                console.log(elem.name, elem.value, e.target.value);
                elem.setAttribute("value", e.target.value)
            });
        }
    }
    window.onload=function(){
        readCommentsAndUpdateValues();
        menu(event, 'SUMMARY');
    };
    </script>"""
def tablinks(tabs: List[str]) -> str:
    """Adds list of clickable tabs/sections for the report, inserting a line
    break after every fifth tab.

    Parameters
    ----------
    tabs : List[str]
        List of tabs/sections for the reports.

    Returns
    -------
    str
        HTML code for interactive tabs
    """
    pieces = ['<div class="tab">']
    for position, tab in enumerate(tabs, start=1):
        pieces.append(
            f"""<button class="tablinks" onclick="menu(event, '{tab}')">{tab}</button>"""
        )
        if position % 5 == 0:
            pieces.append("</br>")
    pieces.append("</div>")
    return "".join(pieces)
def header(
    openbb_img, floppy_disk_img, author, report_date, report_time, report_tz, title
) -> str:
    """Creates reports header.

    Embeds both images inline as base64 data URIs so the saved report is
    self-contained; a missing/unreadable image is silently replaced by an
    empty string.

    Parameters
    ----------
    openbb_img : str
        Path to the OpenBB logo image
    floppy_disk_img : str
        Path to the floppy-disk image used on the save button
    author : str
        Name of author responsible by report
    report_date : str
        Date when report is run
    report_time : str
        Time when report is run
    report_tz : str
        Timezone associated with datetime of report being run
    title : str
        Title of the report

    Returns
    -------
    str
        HTML code for the report header (opens the <html> tag; it is
        closed later by html_report)
    """
    try:
        with open(openbb_img, "rb") as image_file:
            openbb_image_encoded = base64.b64encode(image_file.read())
        openbb_img = f"""
        <img src="data:image/png;base64,{openbb_image_encoded.decode()}"
        alt="OpenBB" style="width:144px;">"""
    except Exception:
        openbb_img = ""
    try:
        with open(floppy_disk_img, "rb") as image_file:
            floppy_disk_encoded = base64.b64encode(image_file.read())
        # Historical variable name; this is the floppy-disk save icon.
        flask_disk_save = f"""
        <center><img src="data:image/png;base64,{floppy_disk_encoded.decode()}"
        alt="OpenBB" style="width:40px;"></center>"""
    except Exception:
        flask_disk_save = ""
    return f"""
    <html lang="en" class="" data-lt-installed="true">
    <head>
    <meta charset="UTF-8">
    <title>OpenBB Terminal Report</title>
    <meta name="robots" content="noindex">
    </head>
    <div style="display:flex; margin-bottom:1cm;">
    {openbb_img}
    <div style="margin-left:2em">
    <p><b>Analyst:</b> {author}</p>
    <p><b>Date :</b> {report_date}</p>
    <p><b>Time :</b> {report_time} {report_tz}</p>
    <br/>
    <p>{title}</p>
    </div>
    <button style="margin-left:7em; border:0px solid black;
    background-color: transparent;" onclick="saveReport()">
    {flask_disk_save}Save changes
    </button>
    </div>
    """
def add_external_fig(figloc: str, style: str = "") -> str:
    """Embed an external image file into HTML as a base64 data URI.

    Parameters
    ----------
    figloc : str
        Relative figure location
    style: str
        Inline CSS applied to the img tag

    Returns
    -------
    str
        HTML code for figure, or an empty string if the file cannot be read
    """
    try:
        with open(figloc, "rb") as image_file:
            img = base64.b64encode(image_file.read()).decode()
        # Bug fix: use the final extension so paths containing extra dots
        # (e.g. "fig.v2.png" or dotted directories) yield the right subtype;
        # the old figloc.split(".")[1] picked the first dot segment.
        ext = os.path.splitext(figloc)[1].lstrip(".")
        # Bug fix: a space is required between the src and style attributes.
        return f'<img src="data:image/{ext};base64,{img}" style="{style}">'
    except Exception:
        return ""
## Notebook setup
```
import os
import io
import warnings
import datetime
import numpy as np
import base64
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
from IPython.display import HTML
from openbb_terminal.sdk import widgets
from openbb_terminal.sdk import openbb
from openbb_terminal.helper_classes import TerminalStyle
from openbb_terminal.core.config.paths import REPOSITORY_DIRECTORY
%matplotlib inline
matplotlib_inline.backend_inline.set_matplotlib_formats("svg")
warnings.filterwarnings("ignore")
try:
theme = TerminalStyle("light", "light", "light")
except:
pass
stylesheet = widgets.html_report_stylesheet()
```
## Select Ticker
```
# Parameters that will be replaced when calling this notebook
# Do not leave parameters blank as notebook will not run otherwise
symbol = "EURUSD"
report_name = "Forex Report for EURUSD"
ticker = symbol
author = ""
report_title = f"INVESTMENT RESEARCH REPORT ON {ticker.upper()}"
report_date = datetime.datetime.now().strftime("%d %B, %Y")
report_time = datetime.datetime.now().strftime("%H:%M")
report_timezone = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
report_title, report_date, report_time, report_timezone
from_symbol = ticker[:3]
to_symbol = ticker[3:]
last_year = datetime.datetime.now() - datetime.timedelta(days=365)
ticker_data = openbb.forex.load(
from_symbol=from_symbol,
to_symbol=to_symbol,
start_date=last_year.strftime("%Y-%m-%d"),
interval="1day",
resolution="d",
)
# 1 year historical data
ticker_data
last_year = datetime.datetime.now() - datetime.timedelta(days=365)
ticker_data_interval_1month = openbb.forex.load(
from_symbol=from_symbol,
to_symbol=to_symbol,
start_date=last_year.strftime("%Y-%m-%d"),
interval="1month",
resolution="d",
)
ticker_data_interval_1month = ticker_data_interval_1month.drop(columns="Volume")
ticker_data_interval_1month
```
## Data
```
# candle
if ticker_data["Volume"].isnull().all() or ticker_data["Volume"].eq(0).all():
fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(11, 5), dpi=150)
external_axes = [ax1]
else:
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(11, 5), dpi=150)
external_axes = [ax1, ax2]
openbb.forex.candle(
data=ticker_data,
to_symbol=to_symbol,
from_symbol=from_symbol,
external_axes=external_axes,
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
candle = f.getvalue().decode("utf-8")
# candle graph with moving averages 7/14 and 30/60
fig, ax = plt.subplots(figsize=(11, 3), dpi=150)
candle_ma_7_14 = openbb.forex.candle(
data=ticker_data,
to_symbol=to_symbol,
from_symbol=from_symbol,
ma=[7, 14],
external_axes=[ax],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
candle_ma_7_14 = f.getvalue().decode("utf-8")
fig, ax = plt.subplots(figsize=(11, 3), dpi=150)
candle_ma_30_60 = openbb.forex.candle(
data=ticker_data,
to_symbol=to_symbol,
from_symbol=from_symbol,
ma=[30, 60],
external_axes=[ax],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
candle_ma_30_60 = f.getvalue().decode("utf-8")
fwd = openbb.forex.fwd(
to_symbol=to_symbol,
from_symbol=from_symbol,
)
# forward rates
fwd
```
# Technical analysis
```
# simple moving average
ticker_data.index.names = ["date"]
fig, ax = plt.subplots(figsize=(11, 3), dpi=150)
openbb.ta.ma_chart(
data=ticker_data["Close"],
ma_type="SMA",
symbol=ticker,
external_axes=[ax],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
sma = f.getvalue().decode("utf-8")
# exponential moving average
fig, ax = plt.subplots(figsize=(11, 3), dpi=150)
openbb.ta.ma_chart(
data=ticker_data["Close"],
ma_type="EMA",
symbol=ticker,
external_axes=[ax],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
ema = f.getvalue().decode("utf-8")
# zero lag exponential moving average
fig, ax = plt.subplots(figsize=(11, 3), dpi=150)
openbb.ta.ma_chart(
data=ticker_data["Close"],
ma_type="ZLMA",
symbol=ticker,
external_axes=[ax],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
zlma = f.getvalue().decode("utf-8")
# commodity channel index
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(11, 5), dpi=150)
openbb.ta.cci_chart(
data=ticker_data,
symbol=ticker,
external_axes=[ax1, ax2],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
cci = f.getvalue().decode("utf-8")
# moving average convergence/divergence
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(11, 5), dpi=150)
openbb.ta.macd_chart(
data=ticker_data["Adj Close"],
symbol=ticker,
external_axes=[ax1, ax2],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
macd = f.getvalue().decode("utf-8")
# fisher transform
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(11, 11), dpi=150)
ax3 = ax2.twinx()
openbb.ta.fisher_chart(
data=ticker_data,
symbol=ticker,
external_axes=[ax1, ax2, ax3],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
fisher = f.getvalue().decode("utf-8")
# aroon indicator
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(11, 11), dpi=150)
openbb.ta.aroon_chart(
data=ticker_data,
symbol=ticker,
external_axes=[ax1, ax2, ax3],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
aroon = f.getvalue().decode("utf-8")
# bollinger bands
fig, ax = plt.subplots(figsize=(11, 3), dpi=150)
openbb.ta.bbands_chart(
data=ticker_data,
symbol=ticker,
external_axes=[ax],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
bbands = f.getvalue().decode("utf-8")
# fibonacci retracement
fig, ax1 = plt.subplots(figsize=(11, 5), dpi=150)
ax2 = ax1.twinx()
openbb.ta.fib_chart(
data=ticker_data,
symbol=ticker,
external_axes=[ax1, ax2],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
fib = f.getvalue().decode("utf-8")
```
# Quantitative Analysis
```
# summary
summary = openbb.qa.summary(data=ticker_data)
summary
# normality statistics and tests
normality = openbb.qa.normality(
data=ticker_data["Close"],
)
normality
# rolling mean and std deviation of prices
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(11, 5), dpi=150)
openbb.qa.rolling_chart(
symbol=ticker,
data=ticker_data,
target="Close",
external_axes=[ax1, ax2],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
rolling = f.getvalue().decode("utf-8")
# rolling kurtosis of distribution of prices
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(11, 5), dpi=150)
openbb.qa.kurtosis_chart(
symbol=ticker,
data=ticker_data,
target="Close",
external_axes=[ax1, ax2],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
kurtosis = f.getvalue().decode("utf-8")
# latest news
news = openbb.news(term=ticker).head(10)
news["published"] = pd.to_datetime(news["published"])
news["published"] = pd.to_datetime(
    news["published"], format="%d/%m/%y %H:%M:%S"
).dt.strftime("%Y-%m-%d")
news = news[["published", "title", "link"]]
news["link"] = news["link"].apply(lambda x: f'<a href="{x}">{x}</a>')
news = news.set_index("published")
# sort_index() is NOT in-place: it returns a new DataFrame, so the result
# must be assigned back or the displayed table is never date-ordered.
news = news.sort_index()
news
```
## Render the report template to a file
```
body = ""
img = (
str(REPOSITORY_DIRECTORY)
+ "/openbb_terminal/reports/templates/OpenBB_reports_logo.png"
)
floppy_disk_img = (
str(REPOSITORY_DIRECTORY) + "/openbb_terminal/reports/templates/floppy-disc.png"
)
body += widgets.header(
img,
floppy_disk_img,
author,
report_date,
report_time,
report_timezone,
f"<b>INVESTMENT RESEARCH REPORT:</b> {ticker}",
)
body += widgets.tablinks(
[
"SUMMARY",
"Overview",
"Technical Analysis",
"Quantitative Analysis",
]
)
comment = "Every analysis displayed considered every day data YTD"
# TODO: add KPIs
htmlcode = ""
# htmlcode += widgets.h(3, "KPIs")
# htmlcode += widgets.kpi(
# [30, 70],
# ["RSI level is oversold", "RSI level is normal", "RSI level is overbought"],
# 999,
# )
# htmlcode += widgets.kpi(
# [0],
# [
# "The sum of last 10 insider trading (in millions) was negative",
# "The sum of last 10 insider trading (in millions) was positive",
# ],
# 999,
# )
# htmlcode += widgets.kpi(
# [-0.1, 0.1],
# [
# "Last FinBrain sentiment is bearish",
# "Last FinBrain sentiment is neutral",
# "Last FinBrain sentiment is bullish",
# ],
# 999,
# )
# Summary tab
htmlcode += widgets.row([widgets.h(3, "Latest news") + news.to_html(escape=False)])
htmlcode += widgets.row(
[
widgets.h(3, "One year historical data for 1st day each month")
+ ticker_data_interval_1month.to_html()
]
)
body += widgets.add_tab("SUMMARY", htmlcode)
# Overview tab
htmlcode = ""
htmlcode = widgets.row([widgets.h(3, "Candlestick chart")])
htmlcode += widgets.row([candle])
htmlcode += widgets.row(
[widgets.h(3, "Candlestick chart with moving averages 7 and 14")]
)
htmlcode += widgets.row([candle_ma_7_14])
htmlcode += widgets.row(
[widgets.h(3, "Candlestick chart with moving averages 30 and 60")]
)
htmlcode += widgets.row([candle_ma_30_60])
htmlcode += widgets.row([widgets.h(3, "Forward rates")])
htmlcode += widgets.row([fwd.to_html()])
body += widgets.add_tab("Overview", htmlcode, True, comment)
# Technical Analysis tab
htmlcode = widgets.row([widgets.h(3, "Simple moving average")])
htmlcode += widgets.row([sma])
htmlcode += widgets.row([widgets.h(3, "Exponential moving average")])
htmlcode += widgets.row([ema])
htmlcode += widgets.row([widgets.h(3, "Zero lag exponential moving average")])
htmlcode += widgets.row([zlma])
htmlcode += widgets.row([widgets.h(3, "Commodity channel index")])
htmlcode += widgets.row([cci])
htmlcode += widgets.row([widgets.h(3, "Moving average convergence/divergence")])
htmlcode += widgets.row([macd])
htmlcode += widgets.row([widgets.h(3, "Fisher transform")])
htmlcode += widgets.row([fisher])
htmlcode += widgets.row([widgets.h(3, "Aroon indicator")])
htmlcode += widgets.row([aroon])
htmlcode += widgets.row([widgets.h(3, "Bollinger bands")])
htmlcode += widgets.row([bbands])
htmlcode += widgets.row([widgets.h(3, "Fibonacci retracement")])
htmlcode += widgets.row([fib])
body += widgets.add_tab("Technical Analysis", htmlcode, True, comment)
# Quantitative Analysis tab
htmlcode = widgets.row([widgets.h(3, "Summary") + summary.to_html()])
htmlcode += widgets.row(
[widgets.h(3, "Normality statistics and tests") + normality.to_html()]
)
htmlcode += widgets.row([widgets.h(3, "Rolling mean and std deviation of prices")])
htmlcode += widgets.row([rolling])
htmlcode += widgets.row([widgets.h(3, "Rolling kurtosis of distribution of prices")])
htmlcode += widgets.row([kurtosis])
body += widgets.add_tab("Quantitative Analysis", htmlcode, True, comment)
body += widgets.tab_clickable_and_save_evt()
report = widgets.html_report(title=report_name, stylesheet=stylesheet, body=body)
# # to save the results
with open(report_name + ".html", "w", encoding="utf-8") as fh:
fh.write(report)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/reports/templates/forex.ipynb | 0.512205 | 0.666483 | forex.ipynb | pypi |
## Notebook setup
```
import os
import io
import warnings
import datetime
import numpy as np
import base64
import pytz
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
from IPython.display import HTML
from openbb_terminal.reports import widget_helpers as widgets
from openbb_terminal.sdk import openbb
from openbb_terminal.helper_funcs import get_user_timezone
from openbb_terminal.helper_classes import TerminalStyle
from openbb_terminal.core.config.paths import REPOSITORY_DIRECTORY
%matplotlib inline
matplotlib_inline.backend_inline.set_matplotlib_formats("svg")
warnings.filterwarnings("ignore")
theme = TerminalStyle(
os.getenv("OPENBB_MPLSTYLE") or "light",
os.getenv("OPENBB_MPFSTYLE") or "light",
os.getenv("OPENBB_RICHSTYLE") or "light",
)
stylesheet = widgets.html_report_stylesheet()
# Parameters that will be replaced when calling this notebook
# Do not leave parameters blank as notebook will not run otherwise
report_name = "economy_report"
author = "OpenBB"
report_title = f"Economy report"
user_time_zone = pytz.timezone(get_user_timezone())
report_date = pd.Timestamp.now(tz=user_time_zone).strftime("%d %B, %Y")
report_time = pd.Timestamp.now(tz=user_time_zone).strftime("%H:%M")
report_timezone = pd.Timestamp.now(tz=user_time_zone).tzinfo
report_title, report_date, report_time, report_timezone
```
## Data
Summary
```
overview = openbb.economy.overview()
overview.set_index(list(overview.columns[0]), inplace=True)
futures = openbb.economy.futures()
futures.set_index(list(futures.columns[0]), inplace=True)
news = openbb.news("economy")
news.columns = news.columns.str.title()
news.set_index("Published", inplace=True)
rtps_data = openbb.economy.rtps()
fig = plt.figure(figsize=(11, 4), constrained_layout=True)
spec = fig.add_gridspec(1, 2)
ax1 = fig.add_subplot(spec[0, 1])
openbb.economy.rtps_chart(external_axes=[ax1])
ax1.set_title("")
ax1.set_ylabel("")
ax2 = fig.add_subplot(spec[0, 0])
chg = futures["%Chg"]
colors = [theme.up_color if x > 0 else theme.down_color for x in list(chg.values)]
ax2 = chg.plot(kind="barh", color=colors)
theme.style_primary_axis(ax2)
ax2.set_title("")
ax2.tick_params(axis="x", labelrotation=90)
ax2.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.2f"))
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
chart_0 = f.getvalue().decode("utf-8")
```
Events
```
events_1 = openbb.economy.events(
countries="United States",
start_date=datetime.datetime.now().strftime("%Y-%m-%d"),
)
if not events_1.empty:
events_1 = events_1.set_index("Date")
events_2 = openbb.economy.events(
countries=[
"Germany",
"Spain",
"Italy",
"United Kingdom",
"France",
"Portugal",
"Greece",
],
start_date=datetime.datetime.now().strftime("%Y-%m-%d"),
)
if not events_2.empty:
events_2 = events_2.set_index("Date")
events_3 = openbb.economy.events(
countries="China",
start_date=datetime.datetime.now().strftime("%Y-%m-%d"),
)
if not events_3.empty:
events_3 = events_3.set_index("Date")
```
Yields
```
ycrv = openbb.economy.ycrv(return_date=True)
if (isinstance(ycrv, pd.DataFrame) and not ycrv.empty) or (
not isinstance(ycrv, pd.DataFrame) and ycrv
):
ycrv_data_1, _ = ycrv
fig, (ax1) = plt.subplots(
nrows=1,
figsize=(16 / 9 * 5, 5),
dpi=150,
)
ycrv_country_1 = "United states"
openbb.economy.ycrv_chart(external_axes=[ax1])
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
ycrv_chart_1 = f.getvalue().decode("utf-8")
if not ycrv_data_1.empty:
ycrv_data_1.drop(["Maturity"], axis=1, inplace=True)
ycrv_data_1.index = [
"1M",
"3M",
"6M",
"1Y",
"2Y",
"3Y",
"5Y",
"7Y",
"10Y",
"20Y",
"30Y",
]
else:
ycrv = None
openbb.economy.treasury_maturities()
fig = plt.figure(figsize=(11, 7), constrained_layout=True)
spec = fig.add_gridspec(2, 2)
ax1 = fig.add_subplot(spec[0, 0])
openbb.economy.treasury_chart(
instruments=["nominal"],
maturities=["2y", "10y"],
frequency="daily",
start_date="2020-01-01",
external_axes=[ax1],
)
ax1.set_title("UST 2y-10y", loc="left")
ax2 = fig.add_subplot(spec[0, 1])
openbb.economy.treasury_chart(
instruments=["nominal"],
maturities=["10y", "30y"],
frequency="daily",
start_date="2020-01-01",
external_axes=[ax2],
)
ax2.set_title("UST 10y-30y", loc="left")
ax3 = fig.add_subplot(spec[1, :])
openbb.economy.treasury_chart(
instruments=["nominal", "inflation"],
maturities=["10y"],
frequency="daily",
start_date="2020-01-01",
external_axes=[ax3],
)
ax3.set_title("Nominal vs TIPS", loc="left")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
ust = f.getvalue().decode("utf-8")
```
Equity Market
```
valuation = openbb.economy.valuation()
performance = openbb.economy.performance()
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
data = openbb.economy.index(
indices=["sp500", "dow_djus", "nasdaq100", "russell3000", "cboe_vix"],
start_date="2000-03-09",
)
data = data.pct_change().dropna()
data = (1 + data.shift(periods=1, fill_value=0)).cumprod() - 1
ax.plot(data * 100)
ax.set_title("")
ax.legend(data.columns)
ax.set_ylabel("Cumulative returns (%)")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
eqty_0 = f.getvalue().decode("utf-8")
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
data = openbb.economy.index(
indices=["ftse100", "de_dax40", "fr_cac40", "it_mib40", "es_ibex35"],
start_date="2000-01-01",
)
data = data.pct_change().dropna()
data = (1 + data.shift(periods=1, fill_value=0)).cumprod() - 1
ax.plot(data * 100)
ax.set_title("")
ax.legend(data.columns)
ax.set_ylabel("Cumulative returns (%)")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
eqty_1 = f.getvalue().decode("utf-8")
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
data = openbb.economy.index(
indices=["jp_n225", "cn_csi300", "hk_china", "au_asx50"], start_date="2014-01-01"
)
data = data.pct_change().dropna()
data = (1 + data.shift(periods=1, fill_value=0)).cumprod() - 1
ax.plot(data * 100)
ax.set_title("")
ax.legend(data.columns)
ax.set_ylabel("Cumulative returns (%)")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
eqty_2 = f.getvalue().decode("utf-8")
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
data = openbb.economy.index(
indices=["in_nse50", "br_bvsp", "za_jo", "mx_ipc", "ru_moex"],
start_date="2013-01-01",
)
data = data.pct_change().dropna()
data = (1 + data.shift(periods=1, fill_value=0)).cumprod() - 1
ax.plot(data * 100)
ax.set_title("")
ax.legend(data.columns)
ax.set_title("")
ax.set_ylabel("Cumulative returns (%)")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
eqty_3 = f.getvalue().decode("utf-8")
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
data = openbb.economy.index(indices=["arca_xoi", "arca_pse"], start_date="2000-01-01")
data = data.pct_change().dropna()
data = (1 + data.shift(periods=1, fill_value=0)).cumprod() - 1
ax.plot(data * 100)
ax.set_title("")
ax.legend(data.columns)
ax.set_title("")
ax.set_ylabel("Cumulative returns (%)")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
eqty_4 = f.getvalue().decode("utf-8")
bigmac_res = openbb.economy.bigmac()
if (isinstance(bigmac_res, pd.DataFrame) and not bigmac_res.empty) or (
not isinstance(bigmac_res, pd.DataFrame) and bigmac_res
):
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
openbb.economy.bigmac_chart(
country_codes=[
"USA",
"CAN",
"ARG",
"PRT",
"NLD",
"FRA",
"UAE",
"LKA",
"VNM",
"RUS",
"SWE",
"GBR",
],
external_axes=[ax],
)
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
bigmac = f.getvalue().decode("utf-8")
else:
bigmac_res = pd.DataFrame()
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
openbb.economy.macro_chart(
parameters=["CONF", "IP"], start_date="2010-01-01", external_axes=[ax]
)
ax.set_title("")
ax1 = ax.twinx()
df, df_dict, _ = openbb.economy.macro(parameters=["CPI"], start_date="2010-01-01")
cpi_chg = df.pct_change(1) * 100
ax1.plot(cpi_chg, linestyle="--", color="red")
ax1.legend(
["CPI (% change)"],
bbox_to_anchor=(0.005, 0.3, 1, -0.52),
loc="upper left",
borderaxespad=0,
prop={"size": 9},
)
ax1.set_title("")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
macro_0 = f.getvalue().decode("utf-8")
fred = openbb.economy.fred(series_ids=["T10Y3M"], start_date="1980-01-01")
if (isinstance(fred, pd.DataFrame) and not fred.empty) or (
not isinstance(fred, pd.DataFrame) and fred
):
df, df_dict = fred
fig, ax = plt.subplots(figsize=(11, 5), dpi=150)
ax.plot(df.index, df["T10Y3M"], linestyle="--", color="red")
ax.set_ylim([-2, 6])
ax.legend(
[df_dict["T10Y3M"]["title"]],
bbox_to_anchor=(0, 0.40, 1, -0.52),
loc="upper right",
borderaxespad=0,
prop={"size": 9},
)
ax1 = ax.twinx()
openbb.economy.fred_chart(
series_ids=["A191RP1A027NBEA"],
start_date="1980-01-01",
external_axes=[ax1],
)
ax1.set_ylim([-3, 15])
ax1.set_title("")
fig.tight_layout()
f = io.BytesIO()
fig.savefig(f, format="svg")
fred_0 = f.getvalue().decode("utf-8")
else:
fred = None
```
## Render the report template to a file
```
body = ""
img = (
str(REPOSITORY_DIRECTORY)
+ "/openbb_terminal/reports/templates/OpenBB_reports_logo.png"
)
floppy_disk_img = (
str(REPOSITORY_DIRECTORY) + "/openbb_terminal/reports/templates/floppy-disc.png"
)
body += widgets.header(
img,
floppy_disk_img,
author,
report_date,
report_time,
report_timezone,
f"<b>ECONOMY REPORT: Leverage multiple sources to create custom reports.</b>",
)
body += widgets.tablinks(
[
"SUMMARY",
"Events",
"Yields",
"Equity Market",
"Macro Data",
]
)
# Summary
htmlcode = widgets.row(
[widgets.h(3, "Global markets (source: WSJ)") + overview.to_html()]
)
htmlcode += widgets.row(
[widgets.h(3, "Commodities (source: WSJ)") + futures.to_html()]
+ [
widgets.h(3, "Real-time sector performance (source: Alphavantage)")
+ rtps_data.to_html().replace("table", 'table style="display:inline"')
]
)
htmlcode += widgets.row([chart_0])
htmlcode += widgets.row(
[
widgets.h(3, "Top news on 'economy' keyword (source: Newsapi)")
+ news[:6].to_html()
]
)
body += widgets.add_tab("SUMMARY", htmlcode)
# Events
htmlcode = widgets.row([widgets.h(3, "Economic calendars (source: investing.com)")])
htmlcode += widgets.row([widgets.h(3, "United States") + events_1.to_html()])
htmlcode += widgets.row(
[widgets.h(3, "Select European Countries") + events_2.to_html()]
)
htmlcode += widgets.row([widgets.h(3, "China") + events_3.to_html()])
body += widgets.add_tab("Events", htmlcode)
# Yields
htmlcode = widgets.row([widgets.h(3, "US treasuries (source: EconDB)") + ust])
if ycrv:
htmlcode += widgets.row(
[widgets.h(3, f"{ycrv_country_1} yield curve (source: FRED)") + ycrv_chart_1]
)
htmlcode += widgets.row([widgets.h(3, "") + ycrv_data_1.to_html()])
body += widgets.add_tab("Yields", htmlcode)
# Equity market
htmlcode = widgets.row(
    [widgets.h(3, "Valuation (source: Finviz)") + valuation.to_html()]
)
# Fix copy-paste bug: the "Performance" row must render the `performance`
# table computed earlier in this notebook, not repeat the valuation table.
htmlcode += widgets.row(
    [widgets.h(3, "Performance (source: Finviz)") + performance.to_html()]
)
htmlcode += widgets.row([widgets.h(3, "US markets (source: YahooFinance)") + eqty_0])
htmlcode += widgets.row(
[widgets.h(3, "European markets (source: YahooFinance)") + eqty_1]
)
htmlcode += widgets.row([widgets.h(3, "APAC markets (source: YahooFinance)") + eqty_2])
htmlcode += widgets.row(
[widgets.h(3, "Emerging markets (source: YahooFinance)") + eqty_3]
)
htmlcode += widgets.row([widgets.h(3, "ARCA (source: YahooFinance)") + eqty_4])
body += widgets.add_tab("Equity Market", htmlcode)
# Macro
htmlcode = ""
if not bigmac_res.empty:
htmlcode += widgets.row([widgets.h(3, "Bigmac index (source: Nasdaq)") + bigmac])
htmlcode += widgets.row([widgets.h(3, "Leading indicators (source: EconDB)") + macro_0])
if fred:
htmlcode += widgets.row(
[widgets.h(3, "Recession indicator (source: FRED)") + fred_0]
)
body += widgets.add_tab("Macro Data", htmlcode)
body += widgets.tab_clickable_and_save_evt()
report = widgets.html_report(title=report_name, stylesheet=stylesheet, body=body)
# to save the results
with open(report_name + ".html", "w", encoding="utf-8") as fh:
fh.write(report)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/reports/templates/economy.ipynb | 0.453504 | 0.624923 | economy.ipynb | pypi |
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
from matplotlib import pyplot as plt
from matplotlib import ticker
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.alternative.oss import github_model
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_star_history(
    repo: str, export: str = "", external_axes: Optional[List[plt.Axes]] = None
) -> None:
    """Plots a repo's star history over time [Source: https://api.github.com].

    Parameters
    ----------
    repo : str
        Repository to display star history. Format: org/repo, e.g., openbb-finance/openbbterminal
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = github_model.get_stars_history(repo)
    # Only plot/export when the API actually returned data.
    if not df.empty:
        if external_axes is None:
            # We own the figure: create it at the terminal's default scale.
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            # Wrong number of external axes supplied: abort silently.
            return

        ax.plot(df["Date"], df["Stars"])
        ax.set_xlabel("Date")
        ax.set_ylabel("Stars")
        ax.set_title(f"Star History for {repo}")
        theme.style_primary_axis(ax)

        # Only trigger terminal rendering when we created the figure.
        if external_axes is None:
            theme.visualize_output()

        export_data(export, os.path.dirname(os.path.abspath(__file__)), "sh", df)
@log_start_end(log=logger)
def display_top_repos(
    sortby: str,
    categories: str = "",
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots a horizontal bar chart of top repositories [Source: https://api.github.com].

    Parameters
    ----------
    sortby : str
        Sort repos by {stars, forks}
    categories : str
        Check for repo categories. If more than one separate with a comma: e.g., finance,investment. Default: None
    limit : int
        Number of repos to look at
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = github_model.get_top_repos(categories=categories, sortby=sortby, limit=limit)
    # Only plot/export when the API actually returned data.
    if not df.empty:
        if external_axes is None:
            _, ax = plt.subplots(figsize=(14, 8), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return

        # One horizontal bar per repo; bar length is the star or fork count
        # depending on the requested sort key.
        for _, row in df.iterrows():
            ax.barh(
                y=row["full_name"],
                width=row["stargazers_count" if sortby == "stars" else "forks_count"],
                height=0.5,
            )

        ax.set_xlabel(sortby.capitalize())
        # Format x-axis tick values with the project's long-number formatter.
        ax.get_xaxis().set_major_formatter(
            ticker.FuncFormatter(
                lambda x, _: lambda_long_number_format_with_type_check(x)
            )
        )
        ax.yaxis.set_label_position("left")
        ax.yaxis.set_ticks_position("left")
        ax.set_ylabel("Repository Full Name")
        # Pick "category"/"categories" depending on whether multiple
        # comma-separated categories were supplied, for a grammatical title.
        category_substr = "ies" if "," in categories else "y"
        category_str = f"categor{category_substr} {categories} " if categories else ""
        ax.set_title(f"Top repos {category_str}sorted by {sortby}")
        theme.style_primary_axis(ax)
        if external_axes is None:
            theme.visualize_output()

        export_data(export, os.path.dirname(os.path.abspath(__file__)), "tr", df)
@log_start_end(log=logger)
def display_repo_summary(repo: str, export: str = "") -> None:
    """Prints table showing repo summary [Source: https://api.github.com].

    Parameters
    ----------
    repo : str
        Repository to display summary. Format: org/repo, e.g., openbb-finance/openbbterminal
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    summary_df = github_model.get_repo_summary(repo)
    # Nothing to show or export when the API returned no data.
    if summary_df.empty:
        return

    print_rich_table(
        summary_df,
        headers=list(summary_df.columns),
        show_index=False,
        title="Repo summary",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "rs",
        summary_df,
    )
__docformat__ = "numpy"
import argparse
import logging
from typing import List
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.alternative.oss import github_view
from openbb_terminal.alternative.oss import runa_view, runa_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_RAW_DATA_ALLOWED,
valid_repo,
parse_and_split_input,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class OSSController(BaseController):
    """Open Source Controller class.

    Terminal menu for open-source repository analytics: star history (sh),
    top repositories (tr), repo summary (rs), and the ROSS index of
    open-source startups (rossidx).
    """

    CHOICES_COMMANDS = ["sh", "tr", "rs", "rossidx"]
    PATH = "/alternative/oss/"
    CHOICES_GENERATION = True

    def __init__(self, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)

        # Enable tab-completion only when running interactively with
        # prompt-toolkit support turned on.
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)

    def parse_input(self, an_input: str) -> List:
        """Parse controller input

        Overrides the parent class function to handle github org/repo path convention.
        See `BaseController.parse_input()` for details.
        """
        # Covering the github "org/repo" convention in rs and sh commands:
        # the "/" in "org/repo" must not be treated as a command separator.
        custom_filters = [
            r"(sh .*?(\/[a-zA-Z0-9_\-\/]).*?((?=\/)|(?= )|(?=$)))",
            r"(rs .*?(\/[a-zA-Z0-9_\-\/]).*?((?=\/)|(?= )|(?=$)))",
        ]
        commands = parse_and_split_input(
            an_input=an_input, custom_filters=custom_filters
        )
        return commands

    def print_help(self):
        """Print help"""
        mt = MenuText("alternative/oss/", 80)
        mt.add_cmd("rossidx")
        mt.add_cmd("rs")
        mt.add_cmd("sh")
        mt.add_cmd("tr")
        console.print(text=mt.menu_text, menu="Alternative - Open Source")

    @log_start_end(log=logger)
    def call_sh(self, other_args: List[str]):
        """Process sh command: display star history for a repo."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="sh",
            description="Display a repo star history [Source: https://api.github.com]",
        )
        parser.add_argument(
            "-r",
            "--repo",
            type=str,
            required="-h" not in other_args,
            dest="repo",
            help="Repository to search for star history. Format: org/repo, e.g., openbb-finance/openbbterminal",
        )
        # Allow the repo to be given positionally, e.g. "sh org/repo".
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-r")
        ns_parser = self.parse_known_args_and_warn(
            parser,
            other_args,
            export_allowed=EXPORT_BOTH_RAW_DATA_AND_FIGURES,
            raw=True,
        )
        if ns_parser:
            # Validate the org/repo string before hitting the GitHub API.
            if valid_repo(ns_parser.repo):
                github_view.display_star_history(
                    repo=ns_parser.repo, export=ns_parser.export
                )

    @log_start_end(log=logger)
    def call_rs(self, other_args: List[str]):
        """Process rs command: display a repository summary."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rs",
            description="Display a repo summary [Source: https://api.github.com]",
        )
        parser.add_argument(
            "-r",
            "--repo",
            type=str,
            required="-h" not in other_args,
            dest="repo",
            help="Repository to search for repo summary. Format: org/repo, e.g., openbb-finance/openbbterminal",
        )
        # Allow the repo to be given positionally, e.g. "rs org/repo".
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-r")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED, raw=True
        )
        if ns_parser:
            if valid_repo(ns_parser.repo):
                github_view.display_repo_summary(
                    repo=ns_parser.repo, export=ns_parser.export
                )

    @log_start_end(log=logger)
    def call_rossidx(self, other_args: List[str]):
        """Process rossidx command: list startups from the ROSS index."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rossidx",
            description="""
            Display list of startups from ross index [Source: https://runacap.com/]
            Use --chart to display chart and -t {stars,forks} to set chart type
            """,
        )
        parser.add_argument(
            "-s",
            "--sortby",
            type=str,
            dest="sortby",
            nargs="+",
            help="Sort startups by column",
            default="Stars AGR [%]",
            choices=runa_model.SORT_COLUMNS,
            metavar="SORTBY",
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            dest="reverse",
            default=False,
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        parser.add_argument(
            "-c",
            "--chart",
            action="store_true",
            help="Flag to show chart",
            dest="show_chart",
            default=False,
        )
        parser.add_argument(
            "-g",
            "--growth",
            action="store_true",
            help="Flag to show growth chart",
            dest="show_growth",
            default=False,
        )
        parser.add_argument(
            "-t",
            "--chart-type",
            type=str,
            dest="chart_type",
            help="Chart type: {stars, forks}",
            default="stars",
            choices=["stars", "forks"],
        )
        ns_parser = self.parse_known_args_and_warn(
            parser,
            other_args,
            export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED,
            limit=10,
        )
        if ns_parser:
            # nargs="+" yields a list of words; re-join into the column name.
            runa_view.display_rossindex(
                sortby=" ".join(ns_parser.sortby),
                ascend=ns_parser.reverse,
                limit=ns_parser.limit,
                show_chart=ns_parser.show_chart,
                show_growth=ns_parser.show_growth,
                chart_type=ns_parser.chart_type,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_tr(self, other_args: List[str]):
        """Process tr command: display top repositories."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="tr",
            description="Display top repositories [Source: https://api.github.com]",
        )
        parser.add_argument(
            "-s",
            "--sortby",
            type=str,
            dest="sortby",
            help="Sort repos by {stars, forks}. Default: stars",
            default="stars",
            choices=["stars", "forks"],
        )
        parser.add_argument(
            "-c",
            "--categories",
            type=str,
            dest="categories",
            help="Filter by repo categories. If more than one separate with a comma: e.g., finance,investment",
            default="",
        )
        # Allow categories to be given positionally, e.g. "tr finance".
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-c")
        ns_parser = self.parse_known_args_and_warn(
            parser,
            other_args,
            export_allowed=EXPORT_BOTH_RAW_DATA_AND_FIGURES,
            raw=True,
            limit=10,
        )
        if ns_parser:
            github_view.display_top_repos(
                sortby=ns_parser.sortby,
                categories=ns_parser.categories,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )
import logging
import os
from typing import Optional, List
from matplotlib import pyplot as plt
from matplotlib import ticker
from openbb_terminal.alternative.oss import runa_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
lambda_long_number_format,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_rossindex(
    limit: int = 10,
    sortby: str = "Stars AGR [%]",
    ascend: bool = False,
    show_chart: bool = False,
    show_growth: bool = True,
    chart_type: str = "stars",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots list of startups from ross index [Source: https://runacap.com/]

    Parameters
    ----------
    limit: int
        Number of startups to search
    sortby: str
        Key by which to sort data. Default: Stars AGR [%]
    ascend: bool
        Flag to sort data descending
    show_chart: bool
        Flag to show chart with startups
    show_growth: bool
        Flag to show growth line chart
    chart_type: str
        Chart type {stars,forks}
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = runa_model.get_startups()
    if df.empty:
        console.print("\nError in runa request\n")
        return
    # Only sort when the requested key is a known column; otherwise show as-is.
    if sortby in runa_model.SORT_COLUMNS:
        df = df.sort_values(by=sortby, ascending=ascend)
    df = df.head(limit)
    if show_chart:
        if external_axes is None:
            _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 2):
            (ax1, _) = external_axes
        else:
            return
        # Reverse so the top-ranked startup appears at the top of the barh chart.
        for _, row in df[::-1].iterrows():
            ax1.barh(
                y=row["GitHub"],
                width=row["Forks" if chart_type == "forks" else "Stars"],
            )
        ax1.set_xlabel("Forks" if chart_type == "forks" else "Stars")
        ax1.get_xaxis().set_major_formatter(
            ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
        )
        ax1.grid(axis="y")
        ax1.yaxis.set_label_position("left")
        ax1.yaxis.set_ticks_position("left")
        ax1.set_ylabel("Company")
        ax1.yaxis.set_tick_params(labelsize=8)
        title = "Total Forks" if chart_type == "forks" else "Total Stars"
        ax1.set_title(f"ROSS Index - {title}")
        if external_axes is None:
            theme.visualize_output()
    if show_growth:
        if external_axes is None:
            fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 2):
            (ax, _) = external_axes
        else:
            return
        # SG/FG hold the annual growth multiples computed by runa_model.
        for _, row in df[::-1].iterrows():
            ax.barh(
                y=row["GitHub"],
                width=row["FG" if chart_type == "forks" else "SG"],
            )
        ax.yaxis.set_label_position("left")
        ax.yaxis.set_ticks_position("left")
        ax.set_ylabel("Company")
        ax.set_xlabel("Annual Growth [times]")
        ax.yaxis.set_tick_params(labelsize=8)
        title = (
            "Forks Annual Growth"
            if chart_type == "forks"
            else "Stars Annual Growth"
        )
        ax.set_title(f"ROSS Index - {title}")
        if external_axes is None:
            # BUG FIX: fig.tight_layout() was previously called unconditionally
            # and raised NameError when external axes were supplied, because
            # `fig` is only created in the internal-axes branch.
            fig.tight_layout()
            theme.visualize_output()
    # Growth helper columns are internal; hide them from the printed table.
    show_df = df.drop(["SG", "FG"], axis=1)
    show_df = show_df.fillna("")
    show_df["GitHub"] = show_df["GitHub"].str.wrap(10)
    print_rich_table(
        show_df.head(limit),
        headers=list(show_df.columns),
        floatfmt=".1f",
        show_index=False,
        title="ROSS Index - the fastest-growing open-source startups",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "runaidx",
        df,
    )
import logging
from typing import Union
import pandas as pd
import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter, RetryError
from urllib3.util.retry import Retry
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Columns of the ROSS-index table that callers may sort by; must match the
# headers produced by get_startups() below (used as argparse choices upstream).
SORT_COLUMNS = [
    "GitHub",
    "Company",
    "Country",
    "City",
    "Founded",
    "Raised [$M]",
    "Stars",
    "Forks",
    "Stars AGR [%]",
    "Forks AGR [%]",
]
@log_start_end(log=logger)
def _retry_session(
    url: str, retries: int = 3, backoff_factor: float = 1.0
) -> requests.Session:
    """Build a requests session that retries failed calls.

    Parameters
    ----------
    url: str
        Url to mount a session
    retries: int
        How many retries
    backoff_factor: float
        Backoff schema - time periods between retry

    Returns
    -------
    requests.Session
        Mounted session
    """
    # Retry on transient server errors only; client errors fail immediately.
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        status_forcelist=[500, 502, 503, 504],
        backoff_factor=backoff_factor,
    )
    http_session = requests.Session()
    http_session.mount(url, HTTPAdapter(max_retries=retry_policy))
    return http_session
@log_start_end(log=logger)
def _make_request(url: str) -> Union[BeautifulSoup, None]:
    """Helper method to scrape a page into BeautifulSoup.

    Parameters
    ----------
    url : str
        url to scrape

    Returns
    -------
    Union[BeautifulSoup, None]
        BeautifulSoup object, or None when the page does not exist (404)

    Raises
    ------
    RetryError
        When the connection fails after all retries.
    Exception
        For any other HTTP status >= 400.
    """
    headers = {"User-Agent": get_user_agent()}
    session = _retry_session("https://runacap.com/")
    try:
        req = session.get(url, headers=headers, timeout=5)
    except Exception as error:
        logger.exception(str(error))
        console.print(error)
        # BUG FIX: the previous message referenced CoinGecko — a copy-paste
        # from another scraper. This module scrapes runacap.com.
        raise RetryError(
            "Connection error. Couldn't connect to runacap.com and scrape the data. "
            "Please visit the Runa Capital site, and check if it's not under maintenance"
        ) from error
    if req.status_code == 404:
        return None
    if req.status_code >= 400:
        raise Exception(
            f"Couldn't connect to {url}. Status code: {req.status_code}. Reason: {req.reason}"
        )
    return BeautifulSoup(req.text, features="lxml")
@log_start_end(log=logger)
def get_startups() -> pd.DataFrame:
    """Get startups from ROSS index [Source: https://runacap.com/].

    Returns
    -------
    pd.DataFrame
        list of startups
    """
    response = requests.get("https://runacap.com/ross-index/", timeout=10)
    soup = BeautifulSoup(response.content, "html.parser")
    if soup:
        table = soup.find("table", {"id": "table_1"})
        if table:
            # Collect one list of cell texts per non-empty table row.
            records = []
            for table_row in table.find_all("tr"):
                cells = [cell.text for cell in table_row.find_all("td")]
                if len(cells) > 0:
                    records.append(cells)
            df = pd.DataFrame(
                records,
                columns=[
                    "GitHub",
                    "Company",
                    "Country",
                    "City",
                    "Founded",
                    "Raised [$M]",
                    "REPO",
                    "Language",
                    "Stars",
                    "Forks",
                    "Stars AGR [%]",
                    "Forks AGR [%]",
                    "SG",
                    "FG",
                ],
            )
            df = df.drop(columns="REPO")
            # Plain numeric columns vs. ones formatted with thousands separators.
            for col in ("SG", "FG", "Raised [$M]"):
                df[col] = pd.to_numeric(df[col], errors="coerce")
            for col in ("Stars", "Forks", "Stars AGR [%]", "Forks AGR [%]"):
                df[col] = pd.to_numeric(df[col].str.replace(",", ""), errors="coerce")
            return df
    return pd.DataFrame()
__docformat__ = "numpy"
# pylint: disable=C0201,W1401
import logging
from typing import Any, Dict, Optional
import math
from datetime import datetime
import requests
import pandas as pd
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@check_api_key(["API_GITHUB_KEY"])
def get_github_data(url: str, **kwargs) -> Optional[Dict[str, Any]]:
    """Get repository stats.

    Parameters
    ----------
    url: str
        github api endpoint
    params: dict
        params to pass to api endpoint

    Returns
    -------
    Optional[Dict[str, Any]]
        Parsed JSON payload on success, None on any error (a message is
        printed to the console).
    """
    response = requests.get(
        url,
        headers={
            "Authorization": f"token {cfg.API_GITHUB_KEY}",
            "User-Agent": get_user_agent(),
            # star+json media type includes starred_at timestamps.
            "Accept": "application/vnd.github.v3.star+json",
        },
        **kwargs,
    )
    status = response.status_code
    if status == 200:
        return response.json()
    if status in (401, 403):
        console.print("[red]Rate limit reached, please provide a GitHub API key.[/red]")
    elif status == 404:
        console.print("[red]Repo not found.[/red]")
    else:
        console.print(f"[red]Error occurred {response.json()}[/red]")
    return None
def search_repos(
    sortby: str = "stars", page: int = 1, categories: str = ""
) -> pd.DataFrame:
    """Get repos sorted by stars or forks. Can be filtered by categories.

    Parameters
    ----------
    sortby : str
        Sort repos by {stars, forks}
    categories : str
        Check for repo categories. If more than one separate with a comma: e.g., finance,investment. Default: None
    page : int
        Page number to get repos

    Returns
    -------
    pd.DataFrame
        Dataframe with repos
    """
    query_params: Dict[str, Any] = {"page": page}
    if not categories:
        # No category filter: query for anything with at least one star/fork.
        query_params["q"] = f"{sortby}:>1"
    else:
        query_params["sort"] = sortby
        query_params["q"] = categories.replace(",", "+")
    payload = get_github_data(
        "https://api.github.com/search/repositories", params=query_params
    )
    if payload and "items" in payload:
        return pd.DataFrame(payload["items"])
    return pd.DataFrame()
@log_start_end(log=logger)
def get_stars_history(repo: str) -> pd.DataFrame:
    """Get repository star history.

    Parameters
    ----------
    repo : str
        Repo to search for Format: org/repo, e.g., openbb-finance/openbbterminal

    Returns
    -------
    pd.DataFrame
        Dataframe with star history - Columns: Date, Stars
    """
    data = get_github_data(f"https://api.github.com/repos/{repo}")
    if data and "stargazers_count" in data:
        stars_number = data["stargazers_count"]
        stars: Dict[str, int] = {}
        pages = math.ceil(stars_number / 100)
        # BUG FIX: GitHub pagination is 1-based and page=0 is an alias for
        # page=1, so the former range(0, pages) fetched the first page twice
        # and never fetched the last one.
        for page in range(1, pages + 1):
            page_data = get_github_data(
                f"https://api.github.com/repos/{repo}/stargazers",
                params={"per_page": 100, "page": page},
            )
            if page_data:
                # Count stars per day.
                for star in page_data:
                    day = star["starred_at"].split("T")[0]
                    if day in stars:
                        stars[day] += 1
                    else:
                        stars[day] = 1
        # Convert per-day counts into a cumulative running total.
        sorted_keys = sorted(stars.keys())
        for i in range(1, len(sorted_keys)):
            stars[sorted_keys[i]] += stars[sorted_keys[i - 1]]
        # NOTE: a former `df.set_index("Date")` call discarded its result (a
        # no-op); removed. Date remains a regular column, as documented.
        return pd.DataFrame(
            {
                "Date": [
                    datetime.strptime(date, "%Y-%m-%d").date() for date in stars.keys()
                ],
                "Stars": stars.values(),
            }
        )
    return pd.DataFrame()
@log_start_end(log=logger)
def get_top_repos(sortby: str, limit: int = 50, categories: str = "") -> pd.DataFrame:
    """Get repos sorted by stars or forks. Can be filtered by categories.

    Parameters
    ----------
    sortby : str
        Sort repos by {stars, forks}
    categories : str
        Check for repo categories. If more than one separate with a comma: e.g., finance,investment. Default: None
    limit : int
        Number of repos to search for

    Returns
    -------
    pd.DataFrame
        Dataframe with repos
    """
    initial_top = limit
    df = pd.DataFrame(
        columns=[
            "full_name",
            "open_issues",
            "stargazers_count",
            "forks_count",
            "language",
            "created_at",
            "updated_at",
            "html_url",
        ]
    )
    if limit <= 100:
        df2 = search_repos(sortby=sortby, page=1, categories=categories)
        df = pd.concat([df, df2], ignore_index=True)
    else:
        # BUG FIX: the page counter previously started at 2, so the first
        # (highest-ranked) 100 repos were never fetched. Pages are 1-based.
        p = 1
        remaining = limit
        while remaining > 0:
            df2 = search_repos(sortby=sortby, page=p, categories=categories)
            df = pd.concat([df, df2], ignore_index=True)
            remaining -= 100  # each page holds up to 100 results
            p += 1
    return df.head(initial_top)
@log_start_end(log=logger)
def get_repo_summary(repo: str) -> pd.DataFrame:
    """Get repository summary.

    Parameters
    ----------
    repo : str
        Repo to search for Format: org/repo, e.g., openbb-finance/openbbterminal

    Returns
    -------
    pd.DataFrame
        Dataframe with repo summary - Columns: Metric, Value
    """
    data = get_github_data(f"https://api.github.com/repos/{repo}")
    if not data:
        return pd.DataFrame()
    release_data = get_github_data(f"https://api.github.com/repos/{repo}/releases")
    # BUG FIX: an empty release list is a perfectly valid answer (repo with
    # no releases); only a None response (request failure) should abort.
    # The former `if not release_data` returned an empty summary for such repos.
    if release_data is None:
        return pd.DataFrame()
    total_release_downloads: Any = "N/A"
    if len(release_data) > 0:
        total_release_downloads = 0
        for asset in release_data[0]["assets"]:
            total_release_downloads += asset["download_count"]
    obj: Dict[str, Any] = {
        "Metric": [
            "Name",
            "Owner",
            "Creation Date",
            "Last Update",
            "Topics",
            "Stars",
            "Forks",
            "Open Issues",
            "Language",
            "License",
            "Releases",
            "Last Release Downloads",
        ],
        "Value": [
            data["name"],
            data["owner"]["login"],
            data["created_at"].split("T")[0],
            data["updated_at"].split("T")[0],
            ", ".join(data["topics"]),
            data["stargazers_count"],
            data["forks"],
            data["open_issues"],
            data["language"],
            # BUG FIX: repos without a license have license == None, which
            # previously raised TypeError on the ["name"] subscript.
            data["license"]["name"] if data["license"] else "N/A",
            len(release_data),
            total_release_downloads,
        ],
    }
    return pd.DataFrame(obj)
__docformat__ = "numpy"
import logging
import warnings
import numpy as np
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Johns Hopkins CSSE COVID-19 time-series CSVs (raw GitHub URLs).
# Both files hold cumulative daily counts, one row per country/province.
global_cases_time_series = (
    "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_"
    "covid_19_time_series/time_series_covid19_confirmed_global.csv"
)
global_deaths_time_series = (
    "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_"
    "covid_19_time_series/time_series_covid19_deaths_global.csv"
)
@log_start_end(log=logger)
def get_global_cases(country: str) -> pd.DataFrame:
    """Get historical cases for given country.

    Parameters
    ----------
    country: str
        Country to search for

    Returns
    -------
    pd.DataFrame
        Dataframe of historical cases (daily new cases derived from the
        cumulative series); empty when the country is unknown
    """
    cases = pd.read_csv(global_cases_time_series)
    cases = cases.rename(columns={"Country/Region": "Country"})
    # Collapse province-level rows into one series per country, dates as index.
    cases = (
        cases.drop(columns=["Province/State", "Lat", "Long"])
        .groupby("Country")
        .agg("sum")
        .T
    )
    cases.index = pd.to_datetime(cases.index)
    if country not in cases:
        # BUG FIX: the message was missing the f-string prefix and printed
        # the literal text "{country}".
        console.print(f"[red]The selection `{country}` is not a valid option.[/red]\n")
        return pd.DataFrame()
    # diff() converts cumulative totals into daily new cases.
    cases = pd.DataFrame(cases[country]).diff().dropna()
    if cases.shape[1] > 1:
        return pd.DataFrame(cases.sum(axis=1))
    return cases
@log_start_end(log=logger)
def get_global_deaths(country: str) -> pd.DataFrame:
    """Get historical deaths for given country.

    Parameters
    ----------
    country: str
        Country to search for

    Returns
    -------
    pd.DataFrame
        Dataframe of historical deaths (daily new deaths derived from the
        cumulative series); empty when the country is unknown
    """
    deaths = pd.read_csv(global_deaths_time_series)
    deaths = deaths.rename(columns={"Country/Region": "Country"})
    # Collapse province-level rows into one series per country, dates as index.
    deaths = (
        deaths.drop(columns=["Province/State", "Lat", "Long"])
        .groupby("Country")
        .agg("sum")
        .T
    )
    deaths.index = pd.to_datetime(deaths.index)
    if country not in deaths:
        # BUG FIX: the message was missing the f-string prefix and printed
        # the literal text "{country}".
        console.print(f"[red]The selection `{country}` is not a valid option.[/red]\n")
        return pd.DataFrame()
    # diff() converts cumulative totals into daily new deaths.
    deaths = pd.DataFrame(deaths[country]).diff().dropna()
    if deaths.shape[1] > 1:
        return pd.DataFrame(deaths.sum(axis=1))
    return deaths
@log_start_end(log=logger)
def get_covid_ov(
    country: str,
    limit: int = 100,
) -> pd.DataFrame:
    """Get historical cases and deaths by country.

    Parameters
    ----------
    country: str
        Country to get data for
    limit: int
        Number of raw data to show

    Returns
    -------
    pd.DataFrame
        Dataframe of historical cases and deaths
    """
    # The JHU dataset uses the literal "US" for the United States.
    if country.lower() == "us":
        country = "US"
    cases_df = get_global_cases(country)
    deaths_df = get_global_deaths(country)
    if cases_df.empty or deaths_df.empty:
        return pd.DataFrame()
    combined = pd.concat([cases_df, deaths_df], axis=1)
    combined.columns = ["Cases", "Deaths"]
    combined.index = [idx.strftime("%Y-%m-%d") for idx in combined.index]
    return combined.tail(limit)
@log_start_end(log=logger)
def get_covid_stat(
    country: str,
    stat: str = "cases",
    limit: int = 10,
) -> pd.DataFrame:
    """Show historical cases and deaths by country.

    Parameters
    ----------
    country: str
        Country to get data for
    stat: str
        Statistic to get. Either "cases", "deaths" or "rates"
    limit: int
        Number of raw data to show

    Returns
    -------
    pd.DataFrame
        Dataframe of data for given country and statistic
    """
    # Guard clause: reject unknown statistics before any download.
    if stat not in ("cases", "deaths", "rates"):
        console.print("Invalid stat selected.\n")
        return pd.DataFrame()
    if stat == "cases":
        data = get_global_cases(country)
    elif stat == "deaths":
        data = get_global_deaths(country)
    else:  # "rates": deaths as a percentage of cases
        cases_df = get_global_cases(country)
        deaths_df = get_global_deaths(country)
        data = (deaths_df / cases_df).fillna(0) * 100
    data.index = [d.strftime("%Y-%m-%d") for d in data.index]
    return data.tail(limit)
@log_start_end(log=logger)
def get_case_slopes(
    days_back: int = 30,
    limit: int = 50,
    threshold: int = 10000,
    ascend: bool = False,
) -> pd.DataFrame:
    """Load cases and find slope over period.

    Parameters
    ----------
    days_back: int
        Number of historical days to consider
    limit: int
        Number of rows to show
    threshold: int
        Threshold for total number of cases
    ascend: bool
        Flag to sort in ascending order

    Returns
    -------
    pd.DataFrame
        Dataframe containing slopes
    """
    # Ignore the pandas warning for setting a slice with a value
    warnings.filterwarnings("ignore")
    data = pd.read_csv(global_cases_time_series)
    data = data.rename(columns={"Country/Region": "Country"})
    # Aggregate provinces per country, then diff() to get daily new cases.
    data = (
        (
            data.drop(columns=["Province/State", "Lat", "Long"])
            .groupby("Country")
            .agg("sum")
        )
        .diff()
        .dropna()
    )
    hist = data.iloc[:, -days_back:]
    # Keep only countries whose total cases over the window pass the threshold.
    hist["Sum"] = hist.sum(axis=1)
    hist = hist[hist.Sum > threshold].drop(columns="Sum")
    # Slope of a degree-1 least-squares fit over the window, per country.
    hist["Slope"] = hist.apply(
        lambda x: np.polyfit(np.arange(days_back), x, 1)[0], axis=1
    )
    hist_slope = pd.DataFrame(hist["Slope"])
    # FIX: the former if/else performed the identical sort in both branches;
    # collapsed into a single call.
    hist_slope.sort_values(by="Slope", ascending=ascend, inplace=True)
    return hist_slope.head(limit)
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.alternative.covid import covid_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def plot_covid_ov(
    country: str,
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots historical cases and deaths by country on a dual-axis chart.

    Cases are drawn (in thousands) on the left axis, deaths on the right;
    each series shows the raw daily values faded plus a 7-day rolling mean.

    Parameters
    ----------
    country: str
        Country to plot
    external_axes: Optional[List[plt.Axes]]
        External axes (2 axes are expected in the list), by default None
    """
    # Cases are scaled to thousands so both series fit readable axis ranges.
    cases = covid_model.get_global_cases(country) / 1_000
    deaths = covid_model.get_global_deaths(country)
    if cases.empty or deaths.empty:
        return
    ov = pd.concat([cases, deaths], axis=1)
    ov.columns = ["Cases", "Deaths"]
    # This plot has 2 axes
    if external_axes is None:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax2 = ax1.twinx()
    elif is_valid_axes_count(external_axes, 2):
        ax1, ax2 = external_axes
    else:
        return
    # Raw daily values drawn faint; 7-day rolling mean drawn solid on top.
    ax1.plot(cases.index, cases, color=theme.up_color, alpha=0.2)
    ax1.plot(cases.index, cases.rolling(7).mean(), color=theme.up_color)
    ax1.set_ylabel("Cases [1k]")
    theme.style_primary_axis(ax1)
    ax1.yaxis.set_label_position("left")
    ax2.plot(deaths.index, deaths, color=theme.down_color, alpha=0.2)
    ax2.plot(deaths.index, deaths.rolling(7).mean(), color=theme.down_color)
    ax2.set_title(f"Overview for {country.upper()}")
    ax2.set_xlabel("Date")
    ax2.set_ylabel("Deaths")
    theme.style_twin_axis(ax2)
    ax2.yaxis.set_label_position("right")
    ax1.set_xlim(ov.index[0], ov.index[-1])
    # Legend handles are recolored to match the series, not the default cycle.
    legend = ax2.legend(ov.columns)
    legend.legendHandles[1].set_color(theme.down_color)
    legend.legendHandles[0].set_color(theme.up_color)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def plot_covid_stat(
    country: str,
    stat: str = "cases",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots historical stat by country.

    Parameters
    ----------
    country: str
        Country to plot
    stat: str
        Statistic to plot. Either "cases", "deaths" or "rates"
    external_axes: Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list), by default None
    """
    # CONSISTENCY FIX: added the @log_start_end decorator that every other
    # function in this module carries.
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    if stat == "cases":
        # Cases are scaled to thousands for a readable axis.
        data = covid_model.get_global_cases(country) / 1_000
        ax.set_ylabel(stat.title() + " [1k]")
        color = theme.up_color
    elif stat == "deaths":
        data = covid_model.get_global_deaths(country)
        ax.set_ylabel(stat.title())
        color = theme.down_color
    elif stat == "rates":
        cases = covid_model.get_global_cases(country)
        deaths = covid_model.get_global_deaths(country)
        data = (deaths / cases).fillna(0) * 100
        ax.set_ylabel(stat.title() + " (Deaths/Cases)")
        color = theme.get_colors(reverse=True)[0]
    else:
        console.print("Invalid stat selected.\n")
        return
    # Raw daily values drawn faint; 7-day rolling mean drawn solid on top.
    ax.plot(data.index, data, color=color, alpha=0.2)
    ax.plot(data.index, data.rolling(7).mean(), color=color)
    ax.set_title(f"{country} COVID {stat}")
    ax.set_xlim(data.index[0], data.index[-1])
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_covid_ov(
    country: str,
    raw: bool = False,
    limit: int = 10,
    export: str = "",
    plot: bool = True,
) -> None:
    """Prints table showing historical cases and deaths by country.

    Parameters
    ----------
    country: str
        Country to get data for
    raw: bool
        Flag to display raw data
    limit: int
        Number of raw data to show
    export: str
        Format to export data
    plot: bool
        Flag to display historical plot
    """
    # The JHU dataset uses the literal "US" for the United States.
    if country.lower() == "us":
        country = "US"
    if plot:
        plot_covid_ov(country)
    # BUG FIX: data was only fetched inside the `raw` branch, so exporting
    # without --raw raised NameError; fetch whenever either output needs it.
    if raw or export:
        data = covid_model.get_covid_ov(country, limit)
    if raw:
        print_rich_table(
            data,
            headers=[x.title() for x in data.columns],
            show_index=True,
            index_name="Date",
            title=f"[bold]{country} COVID Numbers[/bold]",
        )
    if export:
        export_data(export, os.path.dirname(os.path.abspath(__file__)), "ov", data)
@log_start_end(log=logger)
def display_covid_stat(
    country: str,
    stat: str = "cases",
    raw: bool = False,
    limit: int = 10,
    export: str = "",
    plot: bool = True,
) -> None:
    """Prints table showing a historical COVID statistic by country.

    Parameters
    ----------
    country: str
        Country to get data for
    stat: str
        Statistic to get. Either "cases", "deaths" or "rates"
    raw: bool
        Flag to display raw data
    limit: int
        Number of raw data to show
    export: str
        Format to export data
    plot : bool
        Flag to plot data
    """
    stat_df = covid_model.get_covid_stat(country, stat, limit)
    if plot:
        plot_covid_stat(country, stat)
    if raw:
        print_rich_table(
            stat_df,
            headers=[stat.title()],
            show_index=True,
            index_name="Date",
            title=f"[bold]{country} COVID {stat}[/bold]",
        )
    if export:
        stat_df["date"] = stat_df.index
        stat_df = stat_df.reset_index(drop=True)
        # make sure date is first column in export
        columns = stat_df.columns.tolist()
        stat_df = stat_df[columns[-1:] + columns[:-1]]
        export_data(export, os.path.dirname(os.path.abspath(__file__)), stat, stat_df)
@log_start_end(log=logger)
def display_case_slopes(
    days_back: int = 30,
    limit: int = 10,
    threshold: int = 10000,
    ascend: bool = False,
    export: str = "",
) -> None:
    """Prints table showing countries with the highest case slopes.

    Parameters
    ----------
    days_back: int
        Number of historical days to get slope for
    limit: int
        Number to show in table
    ascend: bool
        Flag to sort in ascending order
    threshold: int
        Threshold for total cases over period
    export : str
        Format to export data
    """
    slopes = covid_model.get_case_slopes(days_back, limit, threshold, ascend)
    # Ascending order surfaces the lowest slopes; descending the highest.
    direction = "Lowest" if ascend else "Highest"
    print_rich_table(
        slopes,
        show_index=True,
        index_name="Country",
        title=f"[bold]{direction} Sloping Cases[/bold] (Cases/Day)",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        f"slopes_{days_back}day",
        slopes,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List
from openbb_terminal.decorators import log_start_end
from openbb_terminal.etf import stockanalysis_model
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def view_overview(symbol: str, export: str = ""):
    """Print etf overview information

    Parameters
    ----------
    symbol:str
        ETF symbols to display overview for
    export:str
        Format to export data
    """
    known_symbols = stockanalysis_model.get_all_names_symbols()[0]
    if symbol.upper() not in known_symbols:
        console.print(f"{symbol.upper()} not found in ETFs\n")
        return
    overview_df = stockanalysis_model.get_etf_overview(symbol)
    print_rich_table(
        overview_df,
        headers=list(overview_df.columns),
        title="ETF Overview Information",
        show_index=True,
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "overview", overview_df
    )
@log_start_end(log=logger)
def view_holdings(symbol: str, limit: int = 10, export: str = ""):
    """Display top holdings of an ETF. [Source: StockAnalysis]

    Parameters
    ----------
    symbol: str
        ETF symbol to show holdings for
    limit: int
        Number of holdings to show
    export: str
        Format to export data
    """
    data = stockanalysis_model.get_etf_holdings(symbol)
    print_rich_table(
        data[:limit],
        headers=list(data.columns),
        title="ETF Holdings",
        show_index=True,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "holdings", data)
@log_start_end(log=logger)
def view_comparisons(symbols: List[str], export: str = ""):
    """Show ETF comparisons

    Parameters
    ----------
    symbols: List[str]
        List of ETF symbols
    export: str
        Format to export data
    """
    etf_list = stockanalysis_model.get_all_names_symbols()[0]
    # BUG FIX: the former loop called symbols.remove() while iterating over
    # the same list, which skips the element following each removal (and
    # mutated the caller's list). Build a filtered list instead.
    valid_symbols = []
    for etf in symbols:
        if etf not in etf_list:
            console.print(f"{etf} not a known symbol.\n")
        else:
            valid_symbols.append(etf)
    data = stockanalysis_model.compare_etfs(valid_symbols)
    if data.empty:
        console.print("No data found for given ETFs\n")
        return
    print_rich_table(
        data, headers=list(data.columns), title="ETF Comparisons", show_index=True
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "overview", data)
@log_start_end(log=logger)
def display_etf_by_name(name: str, limit: int = 10, export: str = ""):
    """Display ETFs matching search string. [Source: StockAnalysis]

    Parameters
    ----------
    name: str
        String being matched
    limit: int
        Limit of ETFs to display
    export: str
        Export to given file type
    """
    matches = stockanalysis_model.get_etfs_by_name(name)
    print_rich_table(
        matches.head(limit),
        show_index=False,
        title="ETF Search Result",
    )
    # The full (unlimited) match set is exported, not just the shown rows.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ln_sa",
        matches,
    )
__docformat__ = "numpy"
import logging
import os
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.etf import financedatabase_model
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_etf_by_name(
    name: str,
    limit: int = 10,
    export: str = "",
):
    """Display a selection of ETFs based on name filtered by total assets. [Source: Finance Database]

    Parameters
    ----------
    name: str
        Search by name to find ETFs matching the criteria.
    limit: int
        Limit of ETFs to display
    export: str
        Type of format to export data
    """
    data = financedatabase_model.get_etfs_by_name(name)
    if not data:
        console.print("No data was found with that name\n")
        return
    # Transpose the symbol-keyed dict into rows, keep display columns, and
    # rank by fund size.
    etfs = (
        pd.DataFrame(data)
        .T[["long_name", "family", "category", "total_assets"]]
        .sort_values(by="total_assets", ascending=False)
    )
    etfs["total_assets"] = etfs["total_assets"] / 1e6  # show in millions
    print_rich_table(
        etfs.iloc[:limit],
        show_index=True,
        headers=["Name", "Family", "Category", "Total Assets [M]"],
        title="ETFs by Total Assets",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "ln_fd", etfs)
@log_start_end(log=logger)
def display_etf_by_description(
    description: str,
    limit: int = 10,
    export: str = "",
):
    """Display a selection of ETFs based on description filtered by total assets.
    [Source: Finance Database]

    Parameters
    ----------
    description: str
        Search by description to find ETFs matching the criteria.
    limit: int
        Limit of ETFs to display
    export: str
        Type of format to export data
    """
    data = financedatabase_model.get_etfs_by_description(description)
    if not data:
        console.print("No data was found with that description\n")
        return
    # Transpose the symbol-keyed dict into rows, keep display columns, and
    # rank by fund size.
    etfs = (
        pd.DataFrame(data)
        .T[["long_name", "family", "category", "total_assets"]]
        .sort_values(by="total_assets", ascending=False)
    )
    etfs["total_assets"] = etfs["total_assets"] / 1e6  # show in millions
    print_rich_table(
        etfs.iloc[:limit],
        show_index=True,
        headers=["Name", "Family", "Category", "Total Assets [M]"],
        title="ETFs by Total Assets",
    )
    # Note: the raw model payload (not the formatted table) is exported.
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "ld", data)
@log_start_end(log=logger)
def display_etf_by_category(
    category: str,
    limit: int = 10,
    export: str = "",
):
    """Display a selection of ETFs based on a category filtered by total assets.
    [Source: Finance Database]

    Parameters
    ----------
    category: str
        Search by category to find ETFs matching the criteria.
    limit: int
        Limit of ETFs to display
    export: str
        Type of format to export data
    """
    data = financedatabase_model.get_etfs_by_category(category)
    if not data:
        console.print("No data was found on that category\n")
        return
    table_data = pd.DataFrame(data).T[
        ["long_name", "family", "category", "total_assets"]
    ]
    table_data_sorted = table_data.sort_values(by="total_assets", ascending=False)
    # Show total assets in millions.
    table_data_sorted["total_assets"] = table_data_sorted["total_assets"] / 1e6
    print_rich_table(
        table_data_sorted.iloc[:limit],
        show_index=True,
        headers=["Name", "Family", "Category", "Total Assets [M]"],
        title="ETFs by Category and Total Assets",
    )
    # Unlike the sibling functions, this export lands in the screener subfolder.
    export_data(
        export,
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "screener"),
        "sbc",
        data,
    )
__docformat__ = "numpy"
import logging
from typing import List, Tuple
import pathlib
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
csv_path = pathlib.Path(__file__).parent / "etfs.csv"
@log_start_end(log=logger)
def get_all_names_symbols() -> Tuple[List[str], List[str]]:
    """Gets all etf names and symbols

    Returns
    -------
    Tuple[List[str], List[str]]
        List of all available etf symbols, List of all available etf names
    """
    # 11/25 I am hard coding the etf lists because of stockanalysis changing the format of their website
    # FIX: removed the dead `etf_symbols = []` / `etf_names = []` initializers
    # that were immediately overwritten.
    data = pd.read_csv(csv_path)
    return data.s.to_list(), data.n.to_list()
@log_start_end(log=logger)
def get_etf_overview(symbol: str) -> pd.DataFrame:
    """Get overview data for selected etf

    Parameters
    ----------
    symbol : str
        Etf symbol to get overview for

    Returns
    -------
    df : pd.DataFrame
        Dataframe of stock overview data

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.etf.overview("SPY")
    """
    r = requests.get(
        f"https://stockanalysis.com/etf/{symbol}",
        headers={"User-Agent": get_user_agent()},
    )
    soup = BeautifulSoup(r.text, "html.parser")
    # The first two tables on the page hold alternating label/value <td> cells.
    tables = soup.findAll("table")
    texts = []
    for tab in tables[:2]:
        entries = tab.findAll("td")
        for ent in entries:
            texts.append(ent.get_text())
    # Even offsets are labels, the following odd offsets their values; the
    # specific indices track stockanalysis.com's current page layout.
    # NOTE(review): these magic offsets will silently break if the site
    # reorders its summary tables — verify against the live page.
    var_cols = [0, 2, 4, 6, 8, 10, 12, 18, 20, 22, 26, 28, 30, 32]
    vals = [idx + 1 for idx in var_cols]
    columns = [texts[idx] for idx in var_cols]
    data = [texts[idx] for idx in vals]
    df = pd.DataFrame(data, index=columns, columns=[symbol.upper()])
    return df
@log_start_end(log=logger)
def get_etf_holdings(symbol: str) -> pd.DataFrame:
    """Get ETF holdings

    Parameters
    ----------
    symbol: str
        Symbol to get holdings for

    Returns
    -------
    df: pd.DataFrame
        Dataframe of holdings

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.etf.holdings("SPY")
    """
    url = f"https://stockanalysis.com/etf/{symbol}/holdings/"
    response = requests.get(url, headers={"User-Agent": get_user_agent()})
    try:
        # First table on the holdings page is the holdings list
        holdings = pd.read_html(response.content)[0]
        holdings["Symbol"] = holdings["Symbol"].fillna("N/A")
        holdings = holdings.set_index("Symbol")[["Name", "% Weight", "Shares"]]
        holdings = holdings.rename(columns={"% Weight": "% Of Etf"})
    except ValueError:
        # pd.read_html raises ValueError when no tables are found
        holdings = pd.DataFrame()
    return holdings
@log_start_end(log=logger)
def compare_etfs(symbols: List[str]) -> pd.DataFrame:
    """Compare selected ETFs

    Parameters
    ----------
    symbols : List[str]
        ETF symbols to compare

    Returns
    -------
    df_compare : pd.DataFrame
        Dataframe of etf comparisons

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> compare_etfs = openbb.etf.compare(["SPY", "QQQ", "IWM"])
    """
    # One overview column per ETF, joined side by side
    overviews = [get_etf_overview(etf) for etf in symbols]
    if not overviews:
        return pd.DataFrame()
    return pd.concat(overviews, axis=1)
@log_start_end(log=logger)
def get_etfs_by_name(name_to_search: str) -> pd.DataFrame:
    """Get an ETF symbol and name based on ETF string to search. [Source: StockAnalysis]

    Parameters
    ----------
    name_to_search: str
        ETF name to match

    Returns
    -------
    df: pd.Dataframe
        Dataframe with symbols and names
    """
    symbols, names = get_all_names_symbols()
    needle = name_to_search.lower()
    # Case-insensitive substring match against the ETF name
    matches = [
        (symbol, name)
        for symbol, name in zip(symbols, names)
        if needle in name.lower()
    ]
    return pd.DataFrame(matches, columns=["Symbol", "Name"])
__docformat__ = "numpy"
from typing import Optional, List
import logging
import os
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.etf import yfinance_model
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_etf_weightings(
    name: str,
    raw: bool = False,
    min_pct_to_display: float = 5,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display sector weightings allocation of ETF. [Source: Yahoo Finance]

    Shows either a table (``raw=True``) or a pie chart of the ETF's sector
    allocation, bucketing sectors below ``min_pct_to_display`` into "Others".

    Parameters
    ----------
    name: str
        ETF name
    raw: bool
        Display sector weighting allocation
    min_pct_to_display: float
        Minimum percentage to display sector
    export: str
        Type of format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    sectors = yfinance_model.get_etf_sector_weightings(name)
    if not sectors:
        console.print("No data was found for that ETF\n")
        return

    # Transpose so sector names become the index with one percentage column
    holdings = pd.DataFrame(sectors, index=[0]).T

    title = f"Sector holdings of {name}"
    if raw:
        console.print(f"\n{title}")
        holdings.columns = ["% of holdings in the sector"]
        print_rich_table(
            holdings,
            headers=list(holdings.columns),
            show_index=True,
            title="Sector Weightings Allocation",
        )

    else:
        # Keep only sectors above the threshold; fold the rest into "Others"
        main_holdings = holdings[holdings.values > min_pct_to_display].to_dict()[
            holdings.columns[0]
        ]
        if len(main_holdings) < len(holdings):
            main_holdings["Others"] = 100 - sum(main_holdings.values())

        legend, values = zip(*main_holdings.items())
        # Pie labels show both sector name and rounded percentage
        leg = [f"{le}\n{round(v,2)}%" for le, v in zip(legend, values)]

        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return

        ax.pie(
            values,
            labels=leg,
            wedgeprops=theme.pie_wedgeprops,
            colors=theme.get_colors(),
            startangle=theme.pie_startangle,
        )

        ax.set_title(title)
        theme.style_primary_axis(ax)

        # Only render the figure ourselves when we also created it
        if external_axes is None:
            theme.visualize_output()

    export_data(export, os.path.dirname(os.path.abspath(__file__)), "weights", holdings)
@log_start_end(log=logger)
def display_etf_description(name: str):
    """Display ETF description summary. [Source: Yahoo Finance]

    Parameters
    ----------
    name: str
        ETF name
    """
    summary = yfinance_model.get_etf_summary_description(name)
    if summary:
        console.print(summary, "\n")
    else:
        console.print("No data was found for that ETF\n")
__docformat__ = "numpy"
import logging
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def etf_movers(sort_type: str = "gainers", export: bool = False) -> pd.DataFrame:
    """
    Scrape data for top etf movers.

    Parameters
    ----------
    sort_type: str
        Data to get. Can be "gainers", "decliners" or "active"
    export: bool
        Whether the frame is meant for export. When True the ticker and name
        are kept in separate columns; otherwise only the name is shown.

    Returns
    -------
    etfmovers: pd.DataFrame
        Datafame containing the name, price, change and the volume of the etf.
        Empty when ``sort_type`` is not one of the supported values.
    """
    # Map each supported sort type to the WSJ endpoint's etfMover id
    endpoint_ids = {
        "gainers": "leaders",
        "decliners": "laggards",
        "active": "most_active",
    }
    mover_id = endpoint_ids.get(sort_type.lower())
    if mover_id is None:
        return pd.DataFrame()

    # The id query parameter is a URL-encoded JSON blob
    url = (
        "https://www.wsj.com/market-data/mutualfunds-etfs/etfmovers?id=%7B%22application"
        f"%22%3A%22WSJ%22%2C%22etfMover%22%3A%22{mover_id}%22%2C%22count%22%3A25%7D&type="
        "mdc_etfmovers"
    )
    data = requests.get(url, headers={"User-Agent": get_user_agent()}).json()
    instruments = data["data"]["instruments"]

    columns = {
        " ": [entry["ticker"] for entry in instruments],
        "Name": [entry["name"] for entry in instruments],
        "Price": [entry["lastPrice"] for entry in instruments],
        "Chg": [entry["priceChange"] for entry in instruments],
        "%Chg": [entry["percentChange"] for entry in instruments],
        "Vol": [entry["formattedVolume"] for entry in instruments],
    }
    if not export:
        # Interactive display: drop the ticker column and lead with the name
        del columns[" "]
        columns = {" ": columns.pop("Name"), **columns}
    return pd.DataFrame(columns)
__docformat__ = "numpy"
# pylint:disable=too-many-lines
# pylint:disable=R0904,C0201
import argparse
import logging
from datetime import datetime
from typing import List
import numpy as np
import pandas as pd
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common.technical_analysis import (
custom_indicators_view,
momentum_view,
overlap_model,
overlap_view,
trend_indicators_view,
volatility_model,
volatility_view,
volume_view,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
check_non_negative,
check_positive,
check_positive_list,
valid_date,
EXPORT_ONLY_FIGURES_ALLOWED,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class TechnicalAnalysisController(BaseController):
"""Technical Analysis Controller class"""
CHOICES_COMMANDS = [
"ema",
"sma",
"wma",
"hma",
"vwap",
"zlma",
"cci",
"macd",
"rsi",
"stoch",
"fisher",
"cg",
"adx",
"aroon",
"bbands",
"donchian",
"kc",
"ad",
"adosc",
"obv",
"fib",
"clenow",
"demark",
"atr",
]
PATH = "/etf/ta/"
CHOICES_GENERATION = True
    def __init__(
        self,
        ticker: str,
        start: datetime,
        data: pd.DataFrame,
        queue: List[str] = None,
    ):
        """Construct the ETF technical-analysis controller.

        Parameters
        ----------
        ticker : str
            ETF ticker the analysis applies to
        start : datetime
            Start date of the loaded price data
        data : pd.DataFrame
            Price data for the ticker; the ta commands expect an
            "Adj Close" column (plus OHLCV columns for indicator views)
        queue : List[str], optional
            Queue of commands inherited from the parent controller
        """
        super().__init__(queue)

        self.ticker = ticker
        self.start = start
        self.data = data
        # Daily bars are represented as a 1440-minute interval
        self.interval = "1440min"

        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
mt = MenuText("etf/ta/", 90)
mt.add_param("_ticker", self.ticker)
mt.add_raw("\n")
mt.add_info("_overlap_")
mt.add_cmd("ema")
mt.add_cmd("sma")
mt.add_cmd("wma")
mt.add_cmd("hma")
mt.add_cmd("zlma")
mt.add_cmd("vwap")
mt.add_info("_momentum_")
mt.add_cmd("cci")
mt.add_cmd("macd")
mt.add_cmd("rsi")
mt.add_cmd("stoch")
mt.add_cmd("fisher")
mt.add_cmd("cg")
mt.add_cmd("clenow")
mt.add_cmd("demark")
mt.add_info("_trend_")
mt.add_cmd("adx")
mt.add_cmd("aroon")
mt.add_info("_volatility_")
mt.add_cmd("bbands")
mt.add_cmd("donchian")
mt.add_cmd("kc")
mt.add_cmd("atr")
mt.add_info("_volume_")
mt.add_cmd("ad")
mt.add_cmd("adosc")
mt.add_cmd("obv")
mt.add_info("_custom_")
mt.add_cmd("fib")
console.print(text=mt.menu_text, menu="ETF - Technical Analysis")
def custom_reset(self):
"""Class specific component of reset command"""
if self.ticker:
return ["etf", f"load {self.ticker}", "ta"]
return []
    @log_start_end(log=logger)
    def call_ema(self, other_args: List[str]):
        """Process ema command: plot exponential moving average(s) of adjusted close."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="ema",
            description="""
            The Exponential Moving Average is a staple of technical
            analysis and is used in countless technical indicators. In a Simple Moving
            Average, each value in the time period carries equal weight, and values outside
            of the time period are not included in the average. However, the Exponential
            Moving Average is a cumulative calculation, including all data. Past values have
            a diminishing contribution to the average, while more recent values have a greater
            contribution. This method allows the moving average to be more responsive to changes
            in the data.
        """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive_list,
            default=overlap_model.WINDOW_LENGTHS,
            help="Window lengths. Multiple values indicated as comma separated values.",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # Allow a bare leading value (e.g. "ema 20,50") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            overlap_view.view_ma(
                ma_type="EMA",
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_sma(self, other_args: List[str]):
        """Process sma command: plot simple moving average(s) of adjusted close."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="sma",
            description="""
            Moving Averages are used to smooth the data in an array to
            help eliminate noise and identify trends. The Simple Moving Average is literally
            the simplest form of a moving average. Each output value is the average of the
            previous n values. In a Simple Moving Average, each value in the time period carries
            equal weight, and values outside of the time period are not included in the average.
            This makes it less responsive to recent changes in the data, which can be useful for
            filtering out those changes.
        """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive_list,
            default=overlap_model.WINDOW_LENGTHS,
            help="Window lengths. Multiple values indicated as comma separated values. ",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # Allow a bare leading value (e.g. "sma 20,50") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            overlap_view.view_ma(
                ma_type="SMA",
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_wma(self, other_args: List[str]):
        """Process wma command: plot weighted moving average(s) of adjusted close."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="wma",
            description="""
            A Weighted Moving Average puts more weight on recent data and less on past data.
            This is done by multiplying each bar’s price by a weighting factor. Because of its
            unique calculation, WMA will follow prices more closely than a corresponding Simple
            Moving Average.
        """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive_list,
            default=overlap_model.WINDOW_LENGTHS,
            help="Window lengths. Multiple values indicated as comma separated values. ",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # Allow a bare leading value (e.g. "wma 20,50") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            overlap_view.view_ma(
                ma_type="WMA",
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_hma(self, other_args: List[str]):
        """Process hma command: plot Hull moving average(s) of adjusted close."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="hma",
            description="""
            The Hull Moving Average solves the age old dilemma of making a moving average
            more responsive to current price activity whilst maintaining curve smoothness.
            In fact the HMA almost eliminates lag altogether and manages to improve smoothing
            at the same time.
        """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive_list,
            # Note: HMA uses a different default window set than the other MAs
            default=overlap_model.WINDOW_LENGTHS2,
            help="Window lengths. Multiple values indicated as comma separated values. ",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # Allow a bare leading value (e.g. "hma 20,50") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            overlap_view.view_ma(
                ma_type="HMA",
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_zlma(self, other_args: List[str]):
        """Process zlma command: plot zero-lag exponential moving average(s)."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="zlma",
            description="""
                The zero lag exponential moving average (ZLEMA) indicator
                was created by John Ehlers and Ric Way. The idea is do a
                regular exponential moving average (EMA) calculation but
                on a de-lagged data instead of doing it on the regular data.
                Data is de-lagged by removing the data from "lag" days ago
                thus removing (or attempting to) the cumulative effect of
                the moving average.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive_list,
            default=[20],
            help="Window lengths. Multiple values indicated as comma separated values.",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # Allow a bare leading value (e.g. "zlma 20") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            overlap_view.view_ma(
                ma_type="ZLMA",
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_vwap(self, other_args: List[str]):
        """Process vwap command: plot volume-weighted average price."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="vwap",
            description="""
                The Volume Weighted Average Price that measures the average typical price
                by volume.  It is typically used with intraday charts to identify general direction.
            """,
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # --start and --end must be supplied together (each requires the other)
        parser.add_argument(
            "--start",
            dest="start",
            type=valid_date,
            help="Starting date to select",
            required="--end" in other_args,
        )
        parser.add_argument(
            "--end",
            dest="end",
            type=valid_date,
            help="Ending date to select",
            required="--start" in other_args,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            # Daily
            if self.interval == "1440min":
                # VWAP on daily bars is only meaningful over an explicit window
                if not ns_parser.start:
                    console.print(
                        "If no date conditions, VWAP should be used with intraday data. \n"
                    )
                    return
                interval_text = "Daily"
            else:
                interval_text = self.interval
            overlap_view.view_vwap(
                data=self.data,
                symbol=self.ticker,
                start_date=ns_parser.start,
                end_date=ns_parser.end,
                offset=ns_parser.n_offset,
                interval=interval_text,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_cci(self, other_args: List[str]):
        """Process cci command: plot the Commodity Channel Index."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="cci",
            description="""
                The CCI is designed to detect beginning and ending market trends.
                The range of 100 to -100 is the normal trading range. CCI values outside of this
                range indicate overbought or oversold conditions. You can also look for price
                divergence in the CCI. If the price is making new highs, and the CCI is not,
                then a price correction is likely.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            # NOTE(review): default is a float but check_positive appears to be an
            # int parser — passing e.g. "-s 0.015" on the CLI may fail; confirm
            # whether check_positive_float was intended here.
            default=0.015,
            help="scalar",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_cci(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_macd(self, other_args: List[str]):
        """Process macd command: plot Moving Average Convergence Divergence."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="macd",
            description="""
                The Moving Average Convergence Divergence (MACD) is the difference
                between two Exponential Moving Averages. The Signal line is an Exponential Moving
                Average of the MACD. \n \n The MACD signals trend changes and indicates the start
                of new trend direction. High values indicate overbought conditions, low values
                indicate oversold conditions. Divergence with the price indicates an end to the
                current trend, especially if the MACD is at extreme high or low values. When the MACD
                line crosses above the signal line a buy signal is generated. When the MACD crosses
                below the signal line a sell signal is generated. To confirm the signal, the MACD
                should be above zero for a buy, and below zero for a sell.
            """,
        )
        parser.add_argument(
            "--fast",
            action="store",
            dest="n_fast",
            type=check_positive,
            default=12,
            help="The short period.",
            choices=range(1, 100),
            metavar="N_FAST",
        )
        parser.add_argument(
            "--slow",
            action="store",
            dest="n_slow",
            type=check_positive,
            default=26,
            help="The long period.",
            choices=range(1, 100),
            metavar="N_SLOW",
        )
        parser.add_argument(
            "--signal",
            action="store",
            dest="n_signal",
            type=check_positive,
            default=9,
            help="The signal period.",
            choices=range(1, 100),
            metavar="N_SIGNAL",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_macd(
                symbol=self.ticker,
                data=self.data["Adj Close"],
                n_fast=ns_parser.n_fast,
                n_slow=ns_parser.n_slow,
                n_signal=ns_parser.n_signal,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_rsi(self, other_args: List[str]):
        """Process rsi command: plot the Relative Strength Index."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rsi",
            description="""
                The Relative Strength Index (RSI) calculates a ratio of the
                recent upward price movements to the absolute price movement. The RSI ranges
                from 0 to 100. The RSI is interpreted as an overbought/oversold indicator when
                the value is over 70/below 30. You can also look for divergence with price. If
                the price is making new highs/lows, and the RSI is not, it indicates a reversal.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        parser.add_argument(
            "-d",
            "--drift",
            action="store",
            dest="n_drift",
            type=check_positive,
            default=1,
            help="drift",
            choices=range(1, 100),
            metavar="N_DRIFT",
        )
        # Allow a bare leading value (e.g. "rsi 14") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_rsi(
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                drift=ns_parser.n_drift,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_stoch(self, other_args: List[str]):
        """Process stoch command: plot the Stochastic Oscillator."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="stoch",
            description="""
                The Stochastic Oscillator measures where the close is in relation
                to the recent trading range. The values range from zero to 100. %D values over 75
                indicate an overbought condition; values under 25 indicate an oversold condition.
                When the Fast %D crosses above the Slow %D, it is a buy signal; when it crosses
                below, it is a sell signal. The Raw %K is generally considered too erratic to use
                for crossover signals.
            """,
        )
        parser.add_argument(
            "-k",
            "--fastkperiod",
            action="store",
            dest="n_fastkperiod",
            type=check_positive,
            default=14,
            help="The time period of the fastk moving average",
            choices=range(1, 100),
            metavar="N_FASTKPERIOD",
        )
        parser.add_argument(
            "-d",
            "--slowdperiod",
            action="store",
            dest="n_slowdperiod",
            type=check_positive,
            default=3,
            help="The time period of the slowd moving average",
            choices=range(1, 100),
            metavar="N_SLOWDPERIOD",
        )
        parser.add_argument(
            "--slowkperiod",
            action="store",
            dest="n_slowkperiod",
            type=check_positive,
            default=3,
            help="The time period of the slowk moving average",
            choices=range(1, 100),
            metavar="N_SLOWKPERIOD",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_stoch(
                symbol=self.ticker,
                data=self.data,
                fastkperiod=ns_parser.n_fastkperiod,
                slowdperiod=ns_parser.n_slowdperiod,
                slowkperiod=ns_parser.n_slowkperiod,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_fisher(self, other_args: List[str]):
        """Process fisher command: plot the Fisher Transform indicator."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="fisher",
            description="""
                The Fisher Transform is a technical indicator created by John F. Ehlers
                that converts prices into a Gaussian normal distribution.1 The indicator
                highlights when prices have moved to an extreme, based on recent prices.
                This may help in spotting turning points in the price of an asset. It also
                helps show the trend and isolate the price waves within a trend.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        # Allow a bare leading value (e.g. "fisher 14") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_fisher(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_cg(self, other_args: List[str]):
        """Process cg command: plot the Center of Gravity oscillator."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="cg",
            description="""
                The Center of Gravity indicator, in short, is used to anticipate future price movements
                and to trade on price reversals as soon as they happen. However, just like other oscillators,
                the COG indicator returns the best results in range-bound markets and should be avoided when
                the price is trending. Traders who use it will be able to closely speculate the upcoming
                price change of the asset.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        # Allow a bare leading value (e.g. "cg 14") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_cg(
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_adx(self, other_args: List[str]):
        """Process adx command: plot the Average Directional Movement Index."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="adx",
            description="""
                The ADX is a Welles Wilder style moving average of the Directional Movement Index (DX).
                The values range from 0 to 100, but rarely get above 60. To interpret the ADX, consider
                a high number to be a strong trend, and a low number, a weak trend.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        parser.add_argument(
            "-d",
            "--drift",
            action="store",
            dest="n_drift",
            type=check_positive,
            default=1,
            help="drift",
            choices=range(1, 100),
            metavar="N_DRIFT",
        )
        # Allow a bare leading value (e.g. "adx 14") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            trend_indicators_view.display_adx(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                drift=ns_parser.n_drift,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_aroon(self, other_args: List[str]):
        """Process aroon command: plot the Aroon trend indicator."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="aroon",
            description="""
                The word aroon is Sanskrit for "dawn's early light." The Aroon
                indicator attempts to show when a new trend is dawning. The indicator consists
                of two lines (Up and Down) that measure how long it has been since the highest
                high/lowest low has occurred within an n period range. \n \n When the Aroon Up is
                staying between 70 and 100 then it indicates an upward trend. When the Aroon Down
                is staying between 70 and 100 then it indicates an downward trend. A strong upward
                trend is indicated when the Aroon Up is above 70 while the Aroon Down is below 30.
                Likewise, a strong downward trend is indicated when the Aroon Down is above 70 while
                the Aroon Up is below 30. Also look for crossovers. When the Aroon Down crosses above
                the Aroon Up, it indicates a weakening of the upward trend (and vice versa).
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=25,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        # Allow a bare leading value (e.g. "aroon 25") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            trend_indicators_view.display_aroon(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_bbands(self, other_args: List[str]):
        """Process bbands command: plot Bollinger Bands."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="bbands",
            description="""
                Bollinger Bands consist of three lines. The middle band is a simple
                moving average (generally 20 periods) of the typical price (TP). The upper and lower
                bands are F standard deviations (generally 2) above and below the middle band.
                The bands widen and narrow when the volatility of the price is higher or lower,
                respectively. \n \nBollinger Bands do not, in themselves, generate buy or sell signals;
                they are an indicator of overbought or oversold conditions. When the price is near the
                upper or lower band it indicates that a reversal may be imminent. The middle band
                becomes a support or resistance level. The upper and lower bands can also be
                interpreted as price targets. When the price bounces off of the lower band and crosses
                the middle band, then the upper band becomes the price target.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=15,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--std",
            action="store",
            dest="n_std",
            type=check_positive,
            default=2,
            help="std",
            # NOTE(review): choices are quarter-step floats but check_positive
            # appears to be an int parser, so non-integer std values may be
            # rejected by the type before choices are checked — confirm.
            choices=np.arange(0.0, 10, 0.25).tolist(),
            metavar="N_STD",
        )
        parser.add_argument(
            "-m",
            "--mamode",
            action="store",
            dest="s_mamode",
            default="sma",
            help="mamode",
            choices=volatility_model.MAMODES,
        )
        # Allow a bare leading value (e.g. "bbands 15") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volatility_view.display_bbands(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                n_std=ns_parser.n_std,
                mamode=ns_parser.s_mamode,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_donchian(self, other_args: List[str]):
        """Process donchian command: plot Donchian Channels."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="donchian",
            description="""
                Donchian Channels are three lines generated by moving average
                calculations that comprise an indicator formed by upper and lower
                bands around a midrange or median band. The upper band marks the
                highest price of a security over N periods while the lower band
                marks the lowest price of a security over N periods. The area
                between the upper and lower bands represents the Donchian Channel.
            """,
        )
        parser.add_argument(
            "-u",
            "--length_upper",
            action="store",
            dest="n_length_upper",
            type=check_positive,
            default=20,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH_UPPER",
        )
        parser.add_argument(
            "-l",
            "--length_lower",
            action="store",
            dest="n_length_lower",
            type=check_positive,
            default=20,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH_LOWER",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volatility_view.display_donchian(
                symbol=self.ticker,
                data=self.data,
                upper_length=ns_parser.n_length_upper,
                lower_length=ns_parser.n_length_lower,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_kc(self, other_args: List[str]):
        """Process kc command: plot Keltner Channels."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="kc",
            description="""
                Keltner Channels are volatility-based bands that are placed
                on either side of an asset's price and can aid in determining
                the direction of a trend.The Keltner channel uses the average
                true range (ATR) or volatility, with breaks above or below the top
                and bottom barriers signaling a continuation.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=20,
            help="Window length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=2,
            help="scalar",
        )
        parser.add_argument(
            "-m",
            "--mamode",
            action="store",
            dest="s_mamode",
            default="ema",
            choices=volatility_model.MAMODES,
            help="mamode",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # Allow a bare leading value (e.g. "kc 20") to be parsed as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volatility_view.view_kc(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                mamode=ns_parser.s_mamode,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_ad(self, other_args: List[str]):
    """Process ad command"""
    # Assemble the CLI parser for the Accumulation/Distribution line.
    ad_parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ad",
        description="""
            The Accumulation/Distribution Line is similar to the On Balance
            Volume (OBV), which sums the volume times +1/-1 based on whether the close is
            higher than the previous close. The Accumulation/Distribution indicator, however
            multiplies the volume by the close location value (CLV). The CLV is based on the
            movement of the issue within a single bar and can be +1, -1 or zero. \n \n
            The Accumulation/Distribution Line is interpreted by looking for a divergence in
            the direction of the indicator relative to price. If the Accumulation/Distribution
            Line is trending upward it indicates that the price may follow. Also, if the
            Accumulation/Distribution Line becomes flat while the price is still rising (or falling)
            then it signals an impending flattening of the price.
        """,
    )
    # Optional flag: base the calculation on the stock's open value.
    ad_parser.add_argument(
        "--open",
        action="store_true", default=False, dest="b_use_open",
        help="uses open value of stock",
    )
    parsed = self.parse_known_args_and_warn(
        ad_parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not parsed:
        return
    volume_view.display_ad(
        symbol=self.ticker,
        data=self.data,
        use_open=parsed.b_use_open,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_adosc(self, other_args: List[str]):
    """Process adosc command"""
    # Assemble the CLI parser for the Chaikin (A/D) Oscillator.
    adosc_parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="adosc",
        description="""
            Accumulation/Distribution Oscillator, also known as the Chaikin Oscillator
            is essentially a momentum indicator, but of the Accumulation-Distribution line
            rather than merely price. It looks at both the strength of price moves and the
            underlying buying and selling pressure during a given time period. The oscillator
            reading above zero indicates net buying pressure, while one below zero registers
            net selling pressure. Divergence between the indicator and pure price moves are
            the most common signals from the indicator, and often flag market turning points.
        """,
    )
    # Optional flag: base the calculation on the stock's open value.
    adosc_parser.add_argument(
        "--open",
        action="store_true", default=False, dest="b_use_open",
        help="uses open value of stock",
    )
    # Fast EMA length.
    adosc_parser.add_argument(
        "--fast",
        action="store", dest="n_length_fast", type=check_positive,
        default=3, help="fast length", choices=range(1, 100),
        metavar="N_LENGTH_FAST",
    )
    # Slow EMA length.
    adosc_parser.add_argument(
        "--slow",
        action="store", dest="n_length_slow", type=check_positive,
        default=10, help="slow length", choices=range(1, 100),
        metavar="N_LENGTH_SLOW",
    )
    parsed = self.parse_known_args_and_warn(
        adosc_parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not parsed:
        return
    volume_view.display_adosc(
        symbol=self.ticker,
        data=self.data,
        use_open=parsed.b_use_open,
        fast=parsed.n_length_fast,
        slow=parsed.n_length_slow,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_obv(self, other_args: List[str]):
    """Process obv command"""
    # Assemble the CLI parser for the On Balance Volume indicator
    # (no options beyond the shared export flags).
    obv_parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="obv",
        description="""
            The On Balance Volume (OBV) is a cumulative total of the up and
            down volume. When the close is higher than the previous close, the volume is added
            to the running total, and when the close is lower than the previous close, the volume
            is subtracted from the running total. \n \n To interpret the OBV, look for the OBV
            to move with the price or precede price moves. If the price moves before the OBV,
            then it is a non-confirmed move. A series of rising peaks, or falling troughs, in the
            OBV indicates a strong trend. If the OBV is flat, then the market is not trending.
        """,
    )
    parsed = self.parse_known_args_and_warn(
        obv_parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not parsed:
        return
    volume_view.display_obv(
        symbol=self.ticker,
        data=self.data,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_fib(self, other_args: List[str]):
    """Process fib command"""
    # Assemble the CLI parser for Fibonacci retracement levels.
    fib_parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="fib",
        description="Calculates the fibonacci retracement levels",
    )
    # Lookback window in days.
    fib_parser.add_argument(
        "-p", "--period",
        dest="period", type=check_positive,
        help="Days to look back for retracement",
        default=120, choices=range(1, 960), metavar="PERIOD",
    )
    # --start and --end are mutually required: supplying one makes the
    # other mandatory.
    fib_parser.add_argument(
        "--start",
        dest="start", type=valid_date,
        help="Starting date to select",
        required="--end" in other_args,
    )
    fib_parser.add_argument(
        "--end",
        dest="end", type=valid_date,
        help="Ending date to select",
        required="--start" in other_args,
    )
    # A leading bare value is interpreted as the period.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-p")
    parsed = self.parse_known_args_and_warn(
        fib_parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not parsed:
        return
    custom_indicators_view.fibonacci_retracement(
        symbol=self.ticker,
        data=self.data,
        limit=parsed.period,
        start_date=parsed.start,
        end_date=parsed.end,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_clenow(self, other_args: List[str]):
    """Process clenow command: Clenow Volatility Adjusted Momentum."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="clenow",
        description="Calculates the Clenow Volatility Adjusted Momentum.",
    )
    parser.add_argument(
        "-p",
        "--period",
        dest="period",
        help="Lookback period for regression",
        default=90,
        type=check_positive,
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
    )
    if ns_parser:
        # BUG FIX: this daily-data guard previously ran before argument
        # parsing, which made `clenow -h` (and any argument validation)
        # unreachable whenever the loaded interval was intraday. It now
        # runs only after a successful parse; the user-facing message is
        # unchanged.
        if self.interval != "1440min":
            console.print(
                "[red]This regression should be performed with daily data and at least 90 days.[/red]"
            )
            return
        momentum_view.display_clenow_momentum(
            self.data["Adj Close"],
            self.ticker.upper(),
            ns_parser.period,
            ns_parser.export,
        )
@log_start_end(log=logger)
def call_demark(self, other_args: List[str]):
    """Process demark command"""
    # Assemble the CLI parser for the Demark sequential indicator.
    demark_parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="demark",
        description="Calculates the Demark sequential indicator.",
    )
    # Hide indicator values below this threshold to declutter the plot.
    demark_parser.add_argument(
        "-m", "--min",
        help="Minimum value of indicator to show (declutters plot).",
        dest="min_to_show", type=check_positive, default=5,
    )
    parsed = self.parse_known_args_and_warn(
        demark_parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
    )
    if not parsed:
        return
    momentum_view.display_demark(
        self.data,
        self.ticker.upper(),
        min_to_show=parsed.min_to_show,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_atr(self, other_args: List[str]):
"""Process atr command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="atr",
description="""
Averge True Range is used to measure volatility, especially volatility caused by
gaps or limit moves.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive,
default=14,
help="Window length",
)
parser.add_argument(
"-m",
"--mamode",
action="store",
dest="s_mamode",
default="ema",
choices=volatility_model.MAMODES,
help="mamode",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=int,
default=0,
help="offset",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
volatility_view.display_atr(
data=self.data,
symbol=self.ticker,
window=ns_parser.n_length,
mamode=ns_parser.s_mamode,
offset=ns_parser.n_offset,
export=ns_parser.export,
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/etf/technical_analysis/ta_controller.py | 0.682362 | 0.164483 | ta_controller.py | pypi |
__docformat__ = "numpy"
import os
from typing import List
from urllib.parse import quote
import certifi
import feedparser
import pandas as pd
from openbb_terminal.rich_config import console
def get_news(
    term: str = "", sources: str = "", sort: str = "published"
) -> pd.DataFrame:
    """Get news for a given term and source. [Source: Feedparser]

    Parameters
    ----------
    term : str
        term to search on the news articles
    sources: str
        sources to exclusively show news from (separated by commas)
    sort: str
        the column to sort by

    Returns
    -------
    articles: pd.DataFrame
        Dataframe with columns title, link and published (empty on failure)

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.news()
    """
    # Necessary for installer so that it can locate the correct certificates for
    # API calls and https
    # https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error/73270162#73270162
    os.environ["REQUESTS_CA_BUNDLE"] = certifi.where()
    os.environ["SSL_CERT_FILE"] = certifi.where()
    have_data = False
    console.print("[yellow]Fetching data. Please be patient\n[/yellow]")
    limit = 0
    # BUG FIX: URL-encode the term once, before the retry loop. Encoding it
    # inside the loop re-encoded the already-encoded value on every retry
    # (e.g. " " -> "%20" -> "%2520").
    if term:
        term = quote(term)
    while not have_data:
        if term:
            data = feedparser.parse(
                f"https://news.google.com/rss/search?q={term}&hl=en-US&gl=US&ceid=US:en&when:24h+allinurl"
                f':{sources.replace(" ", "%20")}'
            )
        else:
            data = feedparser.parse(
                f'https://news.google.com/rss/search?q=when:24h+allinurl:{sources.replace(" ", "%20")}'
                "&hl=en-US&gl=US&ceid=US:en"
            )
        if (
            hasattr(data, "status") and data.status == 200
        ):  # Checking if data has status attribute and if data request succeeded
            if data.entries:
                have_data = True
            elif limit == 60:  # Breaking if 60 successful requests return no data
                console.print("[red]Timeout occurred. Please try again\n[/red]")
                break
            limit = limit + 1
        elif hasattr(data, "status") and data.status != 200:  # If data request failed
            console.print("[red]Status code not 200. Unable to retrieve data\n[/red]")
            break
        else:
            console.print("[red]Could not retrieve data\n[/red]")
            break
    # Filter based on data sources
    if sources:
        newdata: List = []
        # BUG FIX: the comma-separated case previously matched the entire
        # "a,b,c" string against each source title (which can never match
        # more than one source); splitting on "," handles both the single-
        # and multi-source cases identically.
        for entry in list(data.entries):
            for s in sources.split(","):
                if entry["source"]["title"].lower().find(s.lower()) != -1:
                    newdata.append(entry)
        if newdata:
            df = pd.DataFrame(newdata, columns=["title", "link", "published"])
        else:
            return pd.DataFrame()
    else:
        df = pd.DataFrame(data.entries, columns=["title", "link", "published"])
    df["published"] = pd.to_datetime(df["published"])
    # Newest first by the requested column.
    df = df.sort_values(by=[sort], ascending=False)
return df | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/feedparser_model.py | 0.601125 | 0.30399 | feedparser_model.py | pypi |
__docformat__ = "numpy"
import logging
from pathlib import Path
from typing import Dict, Any, Optional
import pandas as pd
import statsmodels.api as sm
from pandas import errors
from linearmodels.datasets import wage_panel
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
DATA_EXAMPLES: Dict[str, str] = {
"anes96": "American National Election Survey 1996",
"cancer": "Breast Cancer Data",
"ccard": "Bill Greene’s credit scoring data.",
"cancer_china": "Smoking and lung cancer in eight cities in China.",
"co2": "Mauna Loa Weekly Atmospheric CO2 Data",
"committee": "First 100 days of the US House of Representatives 1995",
"copper": "World Copper Market 1951-1975 Dataset",
"cpunish": "US Capital Punishment dataset.",
"danish_data": "Danish Money Demand Data",
"elnino": "El Nino - Sea Surface Temperatures",
"engel": "Engel (1857) food expenditure data",
"fair": "Affairs dataset",
"fertility": "World Bank Fertility Data",
"grunfeld": "Grunfeld (1950) Investment Data",
"heart": "Transplant Survival Data",
"interest_inflation": "(West) German interest and inflation rate 1972-1998",
"longley": "Longley dataset",
"macrodata": "United States Macroeconomic data",
"modechoice": "Travel Mode Choice",
"nile": "Nile River flows at Ashwan 1871-1970",
"randhie": "RAND Health Insurance Experiment Data",
"scotland": "Taxation Powers Vote for the Scottish Parliament 1997",
"spector": "Spector and Mazzeo (1980) - Program Effectiveness Data",
"stackloss": "Stack loss data",
"star98": "Star98 Educational Dataset",
"statecrim": "Statewide Crime Data 2009",
"strikes": "U.S. Strike Duration Data",
"sunspots": "Yearly sunspots data 1700-2008",
"wage_panel": "Veila and M. Verbeek (1998): Whose Wages Do Unions Raise?",
}
file_types = ["xlsx", "csv"]
@log_start_end(log=logger)
def load(
    file: str,
    data_files: Optional[Dict[Any, Any]] = None,
    data_examples: Optional[Dict[Any, Any]] = None,
) -> pd.DataFrame:
    """Load custom file into dataframe.

    Parameters
    ----------
    file: str
        Path to file, alias of an exported data file, or name of a
        statsmodels/linearmodels example dataset
    data_files: dict
        Contains all available data files within the Export folder
    data_examples: dict
        Contains all available examples from Statsmodels

    Returns
    -------
    pd.DataFrame
        Dataframe with custom data (empty on any failure)
    """
    if data_files is None:
        data_files = {}
    if data_examples is None:
        data_examples = DATA_EXAMPLES
    # Example datasets are loaded straight from statsmodels/linearmodels.
    if file in data_examples:
        if file == "wage_panel":
            return wage_panel.load()
        return getattr(sm.datasets, file).load_pandas().data
    # Resolve a known exported-file alias; otherwise treat `file` as a path.
    if file in data_files:
        full_file = data_files[file]
    else:
        full_file = file
    if not Path(full_file).exists():
        console.print(f"[red]Cannot find the file {full_file}[/red]\n")
        return pd.DataFrame()
    file_type = Path(full_file).suffix
    try:
        if file_type == ".xlsx":
            data = pd.read_excel(full_file)
        elif file_type == ".csv":
            data = pd.read_csv(full_file)
        else:
            console.print(
                f"The file type {file_type} is not supported. Use .xlsx or .csv."
            )
            return pd.DataFrame()
    except errors.ParserError:
        # BUG FIX: these two messages previously ended with "\b" (a backspace
        # control character), clearly intended to be a newline.
        console.print("[red]The given file is not properly formatted.[/red]\n")
        return pd.DataFrame()
    except errors.EmptyDataError:
        console.print("[red]The given file is empty.[/red]\n")
        return pd.DataFrame()
    if data is None:
        return pd.DataFrame()
    if not data.empty:
        # Slashes in column names break downstream access/exports.
        data.columns = [x.replace("/", "_") for x in data.columns]
return data | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/common_model.py | 0.795301 | 0.52756 | common_model.py | pypi |
__docformat__ = "numpy"
import logging
from datetime import datetime, timedelta
from typing import Any, List, Optional, Tuple
import requests
import pandas as pd
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_NEWS_TOKEN"])
def get_news(
    query: str,
    limit: int = 10,
    start_date: Optional[str] = None,
    show_newest: bool = True,
    sources: str = "",
) -> List[Tuple[pd.DataFrame, Any]]:
    """Get news for a given term. [Source: NewsAPI]

    Parameters
    ----------
    query : str
        term to search on the news articles
    limit : int
        maximum number of articles to return, by default 10
    start_date: Optional[str]
        date to start searching articles from formatted YYYY-MM-DD;
        defaults to 7 days ago when not provided
    show_newest: bool
        flag to show newest articles first
    sources: str
        sources to exclusively show news from (comma separated)

    Returns
    -------
    tables : List[Tuple[pd.DataFrame, dict]]
        List of tuples containing news df in first index,
        dict containing title of news df.
    """
    # Default the search window to the last 7 days.
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=7)).strftime("%Y-%m-%d")
    link = (
        f"https://newsapi.org/v2/everything?q={query}&from={start_date}&sortBy=publishedAt"
        "&language=en"
    )
    if sources:
        # The `domains` filter takes full domains, so ".com" is appended to
        # every comma-separated source name.
        if "," in sources:
            sources = ".com,".join(sources.split(","))
        link += f"&domains={sources}.com"
    link += f"&apiKey={cfg.API_NEWS_TOKEN}"
    response = requests.get(link)
    articles = {}
    # Check that the API response was successful
    if response.status_code == 200:
        response_json = response.json()
        console.print(
            f"{response_json['totalResults']} news articles for",
            f" {query} were found since {start_date}\n",
        )
        # Articles arrive newest-first; reverse when oldest-first requested.
        if show_newest:
            articles = response_json["articles"]
        else:
            articles = response_json["articles"][::-1]
    elif response.status_code == 426:
        console.print(f"Error in request: {response.json()['message']}", "\n")
    elif response.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    elif response.status_code == 429:
        console.print("[red]Exceeded number of calls per minute[/red]\n")
    else:
        console.print(f"Error in request: {response.json()['message']}", "\n")
    tables: List[Tuple[pd.DataFrame, dict]] = []
    if articles:
        # Build one single-column table per article; description is optional.
        for idx, article in enumerate(articles):
            # Unnecessary to use source name because contained in link article["source"]["name"]
            if "description" in article:
                data = [
                    [article["publishedAt"].replace("T", " ").replace("Z", "")],
                    [f"{article['description']}"],
                    [article["url"]],
                ]
                table = pd.DataFrame(
                    data, index=["published", "content", "link"], columns=["Content"]
                )
            else:
                data = [
                    [article["publishedAt"].replace("T", " ").replace("Z", "")],
                    [article["url"]],
                ]
                table = pd.DataFrame(
                    data, index=["published", "link"], columns=["Content"]
                )
            table.columns = table.columns.str.title()
            tables.append((table, article))
            # Stop once `limit` articles have been collected.
            if idx >= limit - 1:
                break
return tables | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/newsapi_model.py | 0.799873 | 0.18072 | newsapi_model.py | pypi |
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
from datetime import datetime, timedelta
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.rich_config import console
from openbb_terminal.common.behavioural_analysis import sentimentinvestor_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.decorators import check_api_key
from openbb_terminal.helper_funcs import (
export_data,
print_rich_table,
plot_autoscale,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_SENTIMENTINVESTOR_TOKEN"])
def display_historical(
    symbol: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    number: int = 100,
    raw: bool = False,
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display historical sentiment data of a ticker,
    and plot a chart with RHI and AHI.

    Parameters
    ----------
    symbol: str
        Ticker symbol to view sentiment data
    start_date: Optional[str]
        Initial date like string or unix timestamp (e.g. 2021-12-21);
        defaults to 7 days ago (UTC) when not provided
    end_date: Optional[str]
        End date like string or unix timestamp (e.g. 2022-01-15);
        defaults to today (UTC) when not provided
    number: int
        Number of results returned by API call
        Maximum 250 per api call
    raw: boolean
        Whether to display raw data, by default False
    limit: int
        Number of results display on the terminal
        Default: 10
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # Default the query window to the trailing week (UTC).
    if start_date is None:
        start_date = (datetime.utcnow() - timedelta(days=7)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.utcnow().strftime("%Y-%m-%d")
    supported_ticker = sentimentinvestor_model.check_supported_ticker(symbol)
    # Check to see if the ticker is supported
    if not supported_ticker:
        logger.error("Ticker not supported")
        console.print(
            f"[red]Ticker {symbol} not supported. Please try another one![/red]\n"
        )
        return
    df = sentimentinvestor_model.get_historical(symbol, start_date, end_date, number)
    if df.empty:
        return
    # This plot has 2 axes: RHI on the primary y-axis and AHI on a twinned
    # secondary y-axis sharing the same x-axis.
    if external_axes is None:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax2 = ax1.twinx()
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    ax1.plot(df.index, df["RHI"], color=theme.get_colors()[0])
    # AHI uses the reversed palette so the two series are visually distinct.
    ax2.plot(df.index, df["AHI"], color=theme.get_colors(reverse=True)[0])
    ax1.set_ylabel("RHI")
    ax1.set_title("Hourly-level data of RHI and AHI")
    ax1.set_xlim(df.index[0], df.index[-1])
    theme.style_primary_axis(ax1)
    ax1.yaxis.set_label_position("left")
    ax2.set_ylabel("AHI")
    theme.style_primary_axis(ax2)
    ax2.yaxis.set_label_position("right")
    # Hide the secondary grid so the two axes' grid lines don't overlap.
    ax2.grid(visible=False)
    # Manually construct the chart legend (twinned axes do not combine
    # their line handles automatically).
    colors = [theme.get_colors()[0], theme.get_colors(reverse=True)[0]]
    lines = [Line2D([0], [0], color=c) for c in colors]
    labels = ["RHI", "AHI"]
    ax2.legend(lines, labels)
    # Only trigger output when we own the figure (no external axes passed).
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "hist",
        df,
    )
    RAW_COLS = ["twitter", "stocktwits", "yahoo", "likes", "RHI", "AHI"]
    if raw:
        df.index = df.index.strftime("%Y-%m-%d %H:%M")
        df.index.name = "Time"
        print_rich_table(
            df[RAW_COLS].head(limit),
            headers=[
                "Twitter",
                "Stocktwits",
                "Yahoo",
                "Likes",
                "RHI",
                "AHI",
            ],
            show_index=True,
            index_name="Time",
            title="Historical Sentiment Data",
        )
@log_start_end(log=logger)
@check_api_key(["API_SENTIMENTINVESTOR_TOKEN"])
def display_trending(
    start_date: Optional[str] = None,
    hour: int = 0,
    number: int = 10,
    limit: int = 10,
    export: str = "",
):
    """Display most talked about tickers within
    the last hour together with their sentiment data.

    Parameters
    ----------
    start_date : Optional[str]
        Initial date, format YYYY-MM-DD; defaults to today when not provided
    hour: int
        Hour of the day in 24-hour notation (e.g. 14)
    number : int
        Number of results returned by API call
        Maximum 250 per api call
    limit: int
        Number of results display on the terminal
        Default: 10
    export: str
        Format to export data
    """
    if start_date is None:
        start_date = datetime.today().strftime("%Y-%m-%d")
    df = sentimentinvestor_model.get_trending(start_date, hour, number)
    if df.empty:
        return
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "trend",
        df,
    )
    RAW_COLS = [
        "total",
        "twitter",
        "stocktwits",
        "yahoo",
        "likes",
        "RHI",
        "AHI",
    ]
    # Only keep the display columns the API actually returned.
    RAW_COLS = [col for col in RAW_COLS if col in df.columns.tolist()]
    df.ticker = df.ticker.str.upper()
    df = df.set_index("ticker")
    df.timestamp_date = pd.to_datetime(df.timestamp_date)
    # NOTE(review): after set_index("ticker") this `[0]` lookup presumably
    # means "first row's timestamp" via positional fallback — confirm it
    # still behaves on newer pandas versions.
    timestamp = df.timestamp_date[0].strftime("%Y-%m-%d %H:%M")
    print_rich_table(
        df[RAW_COLS].head(limit),
        headers=[col.upper() for col in RAW_COLS],
        show_index=True,
        index_name="TICKER",
        title=f"Most trending stocks at {timestamp}",
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/behavioural_analysis/sentimentinvestor_view.py | 0.885452 | 0.329554 | sentimentinvestor_view.py | pypi |
__docformat__ = "numpy"
import logging
from typing import Any, Dict, List, Tuple
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_bullbear(symbol: str) -> Tuple[int, int, int, int]:
    """Gets bullbear sentiment for ticker [Source: stocktwits].

    Parameters
    ----------
    symbol : str
        Ticker symbol to look at

    Returns
    -------
    Tuple[int, int, int, int]
        Watchlist count,
        Number of cases found for ticker,
        Number of bullish statements,
        Number of bearish statements,
    """
    response = requests.get(
        f"https://api.stocktwits.com/api/2/streams/symbol/{symbol}.json"
    )
    if response.status_code != 200:
        # Any non-OK response yields all-zero counts.
        return 0, 0, 0, 0
    payload = response.json()
    watchlist_count = payload["symbol"]["watchlist_count"]
    n_cases = n_bull = n_bear = 0
    # Count only messages that carry an explicit sentiment tag.
    for message in payload["messages"]:
        sentiment = message["entities"]["sentiment"]
        if not sentiment:
            continue
        n_cases += 1
        if sentiment["basic"] == "Bullish":
            n_bull += 1
        elif sentiment["basic"] == "Bearish":
            n_bear += 1
    return watchlist_count, n_cases, n_bull, n_bear
@log_start_end(log=logger)
def get_messages(symbol: str, limit: int = 30) -> pd.DataFrame:
    """Get last messages for a given ticker [Source: stocktwits].

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    limit : int
        Number of messages to get

    Returns
    -------
    pd.DataFrame
        Dataframe of messages
    """
    response = requests.get(
        f"https://api.stocktwits.com/api/2/streams/symbol/{symbol}.json"
    )
    if response.status_code != 200:
        return pd.DataFrame()
    # Keep only the message bodies of the most recent `limit` messages.
    bodies = [message["body"] for message in response.json()["messages"][:limit]]
    return pd.DataFrame(bodies)
@log_start_end(log=logger)
def get_trending() -> pd.DataFrame:
    """Get trending tickers from stocktwits [Source: stocktwits].

    Returns
    -------
    pd.DataFrame
        Dataframe of trending tickers and watchlist count
    """
    response = requests.get("https://api.stocktwits.com/api/2/trending/symbols.json")
    if response.status_code != 200:
        return pd.DataFrame()
    # One row per trending symbol: ticker, watchlist count and title.
    rows = [
        [entry["symbol"], entry["watchlist_count"], entry["title"]]
        for entry in response.json()["symbols"]
    ]
    return pd.DataFrame(rows, columns=["Ticker", "Watchlist Count", "Name"])
@log_start_end(log=logger)
def get_stalker(user: str, limit: int = 30) -> List[Dict[str, Any]]:
    """Gets messages from given user [Source: stocktwits].

    Parameters
    ----------
    user : str
        User to get posts for
    limit : int, optional
        Number of posts to get, by default 30

    Returns
    -------
    List[Dict[str, Any]]
        List of posts (empty when the request fails)
    """
    # Fetch the user's public message stream from the stocktwits REST API.
    result = requests.get(f"https://api.stocktwits.com/api/2/streams/user/{user}.json")
    if result.status_code == 200:
        # Keep at most `limit` of the most recent messages.
        return list(result.json()["messages"][:limit])
return [] | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/behavioural_analysis/stocktwits_model.py | 0.867162 | 0.204144 | stocktwits_model.py | pypi |
__docformat__ = "numpy"
from datetime import datetime, timedelta
import logging
from typing import Optional
import pandas as pd
import requests
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import clean_tweet, get_data
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
analyzer = SentimentIntensityAnalyzer()
@log_start_end(log=logger)
@check_api_key(["API_TWITTER_BEARER_TOKEN"])
def load_analyze_tweets(
    symbol: str,
    limit: int = 100,
    start_date: Optional[str] = "",
    end_date: Optional[str] = "",
) -> pd.DataFrame:
    """Load tweets from twitter API and analyzes using VADER.

    Parameters
    ----------
    symbol: str
        Ticker symbol to search twitter for
    limit: int
        Number of tweets to analyze
    start_date: Optional[str]
        If given, the start time to get tweets from
    end_date: Optional[str]
        If given, the end time to get tweets from

    Returns
    -------
    df_tweet: pd.DataFrame
        Dataframe of tweets and sentiment (empty on any API failure)
    """
    params = {
        "query": rf"(\${symbol}) (lang:en)",
        "max_results": str(limit),
        "tweet.fields": "created_at,lang",
    }
    if start_date:
        # Assign from and to datetime parameters for the API
        params["start_time"] = start_date
    if end_date:
        params["end_time"] = end_date
    # Request Twitter API
    response = requests.get(
        "https://api.twitter.com/2/tweets/search/recent",
        params=params,  # type: ignore
        headers={"authorization": "Bearer " + cfg.API_TWITTER_BEARER_TOKEN},
    )
    # Create dataframe
    df_tweets = pd.DataFrame()
    # Check that the API response was successful
    if response.status_code == 200:
        tweets = []
        for tweet in response.json()["data"]:
            row = get_data(tweet)
            tweets.append(row)
        df_tweets = pd.DataFrame(tweets)
    elif response.status_code == 401:
        console.print("Twitter API Key provided is incorrect\n")
        return pd.DataFrame()
    elif response.status_code == 400:
        console.print(
            """
            Status Code 400.
            This means you are requesting data from beyond the API's 7 day limit"""
        )
        return pd.DataFrame()
    elif response.status_code == 403:
        # BUG FIX: message previously read "you're twitter credentials"
        # (wrong word); corrected to "your".
        console.print(
            f"""
            Status code 403.
            It seems your twitter credentials are invalid - {response.text}
            """
        )
        return pd.DataFrame()
    else:
        console.print(
            f"""
            Status code {response.status_code}.
            Something went wrong - {response.text}
            """
        )
        return pd.DataFrame()
    sentiments = []
    pos = []
    neg = []
    neu = []
    for s_tweet in df_tweets["text"].to_list():
        tweet = clean_tweet(s_tweet, symbol)
        # PERF FIX: score each tweet once; the original called
        # analyzer.polarity_scores() four times per tweet on the same text.
        scores = analyzer.polarity_scores(tweet)
        sentiments.append(scores["compound"])
        pos.append(scores["pos"])
        neg.append(scores["neg"])
        neu.append(scores["neu"])
    # Add sentiments to tweets dataframe
    df_tweets["sentiment"] = sentiments
    df_tweets["positive"] = pos
    df_tweets["negative"] = neg
    df_tweets["neutral"] = neu
    return df_tweets
@log_start_end(log=logger)
def get_sentiment(
    symbol: str,
    n_tweets: int = 15,
    n_days_past: int = 2,
) -> pd.DataFrame:
    """Get sentiments from symbol.

    Parameters
    ----------
    symbol: str
        Stock ticker symbol to get sentiment for
    n_tweets: int
        Number of tweets to get per hour
    n_days_past: int
        Number of days to extract tweets for

    Returns
    -------
    df_sentiment: pd.DataFrame
        Dataframe of sentiment (empty when any hourly fetch fails)
    """
    # Date format string required by twitter
    dt_format = "%Y-%m-%dT%H:%M:%SZ"
    # Algorithm to extract: walk backwards hour by hour from (now - 20s)
    # until n_days_past days are covered, fetching n_tweets per hour.
    dt_recent = datetime.utcnow() - timedelta(seconds=20)
    dt_old = dt_recent - timedelta(days=n_days_past)
    console.print(
        f"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)"
    )
    df_tweets = pd.DataFrame(
        columns=[
            "created_at",
            "text",
            "sentiment",
            "positive",
            "negative",
            "neutral",
        ]
    )
    while True:
        # Iterate until we haven't passed the old number of days
        if dt_recent < dt_old:
            break
        # Update past datetime
        dt_past = dt_recent - timedelta(minutes=60)
        temp = load_analyze_tweets(
            symbol,
            n_tweets,
            start_date=dt_past.strftime(dt_format),
            end_date=dt_recent.strftime(dt_format),
        )
        # Abort entirely if any hourly fetch failed or returned nothing.
        if (isinstance(temp, pd.DataFrame) and temp.empty) or (
            not isinstance(temp, pd.DataFrame) and not temp
        ):
            return pd.DataFrame()
        df_tweets = pd.concat([df_tweets, temp])
        # Print progress once per calendar day crossed.
        if dt_past.day < dt_recent.day:
            console.print(
                f"From {dt_past.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)"
            )
        # Update recent datetime
        dt_recent = dt_past
    # Sort tweets per date
    df_tweets.sort_index(ascending=False, inplace=True)
    df_tweets["cumulative_compound"] = df_tweets["sentiment"].cumsum()
    df_tweets["prob_sen"] = 1
    # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)
    df_tweets.reset_index(inplace=True)
    df_tweets["Month"] = pd.to_datetime(df_tweets["created_at"]).apply(
        lambda x: x.month
    )
    df_tweets["Day"] = pd.to_datetime(df_tweets["created_at"]).apply(lambda x: x.day)
    df_tweets["date"] = pd.to_datetime(df_tweets["created_at"])
    df_tweets = df_tweets.sort_values(by="date")
    # Recompute the cumulative compound score in chronological order.
    df_tweets["cumulative_compound"] = df_tweets["sentiment"].cumsum()
return df_tweets | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/behavioural_analysis/twitter_model.py | 0.840701 | 0.266906 | twitter_model.py | pypi |
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.common.behavioural_analysis import google_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_mentions(
    symbol: str,
    start_date: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots bars of stock's interest over time. [Source: Google]

    Parameters
    ----------
    symbol : str
        Ticker symbol
    start_date : str
        Start date as YYYY-MM-DD string; empty string plots the full history
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_interest = google_model.get_mentions(symbol)
    if df_interest.empty:
        return
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.set_title(f"Interest over time on {symbol}")
    if start_date:
        # Restrict the series to the requested start date onwards.
        df_interest = df_interest[start_date:]  # type: ignore
        ax.bar(df_interest.index, df_interest[symbol], width=2)
        # Re-draw the most recent bar with the theme's volume-bar width.
        ax.bar(
            df_interest.index[-1],
            df_interest[symbol].values[-1],
            width=theme.volume_bar_width,
        )
    else:
        ax.bar(df_interest.index, df_interest[symbol], width=1)
        ax.bar(
            df_interest.index[-1],
            df_interest[symbol].values[-1],
            width=theme.volume_bar_width,
        )
    ax.set_ylabel("Interest [%]")
    ax.set_xlim(df_interest.index[0], df_interest.index[-1])
    theme.style_primary_axis(ax)
    # Only render when we own the figure (no external axes passed).
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "mentions", df_interest
    )
@log_start_end(log=logger)
def display_correlation_interest(
    symbol: str,
    data: pd.DataFrame,
    words: List[str],
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots interest over time of words/sentences versus stock price. [Source: Google].

    Parameters
    ----------
    symbol : str
        Ticker symbol to check price
    data : pd.DataFrame
        Price dataframe; must contain an "Adj Close" column
    words : List[str]
        Words to check for interest for
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None

    Notes
    -----
    The exported dataframe holds the interest series of the *last* word in
    `words`, matching the historical behavior of this command.
    """
    # Nothing to plot — and `df_interest` would be undefined at export — without words
    if not words:
        return

    # This figure uses 2 axes: price on top, search interest below
    if external_axes is None:
        _, ax = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
            nrows=2,
            ncols=1,
            sharex=True,
            gridspec_kw={"height_ratios": [1, 2]},
        )
    elif is_valid_axes_count(external_axes, 2):
        # Fix: the plot draws on ax[0] and ax[1]; the previous code validated
        # and unpacked a single axis, which made external axes unusable
        # ('Axes' object is not subscriptable).
        ax = external_axes
    else:
        return

    ax[0].set_title(
        f"{symbol.upper()} stock price and interest over time on {','.join(words)}"
    )
    ax[0].plot(
        data.index,
        data["Adj Close"].values,
        c="#FCED00",
    )
    ax[0].set_ylabel("Stock Price")
    ax[0].set_xlim(data.index[0], data.index[-1])

    # One interest line per word, colored from the theme palette
    colors = theme.get_colors()[1:]
    for idx, word in enumerate(words):
        df_interest = google_model.get_mentions(word)
        ax[1].plot(df_interest.index, df_interest[word], "-", color=colors[idx])

    ax[1].set_ylabel("Interest [%]")
    ax[1].set_xlim(data.index[0], data.index[-1])
    ax[1].legend(words)
    theme.style_primary_axis(ax[0])
    theme.style_primary_axis(ax[1])

    if external_axes is None:
        theme.visualize_output()

    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "interest", df_interest
    )
@log_start_end(log=logger)
def display_regions(
    symbol: str,
    limit: int = 5,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots bars of regions based on stock's interest. [Source: Google].

    Parameters
    ----------
    symbol : str
        Ticker symbol
    limit: int
        Number of regions to show
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    interest_by_region = google_model.get_regions(symbol)
    if interest_by_region.empty:
        return

    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    # Keep the top `limit` regions, then order lowest-to-highest so the
    # horizontal bars render with the biggest region on top
    top_regions = interest_by_region.head(limit).sort_values([symbol], ascending=True)

    ax.set_title(f"Regions with highest interest in {symbol}")
    ax.barh(
        y=top_regions.index,
        width=top_regions[symbol],
        color=theme.get_colors(reverse=True),
        zorder=3,
    )
    ax.set_xlabel("Interest [%]")
    ax.set_ylabel("Region")
    theme.style_primary_axis(ax)

    if external_axes is None:
        theme.visualize_output()

    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "regions", top_regions
    )
@log_start_end(log=logger)
def display_queries(symbol: str, limit: int = 5, export: str = ""):
    """Prints table showing top related queries with this stock's query. [Source: Google].

    Parameters
    ----------
    symbol : str
        Ticker symbol
    limit: int
        Number of queries to show
    export: str
        Format to export data
        {"csv","json","xlsx","png","jpg","pdf","svg"}
    """
    # Dataframe of top related queries (already trimmed to `limit` by the model)
    related = google_model.get_queries(symbol, limit)
    if related.empty:
        return

    print_rich_table(
        related,
        headers=list(related.columns),
        title=f"Top {symbol}'s related queries",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "queries",
        related,
    )
@log_start_end(log=logger)
def display_rise(symbol: str, limit: int = 10, export: str = ""):
    """Prints top rising related queries with this stock's query. [Source: Google].

    Parameters
    ----------
    symbol : str
        Ticker symbol
    limit: int
        Number of queries to show
    export: str
        Format to export data
    """
    rising_queries = google_model.get_rise(symbol, limit)
    if rising_queries.empty:
        return

    print_rich_table(
        rising_queries,
        headers=list(rising_queries.columns),
        title=f"Top rising {symbol}'s related queries",
    )

    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "rise", rising_queries
    )
__docformat__ = "numpy"
import logging
import pandas as pd
from pytrends.request import TrendReq
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_mentions(symbol: str) -> pd.DataFrame:
    """Get interest over time from google api [Source: google].

    Parameters
    ----------
    symbol: str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Dataframe of interest over time (empty on error)
    """
    pytrend = None
    try:
        pytrend = TrendReq()
        pytrend.build_payload(kw_list=[symbol])
        return pytrend.interest_over_time()
    except Exception as e:
        # Fix: `pytrend` is unbound when TrendReq() itself raised, so the old
        # `pytrend.google_rl` lookup masked the real error with a NameError.
        rate_limit = getattr(pytrend, "google_rl", None)
        if rate_limit:
            console.print(f"[red]Too many requests: {rate_limit}[/red]\n")
        else:
            console.print(f"[red]{str(e)}[/red]\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_regions(symbol: str) -> pd.DataFrame:
    """Get interest by region from google api [Source: google].

    Parameters
    ----------
    symbol: str
        Ticker symbol to look at

    Returns
    -------
    pd.DataFrame
        Dataframe of interest by region, sorted by interest descending
        (empty on error)
    """
    pytrend = None
    try:
        pytrend = TrendReq()
        pytrend.build_payload(kw_list=[symbol])
        return pytrend.interest_by_region().sort_values([symbol], ascending=False)
    except Exception as e:
        # Fix: `pytrend` is unbound when TrendReq() itself raised, so the old
        # `pytrend.google_rl` lookup masked the real error with a NameError.
        rate_limit = getattr(pytrend, "google_rl", None)
        if rate_limit:
            console.print(f"[red]Too many requests: {rate_limit}[/red]\n")
        else:
            console.print(f"[red]{str(e)}[/red]\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_queries(symbol: str, limit: int = 10) -> pd.DataFrame:
    """Get related queries from google api [Source: google].

    Parameters
    ----------
    symbol: str
        Stock ticker symbol to compare
    limit: int
        Number of queries to show

    Returns
    -------
    pd.DataFrame
        Dataframe of related queries with "value" formatted as a percent
        string (empty on error)
    """
    pytrend = None
    try:
        pytrend = TrendReq()
        pytrend.build_payload(kw_list=[symbol])
        df = pytrend.related_queries()
        # related_queries() returns {keyword: {"top": df, "rising": df}}
        df = df[symbol]["top"].head(limit)
        # Express relative popularity as a percentage string for display
        df["value"] = df["value"].apply(lambda x: str(x) + "%")
        return df
    except Exception as e:
        # Fix: `pytrend` is unbound when TrendReq() itself raised, so the old
        # `pytrend.google_rl` lookup masked the real error with a NameError.
        rate_limit = getattr(pytrend, "google_rl", None)
        if rate_limit:
            console.print(f"[red]Too many requests: {rate_limit}[/red]\n")
        else:
            console.print(f"[red]{str(e)}[/red]\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_rise(symbol: str, limit: int = 10) -> pd.DataFrame:
    """Get top rising related queries with this stock's query [Source: google].

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    limit: int
        Number of queries to show

    Returns
    -------
    pd.DataFrame
        Dataframe containing rising related queries (empty on error)
    """
    pytrend = None
    try:
        pytrend = TrendReq()
        pytrend.build_payload(kw_list=[symbol])
        df = pytrend.related_queries()
        # related_queries() returns {keyword: {"top": df, "rising": df}}
        df = df[symbol]["rising"].head(limit)
        return df
    except Exception as e:
        # Fix: `pytrend` is unbound when TrendReq() itself raised, so the old
        # `pytrend.google_rl` lookup masked the real error with a NameError.
        rate_limit = getattr(pytrend, "google_rl", None)
        if rate_limit:
            console.print(f"[red]Too many requests: {rate_limit}[/red]\n")
        else:
            console.print(f"[red]{str(e)}[/red]\n")
        return pd.DataFrame()
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.behavioural_analysis import finbrain_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal import rich_config
logger = logging.getLogger(__name__)
def lambda_sentiment_coloring(val: float, last_val: float) -> str:
    """Wrap `val` in rich green markup when above `last_val`, red otherwise."""
    color = "green" if float(val) > last_val else "red"
    return f"[{color}]{val}[/{color}]"
@log_start_end(log=logger)
def display_sentiment_analysis(
    symbol: str,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Sentiment analysis from FinBrain. Prints table if raw is True. [Source: FinBrain]

    Parameters
    ----------
    symbol: str
        Ticker symbol to get the sentiment analysis from
    raw: bool
        Display raw table data instead of the plot
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    sentiment = finbrain_model.get_sentiment(symbol)
    if sentiment.empty:
        console.print("No sentiment data found.\n")
        return
    if not raw:
        # This plot has 1 axis
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Scatter each day's score, colored by sign (>= 0 up_color, < 0 down_color)
        for index, row in sentiment.iterrows():
            if float(row["Sentiment Analysis"]) >= 0:
                ax.scatter(
                    index, float(row["Sentiment Analysis"]), s=100, color=theme.up_color
                )
            else:
                ax.scatter(
                    index,
                    float(row["Sentiment Analysis"]),
                    s=100,
                    color=theme.down_color,
                )
        # Zero line separating positive from negative sentiment
        ax.axhline(y=0, linestyle="--")
        ax.set_xlabel("Time")
        ax.set_ylabel("Sentiment")
        # index[-1] is used as the "since" date — assumes the oldest entry is
        # last in the index; TODO confirm ordering from finbrain_model
        start_date = sentiment.index[-1].strftime("%Y/%m/%d")
        ax.set_title(
            f"FinBrain's Sentiment Analysis for {symbol.upper()} since {start_date}"
        )
        # Scores are in [-1, 1]; pad slightly so markers at the extremes show
        ax.set_ylim([-1.1, 1.1])
        senValues = np.array(pd.to_numeric(sentiment["Sentiment Analysis"].values))
        # NOTE(review): 0 * len(sentiment) is just the scalar 0, so this is
        # np.array(0); np.zeros(len(sentiment)) was likely intended — the
        # comparisons below work the same via numpy broadcasting.
        senNone = np.array(0 * len(sentiment))
        # Shade negative stretches in down_color...
        ax.fill_between(
            sentiment.index,
            pd.to_numeric(sentiment["Sentiment Analysis"].values),
            0,
            where=(senValues < senNone),
            alpha=0.30,
            color=theme.down_color,
            interpolate=True,
        )
        # ...and non-negative stretches in up_color
        ax.fill_between(
            sentiment.index,
            pd.to_numeric(sentiment["Sentiment Analysis"].values),
            0,
            where=(senValues >= senNone),
            alpha=0.30,
            color=theme.up_color,
            interpolate=True,
        )
        theme.style_primary_axis(ax)
        if external_axes is None:
            theme.visualize_output()
    else:
        # Raw table path: color per-row when the terminal supports rich colors
        if rich_config.USE_COLOR:
            color_df = sentiment["Sentiment Analysis"].apply(
                lambda_sentiment_coloring, last_val=0
            )
            color_df = pd.DataFrame(
                data=color_df.values,
                index=pd.to_datetime(sentiment.index).strftime("%Y-%m-%d"),
            )
            print_rich_table(
                color_df,
                headers=["Sentiment"],
                title="FinBrain Ticker Sentiment",
                show_index=True,
            )
        else:
            print_rich_table(
                pd.DataFrame(
                    data=sentiment.values,
                    index=pd.to_datetime(sentiment.index).strftime("%Y-%m-%d"),
                ),
                headers=["Sentiment"],
                title="FinBrain Ticker Sentiment",
                show_index=True,
            )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "headlines", sentiment
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/behavioural_analysis/finbrain_view.py | 0.830594 | 0.353819 | finbrain_view.py | pypi |
__docformat__ = "numpy"
import logging
from openbb_terminal.common.behavioural_analysis import stocktwits_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_bullbear(symbol: str):
    """
    Print bullbear sentiment based on last 30 messages on the board.
    Also prints the watchlist_count. [Source: Stocktwits]

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    """
    watchlist_count, n_cases, n_bull, n_bear = stocktwits_model.get_bullbear(symbol)
    console.print(f"[yellow]Watchlist count[/yellow]: {watchlist_count}")
    # Guard clause: nothing labelled bullish/bearish in the stream
    if n_cases <= 0:
        console.print("No messages found")
        return
    console.print(f"\nLast {n_cases} sentiment messages:")
    console.print(f"[green]Bullish:[/green] {round(100*n_bull/n_cases, 2)}%")
    console.print(f"[red]Bearish:[/red] {round(100*n_bear/n_cases, 2)}%")
@log_start_end(log=logger)
def display_messages(symbol: str, limit: int = 30):
    """Prints up to 30 of the last messages on the board. [Source: Stocktwits].

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    limit: int
        Number of messages to get
    """
    messages = stocktwits_model.get_messages(symbol, limit)
    # Guard clause: empty stream for this ticker
    if messages.empty:
        console.print("No messages found in Stocktwits stream")
        return
    print_rich_table(
        messages,
        headers=["MESSAGES"],
        show_index=False,
        title="Last Messages on Board",
    )
@log_start_end(log=logger)
def display_trending():
    """Show trending stocks on stocktwits. [Source: Stocktwits]"""
    df_trending = stocktwits_model.get_trending()
    print_rich_table(
        df_trending,
        headers=list(df_trending.columns),
        show_index=False,
        title="Trending Stocks",
    )
@log_start_end(log=logger)
def display_stalker(user: str, limit: int = 10):
    """Show last posts for given user.

    Parameters
    ----------
    user : str
        Stocktwits username
    limit : int, optional
        Number of messages to show, by default 10
    """
    for message in stocktwits_model.get_stalker(user, limit):
        console.print("-------------------")
        # "2021-01-01T12:00:00Z" -> "2021-01-01 12:00:00"
        timestamp = message["created_at"].replace("T", " ").replace("Z", "")
        console.print(f"[yellow]{timestamp}[/yellow]")
        console.print(message["body"] + "\n")
__docformat__ = "numpy"
import logging
import os
import warnings
from datetime import datetime
from typing import Dict, List, Optional
import finviz
import matplotlib.pyplot as plt
import pandas as pd
import praw
import seaborn as sns
from openbb_terminal.common.behavioural_analysis import reddit_model
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
# pylint: disable=R0913
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def print_and_record_reddit_post(
    submissions_dict: Dict, submission: praw.models.reddit.submission.Submission
):
    """Prints a reddit submission and records it into `submissions_dict`.

    Parameters
    ----------
    submissions_dict : Dict
        Dictionary for storing reddit post information; mutated in place,
        keyed by submission id
    submission : praw.models.reddit.submission.Submission
        Submission to show
    """
    # Refactor data
    s_datetime = datetime.utcfromtimestamp(submission.created_utc).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    s_link = f"https://old.reddit.com{submission.permalink}"
    # One "count name" entry per award, newline-separated
    s_all_awards = "".join(
        f"{award['count']} {award['name']}\n" for award in submission.all_awardings
    )
    # Fix: each entry ends with a single "\n"; the old `[:-2]` slice also
    # chopped the last character of the final award name.
    s_all_awards = s_all_awards.rstrip("\n")
    # Create dictionary with data to construct dataframe allows to save data
    submissions_dict[submission.id] = {
        "created_utc": s_datetime,
        "subreddit": submission.subreddit,
        "link_flair_text": submission.link_flair_text,
        "title": submission.title,
        "score": submission.score,
        "link": s_link,
        "num_comments": submission.num_comments,
        "upvote_ratio": submission.upvote_ratio,
        "awards": s_all_awards,
    }
    # Print post data collected so far
    console.print(f"[yellow]{s_datetime}[/yellow] - {submission.title}")
    console.print(f"[blue]{s_link}[/blue]\n")
    columns = ["Subreddit", "Flair", "Score", "# Comments", "Upvote %", "Awards"]
    data = [
        submission.subreddit,
        submission.link_flair_text,
        submission.score,
        submission.num_comments,
        f"{round(100 * submission.upvote_ratio)}%",
        s_all_awards,
    ]
    df = pd.DataFrame([data], columns=columns)
    print_rich_table(
        df, headers=list(df.columns), show_index=False, title="Reddit Submission"
    )
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def print_reddit_post(sub: tuple):
    """Prints reddit submission.

    Parameters
    ----------
    sub : tuple
        (index, row) pair from a submissions dataframe's iterrows()
    """
    row = sub[1]
    values = list(row)
    # Positional layout of the row: date first, title fourth, link last
    date, title, link = values[0], values[3], values[-1]
    console.print(f"[yellow]{date}[/yellow] - {title}")
    console.print(f"[blue]{link}[/blue]\n")
    columns = [
        "Subreddit",
        "Flair",
        "Score",
        "# Comments",
        "Upvote %",
        "Awards",
    ]
    print_rich_table(
        pd.DataFrame(row[columns]).T,
        headers=columns,
        show_index=False,
        title="Reddit Submission",
    )
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def display_watchlist(limit: int = 5):
    """Prints other users watchlist. [Source: Reddit].

    Parameters
    ----------
    limit: int
        Maximum number of submissions to look at
    """
    subs, d_watchlist_tickers, n_flair_posts_found = reddit_model.get_watchlists(limit)
    if subs:
        for sub in subs:
            print_and_record_reddit_post({}, sub)
            console.print("")
    if n_flair_posts_found > 0:
        # Sort (ticker, mention_count) pairs by mention count, most-mentioned first
        lt_watchlist_sorted = sorted(
            d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True
        )
        s_watchlist_tickers = ""
        n_tickers = 0
        for t_ticker in lt_watchlist_sorted:
            try:
                # If try doesn't trigger exception, it means that this stock exists on finviz
                # thus we can print it.
                finviz.get_stock(t_ticker[0])
                # Only report tickers mentioned more than once
                if int(t_ticker[1]) > 1:
                    s_watchlist_tickers += f"{t_ticker[1]} {t_ticker[0]}, "
                    n_tickers += 1
            except Exception:
                # Unknown-on-finviz tickers are silently skipped (best-effort filter)
                # console.print(e, "\n")
                pass
        if n_tickers:
            console.print(
                "The following stock tickers have been mentioned more than once across the previous watchlists:"
            )
            # Trim the trailing ", " from the accumulated string
            console.print(s_watchlist_tickers[:-2] + "\n")
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def display_popular_tickers(
    limit: int = 10, post_limit: int = 50, subreddits: str = "", export: str = ""
):
    """Prints table showing latest popular tickers. [Source: Reddit].

    Parameters
    ----------
    limit : int
        Number of top tickers to get
    post_limit : int
        How many posts to analyze in each subreddit
    subreddits : str, optional
        String of comma separated subreddits.
    export : str
        Format to export dataframe
    """
    popular_df = reddit_model.get_popular_tickers(limit, post_limit, subreddits)
    if popular_df.empty:
        console.print("No tickers found")
    else:
        print_rich_table(
            popular_df,
            headers=list(popular_df.columns),
            show_index=False,
            title=f"The following TOP {limit} tickers have been mentioned",
        )
    # Export runs regardless of whether anything was found (matches prior behavior)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "popular",
        popular_df,
    )
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def display_spac_community(limit: int = 10, popular: bool = False):
    """Prints tickers mentioned in r/SPACs [Source: Reddit].

    Parameters
    ----------
    limit: int
        Number of posts to look through
    popular: bool
        Search by popular instead of new
    """
    subs, d_watchlist_tickers = reddit_model.get_spac_community(limit, popular)
    if not subs.empty:
        for sub in subs.iterrows():
            print_reddit_post(sub)
            console.print("")
    if d_watchlist_tickers:
        # Sort (ticker, mention_count) pairs by mention count, most-mentioned first
        lt_watchlist_sorted = sorted(
            d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True
        )
        s_watchlist_tickers = ""
        n_tickers = 0
        for t_ticker in lt_watchlist_sorted:
            try:
                # If try doesn't trigger exception, it means that this stock exists on finviz
                # thus we can print it.
                finviz.get_stock(t_ticker[0])
                # Only report tickers mentioned more than once
                if int(t_ticker[1]) > 1:
                    s_watchlist_tickers += f"{t_ticker[1]} {t_ticker[0]}, "
                    n_tickers += 1
            except Exception:
                # Unknown-on-finviz tickers are silently skipped (best-effort filter)
                # console.print(e, "\n")
                pass
        if n_tickers:
            console.print(
                "The following stock tickers have been mentioned more than once across the previous SPACs:"
            )
            # Trim the trailing ", " from the accumulated string
            console.print(s_watchlist_tickers[:-2])
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def display_spac(limit: int = 5):
    """Prints posts containing 'spac' in top communities.

    Parameters
    ----------
    limit: int
        Number of posts to get from each subreddit
    """
    warnings.filterwarnings("ignore")  # To avoid printing the warning
    subs, d_watchlist_tickers, n_flair_posts_found = reddit_model.get_spac(limit)
    if not subs.empty:
        for sub in subs.iterrows():
            print_reddit_post(sub)
            console.print("")
    if n_flair_posts_found > 0:
        # Sort (ticker, mention_count) pairs by mention count, most-mentioned first
        lt_watchlist_sorted = sorted(
            d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True
        )
        s_watchlist_tickers = ""
        n_tickers = 0
        for t_ticker in lt_watchlist_sorted:
            try:
                # If try doesn't trigger exception, it means that this stock exists on finviz
                # thus we can print it.
                finviz.get_stock(t_ticker[0])
                # Only report tickers mentioned more than once
                if int(t_ticker[1]) > 1:
                    s_watchlist_tickers += f"{t_ticker[1]} {t_ticker[0]}, "
                    n_tickers += 1
            except Exception:
                # Unknown-on-finviz tickers are silently skipped (best-effort filter)
                pass
        if n_tickers:
            console.print(
                "The following stock tickers have been mentioned more than once across the previous SPACs:"
            )
            # Trim the trailing ", " from the accumulated string
            console.print(s_watchlist_tickers[:-2])
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def display_wsb_community(limit: int = 10, new: bool = False):
    """Prints WSB posts.

    Parameters
    ----------
    limit : int, optional
        Number of posts to look at, by default 10
    new : bool, optional
        Flag to sort by new instead of hot, by default False
    """
    posts = reddit_model.get_wsb_community(limit, new)
    if posts.empty:
        return
    for post in posts.iterrows():
        print_reddit_post(post)
        console.print("")
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def display_due_diligence(
    symbol: str, limit: int = 10, n_days: int = 3, show_all_flairs: bool = False
):
    """Prints Reddit due diligence data for a given ticker.

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    limit: int
        Number of posts to get
    n_days: int
        Number of days back to get posts
    show_all_flairs: bool
        Search through all flairs (apart from Yolo and Meme)
    """
    posts = reddit_model.get_due_dilligence(symbol, limit, n_days, show_all_flairs)
    # Guard clause: no DD posts matched the search
    if posts.empty:
        console.print(f"No DD posts found for {symbol}\n")
        return
    for post in posts.iterrows():
        print_reddit_post(post)
        console.print("")
@log_start_end(log=logger)
@check_api_key(
    [
        "API_REDDIT_CLIENT_ID",
        "API_REDDIT_CLIENT_SECRET",
        "API_REDDIT_USERNAME",
        "API_REDDIT_USER_AGENT",
        "API_REDDIT_PASSWORD",
    ]
)
def display_redditsent(
    symbol: str,
    sortby: str = "relevance",
    limit: int = 100,
    graphic: bool = False,
    time_frame: str = "week",
    full_search: bool = True,
    subreddits: str = "all",
    display: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Reddit sentiment about a search term. Prints table showing if display is True.

    Parameters
    ----------
    symbol: str
        The ticker symbol being search for in Reddit
    sortby: str
        Type of search
    limit: int
        Number of posts to get at most
    graphic: bool
        Displays box and whisker plot
    time_frame: str
        Time frame for search
    full_search: bool
        Enable comprehensive search for ticker
    subreddits: str
        Comma-separated list of subreddits
    display: bool
        Enable printing of raw sentiment values for each post
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]]
        If supplied, expect 1 external axis
    """
    df, polarity_scores, avg_polarity = reddit_model.get_posts_about(
        symbol, limit, sortby, time_frame, full_search, subreddits
    )
    if df.empty:
        console.print(f"No posts for {symbol} found")
        return
    if display:
        print_rich_table(df=df)
    if graphic:
        # NOTE(review): uses truthiness (`not external_axes`) rather than the
        # `is None` check used elsewhere in this module — an empty list is
        # treated like None here.
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Box-and-whisker of the per-post polarity scores
        sns.boxplot(x=polarity_scores, ax=ax)
        ax.set_title(f"Sentiment Score of {symbol}")
        ax.set_xlabel("Sentiment Score")
        if not external_axes:
            theme.visualize_output()
    console.print(f"Sentiment Analysis for {symbol} is {avg_polarity}\n")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "polarity_scores",
        df,
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/behavioural_analysis/reddit_view.py | 0.621196 | 0.178329 | reddit_view.py | pypi |
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dateutil import parser as dparse
import openbb_terminal.config_plot as cfg_plot
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.behavioural_analysis import twitter_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
get_closing_price,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_inference(symbol: str, limit: int = 100, export: str = ""):
    """Prints Inference sentiment from past n tweets.

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    limit: int
        Number of tweets to analyze
    export: str
        Format to export tweet dataframe
    """
    tweets = twitter_model.load_analyze_tweets(symbol, limit)
    # The model may return an empty DataFrame or a falsy non-DataFrame on failure
    if (isinstance(tweets, pd.DataFrame) and tweets.empty) or (
        not isinstance(tweets, pd.DataFrame) and not tweets
    ):
        return

    # Tweets arrive newest-first: the window runs from the last row to the first
    dt_from = dparse.parse(tweets["created_at"].values[-1])
    dt_to = dparse.parse(tweets["created_at"].values[0])
    console.print(f"From: {dt_from.strftime('%Y-%m-%d %H:%M:%S')}")
    console.print(f"To: {dt_to.strftime('%Y-%m-%d %H:%M:%S')}")
    console.print(f"{len(tweets)} tweets were analyzed.")
    seconds_per_tweet = (dt_to - dt_from).total_seconds() / len(tweets)
    console.print(f"Frequency of approx 1 tweet every {round(seconds_per_tweet)} seconds.")

    # Share of tweets whose positive score beats (or trails) the negative score
    positive = tweets["positive"]
    negative = tweets["negative"]
    percent_pos = len(np.where(positive > negative)[0]) / len(tweets)
    percent_neg = len(np.where(positive < negative)[0]) / len(tweets)
    total_sent = np.round(np.sum(tweets["sentiment"]), 2)
    mean_sent = np.round(np.mean(tweets["sentiment"]), 2)
    console.print(f"The summed compound sentiment of {symbol} is: {total_sent}")
    console.print(f"The average compound sentiment of {symbol} is: {mean_sent}")
    console.print(
        f"Of the last {len(tweets)} tweets, {100*percent_pos:.2f} % had a higher positive sentiment"
    )
    console.print(
        f"Of the last {len(tweets)} tweets, {100*percent_neg:.2f} % had a higher negative sentiment"
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "infer", tweets)
@log_start_end(log=logger)
def display_sentiment(
    symbol: str,
    n_tweets: int = 15,
    n_days_past: int = 2,
    compare: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots sentiments from symbol

    Parameters
    ----------
    symbol: str
        Stock ticker symbol to get sentiment for
    n_tweets: int
        Number of tweets to get per hour
    n_days_past: int
        Number of days to extract tweets for
    compare: bool
        Show corresponding change in stock price
    export: str
        Format to export tweet dataframe
    external_axes: Optional[List[plt.Axes]], optional
        External axes (2 axes expected, or 3 when compare=True), by default None
    """
    df_tweets = twitter_model.get_sentiment(symbol, n_tweets, n_days_past)
    if df_tweets.empty:
        return
    ax1, ax2, ax3 = None, None, None
    if compare:
        # This plot has 3 axes (cumulative sentiment, polarity bars, price)
        if external_axes is None:
            _, axes = plt.subplots(
                3, 1, sharex=False, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI
            )
            ax1, ax2, ax3 = axes
        elif is_valid_axes_count(external_axes, 3):
            (ax1, ax2, ax3) = external_axes
        else:
            return
    else:
        # This plot has 2 axes (cumulative sentiment, polarity bars)
        if external_axes is None:
            _, axes = plt.subplots(
                2, 1, sharex=True, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI
            )
            ax1, ax2 = axes
        elif is_valid_axes_count(external_axes, 2):
            (ax1, ax2) = external_axes
        else:
            return
    # Overall cumulative VADER sentiment across the whole window
    ax1.plot(
        pd.to_datetime(df_tweets["created_at"]),
        df_tweets["cumulative_compound"].values,
    )
    ax1.set_ylabel("\nCumulative\nVADER Sentiment")
    # Overlay a per-day cumulative sentiment line, labelled by date
    for _, day_df in df_tweets.groupby(by="Day"):
        day_df["time"] = pd.to_datetime(day_df["created_at"])
        day_df = day_df.sort_values(by="time")
        ax1.plot(
            day_df["time"],
            day_df["sentiment"].cumsum(),
            label=pd.to_datetime(day_df["date"]).iloc[0].strftime("%Y-%m-%d"),
        )
    # Polarity bars: positive scores above the axis, negative mirrored below
    ax2.bar(
        df_tweets["date"],
        df_tweets["positive"],
        color=theme.up_color,
        width=theme.volume_bar_width / 100,
    )
    ax2.bar(
        df_tweets["date"],
        -1 * df_tweets["negative"],
        color=theme.down_color,
        width=theme.volume_bar_width / 100,
    )
    ax1.set_title(
        f"Twitter's {symbol} total compound sentiment over time is {round(np.sum(df_tweets['sentiment']), 2)}"
    )
    theme.style_primary_axis(ax1)
    ax2.set_ylabel("VADER Polarity Scores")
    theme.style_primary_axis(ax2)
    if compare:
        # get stock end price for each corresponding day if compare == True
        closing_price_df = get_closing_price(symbol, n_days_past)
        # NOTE(review): truthiness check on an Axes object — `ax3 is not None`
        # would be the clearer/safer spelling.
        if ax3:
            ax3.plot(
                closing_price_df["Date"],
                closing_price_df["Close"],
                label=pd.to_datetime(closing_price_df["Date"])
                .iloc[0]
                .strftime("%Y-%m-%d"),
            )
            ax3.set_ylabel("Stock Price")
            theme.style_primary_axis(ax3)
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "sentiment", df_tweets
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/common/behavioural_analysis/twitter_view.py | 0.789761 | 0.372791 | twitter_view.py | pypi |
__docformat__ = "numpy"
import logging
from typing import Tuple
import pandas as pd
import pandas_ta as ta
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_rolling_avg(
    data: pd.DataFrame, window: int = 14
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Return rolling mean and standard deviation

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of target data
    window: int
        Length of rolling window

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Dataframe of rolling mean,
        Dataframe of rolling standard deviation
    """
    # Centered window; min_periods=1 keeps the series edges instead of NaN
    roller = data.rolling(window, center=True, min_periods=1)
    return pd.DataFrame(roller.mean()), pd.DataFrame(roller.std())
@log_start_end(log=logger)
def get_spread(
    data: pd.DataFrame, window: int = 14
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Standard Deviation and Variance

    Parameters
    ----------
    data: pd.DataFrame
        DataFrame of targeted data
    window: int
        Length of window

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Dataframe of rolling standard deviation,
        Dataframe of rolling variance
    """
    # pandas_ta emits NaN for the warm-up rows; drop them before returning
    stdev_series = ta.stdev(close=data, length=window).dropna()
    variance_series = ta.variance(close=data, length=window).dropna()
    return pd.DataFrame(stdev_series), pd.DataFrame(variance_series)
@log_start_end(log=logger)
def get_quantile(
    data: pd.DataFrame, window: int = 14, quantile_pct: float = 0.5
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Overlay Median & Quantile

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of targeted data
    window : int
        Length of window
    quantile_pct: float
        Quantile to display

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Dataframe of rolling median prices over window,
        Dataframe of rolling quantile prices over window
    """
    # pandas_ta emits NaN for the warm-up rows; drop them before returning
    median_series = ta.median(close=data, length=window).dropna()
    quantile_series = ta.quantile(data, length=window, q=quantile_pct).dropna()
    return pd.DataFrame(median_series), pd.DataFrame(quantile_series)
@log_start_end(log=logger)
def get_skew(data: pd.DataFrame, window: int = 14) -> pd.DataFrame:
    """Return the rolling skewness of a series.

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of targeted data
    window : int
        Length of window

    Returns
    -------
    pd.DataFrame
        Dataframe of rolling skew
    """
    # Delegate to pandas_ta and discard the warm-up NaNs.
    return ta.skew(close=data, length=window).dropna()
@log_start_end(log=logger)
def get_kurtosis(data: pd.DataFrame, window: int = 14) -> pd.DataFrame:
    """Return the rolling kurtosis of a series.

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of targeted data
    window: int
        Length of window

    Returns
    -------
    pd.DataFrame
        Dataframe of rolling kurtosis
    """
    # Delegate to pandas_ta and discard the warm-up NaNs.
    return ta.kurtosis(close=data, length=window).dropna()
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.quantitative_analysis import rolling_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
reindex_dates,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_mean_std(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    window: int = 14,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot a series with its rolling mean (top panel) and rolling std (bottom panel).

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe
    target: str
        Column in data to look at
    symbol : str
        Stock ticker
    window : int
        Length of window
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # Reduce to the single target column before computing rolling stats.
    data = data[target]
    rolling_mean, rolling_std = rolling_model.get_rolling_avg(data, window)
    # Outer-join the raw series with the rolling mean; the merge suffix
    # names the new column "<target>_mean".
    plot_data = pd.merge(
        data,
        rolling_mean,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_mean"),
    )
    # Add the rolling standard deviation as "<target>_std".
    plot_data = pd.merge(
        plot_data,
        rolling_std,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_std"),
    )
    # Re-index onto an integer range; the original dates end up in a
    # "date" column, used below for axis tick labels.
    plot_data = reindex_dates(plot_data)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        ax1, ax2 = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    # Top panel: raw values with the rolling mean overlaid.
    ax1.plot(
        plot_data.index,
        plot_data[target].values,
        label=symbol,
    )
    ax1.plot(
        plot_data.index,
        plot_data[target + "_mean"].values,
    )
    ax1.set_ylabel(
        "Values",
    )
    ax1.legend(["Real Values", "Rolling Mean"])
    ax1.set_title(f"Rolling mean and std (window {str(window)}) of {symbol} {target}")
    ax1.set_xlim([plot_data.index[0], plot_data.index[-1]])
    # Bottom panel: rolling standard deviation.
    ax2.plot(
        plot_data.index,
        plot_data[target + "_std"].values,
        label="Rolling std",
    )
    ax2.legend(["Rolling std"])
    ax2.set_ylabel(
        f"{target} Std Deviation",
    )
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render when we own the figure; external callers render themselves.
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "rolling",
        rolling_mean.join(rolling_std, lsuffix="_mean", rsuffix="_std"),
    )
@log_start_end(log=logger)
def display_spread(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    window: int = 14,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot a series (top) with its rolling standard deviation and rolling variance.

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe
    target: str
        Column in data to look at
    symbol : str
        Stock ticker
    window : int
        Length of window
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    # Reduce to the single target column before computing rolling stats.
    data = data[target]
    df_sd, df_var = rolling_model.get_spread(data, window)
    # Outer-join raw values with the rolling stdev and variance columns.
    plot_data = pd.merge(
        data,
        df_sd,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_sd"),
    )
    plot_data = pd.merge(
        plot_data,
        df_var,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_var"),
    )
    # Integer re-index; original dates kept in a "date" column for tick labels.
    plot_data = reindex_dates(plot_data)
    # This plot has 3 axes
    if external_axes is None:
        _, axes = plt.subplots(
            3,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        (ax1, ax2, ax3) = axes
    elif is_valid_axes_count(external_axes, 3):
        (ax1, ax2, ax3) = external_axes
    else:
        return
    # Panel 1: raw values.
    ax1.plot(plot_data.index, plot_data[target].values)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel("Value")
    ax1.set_title(f"Spread of {symbol} {target}")
    # Panels 2-3: the pandas_ta columns, named "STDEV_<window>"/"VAR_<window>".
    ax2.plot(
        plot_data[f"STDEV_{window}"].index,
        plot_data[f"STDEV_{window}"].values,
        label="Stdev",
    )
    ax2.set_ylabel("Stdev")
    ax3.plot(
        plot_data[f"VAR_{window}"].index,
        plot_data[f"VAR_{window}"].values,
        label="Variance",
    )
    ax3.set_ylabel("Variance")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    theme.style_primary_axis(
        ax3,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "spread",
        df_sd.join(df_var, lsuffix="_sd", rsuffix="_var"),
    )
@log_start_end(log=logger)
def display_quantile(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    window: int = 14,
    quantile: float = 0.5,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot a series together with its rolling median and a rolling quantile.

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe
    target: str
        Column in data to look at
    symbol : str
        Stock ticker
    window : int
        Length of window
    quantile: float
        Quantile to get
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Reduce to the single target column before computing rolling stats.
    data = data[target]
    df_med, df_quantile = rolling_model.get_quantile(data, window, quantile)
    # Outer-join raw values with the rolling median and quantile columns.
    plot_data = pd.merge(
        data,
        df_med,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_med"),
    )
    plot_data = pd.merge(
        plot_data,
        df_quantile,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_quantile"),
    )
    # Integer re-index; original dates kept in a "date" column for tick labels.
    plot_data = reindex_dates(plot_data)
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.set_title(f"{symbol} {target} Median & Quantile")
    ax.plot(plot_data.index, plot_data[target].values, label=target)
    # Column names follow pandas_ta's convention: "MEDIAN_<window>" and
    # "QTL_<window>_<quantile>".
    ax.plot(
        plot_data.index,
        plot_data[f"MEDIAN_{window}"].values,
        label=f"Median w={window}",
    )
    ax.plot(
        plot_data.index,
        plot_data[f"QTL_{window}_{quantile}"].values,
        label=f"Quantile q={quantile}",
        linestyle="--",
    )
    ax.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax.set_ylabel(f"{symbol} Value")
    ax.legend()
    theme.style_primary_axis(
        ax,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "quantile",
        df_med.join(df_quantile),
    )
@log_start_end(log=logger)
def display_skew(
    symbol: str,
    data: pd.DataFrame,
    target: str,
    window: int = 14,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot a series (top panel) with its rolling skewness (bottom panel).

    Parameters
    ----------
    symbol: str
        Stock ticker
    data: pd.DataFrame
        Dataframe
    target: str
        Column in data to look at
    window: int
        Length of window
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # Reduce to the single target column before computing the indicator.
    data = data[target]
    df_skew = rolling_model.get_skew(data, window)
    # Outer-join raw values with the rolling-skew column.
    plot_data = pd.merge(
        data,
        df_skew,
        how="outer",
        left_index=True,
        right_index=True,
    )
    # Integer re-index; original dates kept in a "date" column for tick labels.
    plot_data = reindex_dates(plot_data)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    ax1.set_title(f"{symbol} Skewness Indicator")
    ax1.plot(plot_data.index, plot_data[target].values)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel(f"{target}")
    # pandas_ta names the rolling-skew column "SKEW_<window>".
    ax2.plot(plot_data.index, plot_data[f"SKEW_{window}"].values, label="Skew")
    ax2.set_ylabel("Indicator")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "skew",
        df_skew,
    )
@log_start_end(log=logger)
def display_kurtosis(
    symbol: str,
    data: pd.DataFrame,
    target: str,
    window: int = 14,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot a series (top panel) with its rolling kurtosis (bottom panel).

    Parameters
    ----------
    symbol: str
        Ticker
    data: pd.DataFrame
        Dataframe of stock prices
    target: str
        Column in data to look at
    window: int
        Length of window
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # Reduce to the single target column before computing the indicator.
    data = data[target]
    df_kurt = rolling_model.get_kurtosis(data, window)
    # Outer-join raw values with the rolling-kurtosis column.
    plot_data = pd.merge(
        data,
        df_kurt,
        how="outer",
        left_index=True,
        right_index=True,
    )
    # Integer re-index; original dates kept in a "date" column for tick labels.
    plot_data = reindex_dates(plot_data)
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    ax1.set_title(f"{symbol} {target} Kurtosis Indicator (window {str(window)})")
    ax1.plot(plot_data.index, plot_data[target].values)
    ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
    ax1.set_ylabel(f"{target}")
    # pandas_ta names the rolling-kurtosis column "KURT_<window>".
    ax2.plot(
        plot_data.index,
        plot_data[f"KURT_{window}"].values,
    )
    ax2.set_ylabel("Indicator")
    theme.style_primary_axis(
        ax1,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    theme.style_primary_axis(
        ax2,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "kurtosis",
        df_kurt,
    )
__docformat__ = "numpy"
# pylint: disable=too-many-lines
import logging
import os
import warnings
from datetime import datetime
from typing import Any, Optional, List
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from scipy import stats
from detecta import detect_cusum
from statsmodels.graphics.gofplots import qqplot
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.quantitative_analysis import qa_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
reindex_dates,
lambda_long_number_format,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
def lambda_color_red(val: Any) -> Any:
    """Wrap a value in rich red markup when it exceeds 0.05.

    Used to flag p-values that fail the conventional 5% significance level
    when rendering tables with rich markup.

    Parameters
    ----------
    val : Any
        Numeric value (typically a p-value) to colorize.

    Returns
    -------
    Any
        A rich-markup string ``[red]...[/red]`` when ``val > 0.05``,
        otherwise the value rounded to 4 decimals (a number, not a string —
        the previous ``-> str`` annotation was wrong for this branch).
    """
    if val > 0.05:
        return f"[red]{round(val,4)}[/red]"
    return round(val, 4)
@log_start_end(log=logger)
def display_summary(data: pd.DataFrame, export: str = "") -> None:
    """Print a table of summary statistics for a dataframe.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame to get statistics of
    export : str
        Format to export data
    """
    stats_table = qa_model.get_summary(data)
    # Export path mirrors the stocks module layout rather than common/.
    export_dir = os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks")
    print_rich_table(
        stats_table,
        headers=stats_table.columns.to_list(),
        floatfmt=".3f",
        show_index=True,
        title="[bold]Summary Statistics[/bold]",
    )
    export_data(export, export_dir, "summary", stats_table)
@log_start_end(log=logger)
def display_hist(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    bins: int = 15,
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots histogram of data

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe to look at
    target : str
        Data column to get histogram of the dataframe
    symbol : str
        Name of dataset
    bins : int
        Number of bins in histogram
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.qa.hist(data=df, target="Adj Close")
    """
    # Reduce to the single target column.
    data = data[target]
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Histogram with a KDE overlay; y-axis shows proportions, not counts.
    sns.histplot(
        data,
        color=theme.up_color,
        bins=bins,
        kde=True,
        ax=ax,
        stat="proportion",
        legend=True,
    )
    # Rug marks along the x-axis show each individual observation.
    sns.rugplot(data, color=theme.down_color, ax=ax, legend=True)
    # Include the start date in the title only for datetime-indexed data.
    if isinstance(data.index[0], datetime):
        start = data.index[0]
        ax.set_title(
            f"Histogram of {symbol} {target} from {start.strftime('%Y-%m-%d')}"
        )
    else:
        ax.set_title(f"Histogram of {symbol} {target}")
    ax.set_xlabel("Value")
    theme.style_primary_axis(ax)
    # Manually construct the chart legend
    proportion_legend = mpatches.Patch(
        color=theme.up_color, label="Univariate distribution"
    )
    marginal_legend = mpatches.Patch(
        color=theme.down_color, label="Marginal distributions"
    )
    ax.legend(handles=[proportion_legend, marginal_legend])
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_cdf(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Cumulative Distribution Function

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe to look at
    target : str
        Data column
    symbol : str
        Name of dataset
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.qa.cdf(data=df, target="Adj Close")
    """
    # Reduce to the single target column.
    data = data[target]
    # NOTE(review): assumes a datetime-like index (strftime below) — confirm.
    start = data.index[0]
    # Empirical CDF: sorted unique values vs cumulative relative frequency.
    cdf = data.value_counts().sort_index().div(len(data)).cumsum()
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    cdf.plot(ax=ax)
    ax.set_title(
        f"Cumulative Distribution Function of {symbol} {target}\nfrom {start.strftime('%Y-%m-%d')}"
    )
    ax.set_ylabel("Probability")
    ax.set_xlabel(target)
    minVal = data.values.min()
    q25 = np.quantile(data.values, 0.25)
    medianVal = np.quantile(data.values, 0.5)
    q75 = np.quantile(data.values, 0.75)
    # Flat sequence of (x-pair, y-pair, color) triples splatted into ax.plot
    # below to draw the dashed guide lines for Q1, the median and Q3; the
    # color strings are consumed as matplotlib format arguments.
    labels = [
        (minVal, q25),
        (0.25, 0.25),
        theme.down_color,
        (q25, q25),
        (0, 0.25),
        theme.down_color,
        (minVal, medianVal),
        (0.5, 0.5),
        theme.down_color,
        (medianVal, medianVal),
        (0, 0.5),
        theme.down_color,
        (minVal, q75),
        (0.75, 0.75),
        theme.down_color,
        (q75, q75),
        (0, 0.75),
        theme.down_color,
    ]
    ax.plot(*labels, ls="--")
    # Annotate each quantile guide roughly halfway along its horizontal line.
    ax.text(
        minVal + (q25 - minVal) / 2,
        0.27,
        "Q1",
        color=theme.down_color,
        fontweight="bold",
    )
    ax.text(
        minVal + (medianVal - minVal) / 2,
        0.52,
        "Median",
        color=theme.down_color,
        fontweight="bold",
    )
    ax.text(
        minVal + (q75 - minVal) / 2,
        0.77,
        "Q3",
        color=theme.down_color,
        fontweight="bold",
    )
    ax.set_xlim(cdf.index[0], cdf.index[-1])
    theme.style_primary_axis(ax)
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "cdf",
        pd.DataFrame(cdf),
    )
@log_start_end(log=logger)
def display_bw(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    yearly: bool = True,
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots box and whisker plots

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe to look at
    target : str
        Data column to look at
    symbol : str
        Name of dataset
    yearly : bool
        Flag to indicate yearly accumulation
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.qa.bw(data=df, target="Adj Close")
    """
    # Reduce to the single target column.
    data = data[target]
    # NOTE(review): assumes a DatetimeIndex (.year/.month below) — confirm.
    start = data.index[0]
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    theme.style_primary_axis(ax)
    color = theme.get_colors()[0]
    # Group one box per year, or one box per calendar month.
    if yearly:
        x_data = data.index.year
    else:
        x_data = data.index.month
    box_plot = sns.boxplot(
        x=x_data,
        y=data,
        ax=ax,
        zorder=3,
        boxprops=dict(edgecolor=color),
        flierprops=dict(
            linestyle="--",
            color=color,
            markerfacecolor=theme.up_color,
            markeredgecolor=theme.up_color,
        ),
        whiskerprops=dict(color=color),
        capprops=dict(color=color),
    )
    # bool indexes the ["Monthly", "Yearly"] pair: False -> 0, True -> 1.
    box_plot.set(
        xlabel=["Monthly", "Yearly"][yearly],
        ylabel=target,
        title=f"{['Monthly','Yearly'][yearly]} box plot of {symbol} {target} from {start.strftime('%Y-%m-%d')}",
    )
    l_months = [
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
    ]
    l_ticks = list()
    # For monthly grouping, replace numeric tick labels (1-12) with names.
    if not yearly:
        for val in box_plot.get_xticklabels():
            l_ticks.append(l_months[int(val.get_text()) - 1])
        box_plot.set_xticklabels(l_ticks)
    # remove the scientific notion on the left hand side
    ax.ticklabel_format(style="plain", axis="y")
    ax.get_yaxis().set_major_formatter(
        matplotlib.ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    theme.style_primary_axis(ax)
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_acf(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    lags: int = 15,
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots Auto and Partial Auto Correlation of returns and change in returns

    Top row: ACF/PACF of the first difference (returns). Bottom row:
    ACF/PACF of the second difference (change in returns).

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe to look at
    target : str
        Data column to look at
    symbol : str
        Name of dataset
    lags : int
        Max number of lags to look at
    external_axes : Optional[List[plt.Axes]], optional
        External axes (4 axes are expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.qa.acf(data=df, target="Adj Close")
    """
    data = data[target]
    start = data.index[0]
    # This plot has 4 axes
    if external_axes is None:
        fig, axes = plt.subplots(
            nrows=2,
            ncols=2,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        (ax1, ax2), (ax3, ax4) = axes
    elif is_valid_axes_count(external_axes, 4):
        (ax1, ax2, ax3, ax4) = external_axes
        # BUGFIX: fig was undefined on this path but used by fig.suptitle
        # below (NameError); take the figure that owns the supplied axes.
        fig = ax1.get_figure()
    else:
        return
    # BUGFIX: all four panels previously plotted np.diff(np.diff(...)), so
    # the "Returns" row silently duplicated the "Change in Returns" row.
    returns = np.diff(data.values)
    returns_change = np.diff(returns)
    # Auto-correlation function of returns
    sm.graphics.tsa.plot_acf(returns, lags=lags, ax=ax1)
    ax1.set_title(f"{symbol} Returns Auto-Correlation", fontsize=9)
    # Partial auto-correlation function of returns
    sm.graphics.tsa.plot_pacf(returns, lags=lags, ax=ax2, method="ywm")
    ax2.set_title(
        f"{symbol} Returns Partial Auto-Correlation",
        fontsize=9,
    )
    # Auto-correlation function of the change in returns
    sm.graphics.tsa.plot_acf(returns_change, lags=lags, ax=ax3)
    ax3.set_title(
        f"Change in {symbol} Returns Auto-Correlation",
        fontsize=9,
    )
    # Partial auto-correlation function of the change in returns
    sm.graphics.tsa.plot_pacf(returns_change, lags=lags, ax=ax4, method="ywm")
    ax4.set_title(
        f"Change in {symbol} Returns Partial Auto-Correlation",
        fontsize=9,
    )
    fig.suptitle(
        f"ACF differentials starting from {start.strftime('%Y-%m-%d')}",
        fontsize=15,
        x=0.042,
        y=0.95,
        horizontalalignment="left",
        verticalalignment="top",
    )
    theme.style_primary_axis(ax1)
    theme.style_primary_axis(ax2)
    theme.style_primary_axis(ax3)
    theme.style_primary_axis(ax4)
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output(force_tight_layout=True)
@log_start_end(log=logger)
def display_qqplot(
    data: pd.DataFrame,
    target: str,
    symbol: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots QQ plot for data against normal quantiles

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe
    target : str
        Column in data to look at
    symbol : str
        Stock ticker
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.qa.qqplot(data=df, target="Adj Close")
    """
    data = data[target]
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Statsmodels has a UserWarning for marker kwarg-- which we don't use.
    # Scope the suppression to this call instead of permanently mutating the
    # process-wide warning filters (the previous filterwarnings call leaked
    # past this function and hid every subsequent UserWarning).
    with warnings.catch_warnings():
        warnings.filterwarnings(category=UserWarning, action="ignore")
        qqplot(
            data,
            stats.distributions.norm,
            fit=True,
            line="45",
            color=theme.down_color,
            ax=ax,
        )
    # Line index 1 is the 45-degree reference line drawn by qqplot.
    ax.get_lines()[1].set_color(theme.up_color)
    ax.set_title(f"Q-Q plot for {symbol} {target}")
    ax.set_ylabel("Sample quantiles")
    ax.set_xlabel("Theoretical quantiles")
    theme.style_primary_axis(ax)
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_cusum(
    data: pd.DataFrame,
    target: str,
    threshold: float = 5,
    drift: float = 2.1,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots Cumulative sum algorithm (CUSUM) to detect abrupt changes in data

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe
    target : str
        Column of data to look at
    threshold : float
        Threshold value
    drift : float
        Drift parameter
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.qa.cusum(data=df, target="Adj Close")
    """
    target_series = data[target].values
    # The code for this plot was adapted from detecta's sources because at the
    # time of writing this detect_cusum had a bug related to external axes support.
    # see https: // github.com/demotu/detecta/pull/3
    tap, tan = 0, 0
    # detect_cusum returns alarm indices (ta), change-start indices (tai)
    # and change-end indices (taf).
    ta, tai, taf, _ = detect_cusum(
        x=target_series,
        threshold=threshold,
        drift=drift,
        ending=True,
        show=False,
    )
    # Thus some variable names are left unchanged and unreadable...
    # Re-run the two-sided CUSUM recursion to recover the cumulative sums
    # (gp/gn) that detect_cusum computes internally but does not return;
    # they are what the second panel plots.
    gp, gn = np.zeros(target_series.size), np.zeros(target_series.size)
    for i in range(1, target_series.size):
        s = target_series[i] - target_series[i - 1]
        gp[i] = gp[i - 1] + s - drift  # cumulative sum for + change
        gn[i] = gn[i - 1] - s - drift  # cumulative sum for - change
        if gp[i] < 0:
            gp[i], tap = 0, i
        if gn[i] < 0:
            gn[i], tan = 0, i
        if gp[i] > threshold or gn[i] > threshold:  # change detected!
            ta = np.append(ta, i)  # alarm index
            tai = np.append(tai, tap if gp[i] > threshold else tan)  # start
            gp[i], gn[i] = 0, 0  # reset alarm
    # This plot has 2 axes
    if external_axes is None:
        _, axes = plt.subplots(
            2,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    target_series_indexes = range(data[target].size)
    ax1.plot(target_series_indexes, target_series)
    # Only draw start/end/alarm markers when at least one change was detected.
    if len(ta):
        ax1.plot(
            tai,
            target_series[tai],
            ">",
            markerfacecolor=theme.up_color,
            markersize=5,
            label="Start",
        )
        ax1.plot(
            taf,
            target_series[taf],
            "<",
            markerfacecolor=theme.down_color,
            markersize=5,
            label="Ending",
        )
        ax1.plot(
            ta,
            target_series[ta],
            "o",
            markerfacecolor=theme.get_colors()[-1],
            markeredgecolor=theme.get_colors()[-2],
            markeredgewidth=1,
            markersize=3,
            label="Alarm",
        )
        ax1.legend()
    ax1.set_xlim(-0.01 * target_series.size, target_series.size * 1.01 - 1)
    ax1.set_ylabel("Amplitude")
    # Ignore NaN/inf values when computing the y-axis limits.
    ymin, ymax = (
        target_series[np.isfinite(target_series)].min(),
        target_series[np.isfinite(target_series)].max(),
    )
    y_range = ymax - ymin if ymax > ymin else 1
    ax1.set_ylim(ymin - 0.1 * y_range, ymax + 0.1 * y_range)
    ax1.set_title(
        "Time series and detected changes "
        + f"(threshold= {threshold:.3g}, drift= {drift:.3g}): N changes = {len(tai)}",
        fontsize=10,
    )
    theme.style_primary_axis(ax1)
    # Second panel: the positive and negative cumulative sums themselves.
    ax2.plot(target_series_indexes, gp, label="+")
    ax2.plot(target_series_indexes, gn, label="-")
    ax2.set_xlim(-0.01 * target_series.size, target_series.size * 1.01 - 1)
    ax2.set_xlabel("Data points")
    ax2.set_ylim(-0.01 * threshold, 1.1 * threshold)
    ax2.axhline(threshold)
    theme.style_primary_axis(ax2)
    ax2.set_title(
        "Time series of the cumulative sums of positive and negative changes",
        fontsize=10,
    )
    ax2.legend()
    # Only render when we own the figure.
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_seasonal(
    symbol: str,
    data: pd.DataFrame,
    target: str,
    multiplicative: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots seasonal decomposition data

    Also prints the series level and the fpp2 trend/seasonality strength
    statistics (https://otexts.com/fpp2/seasonal-strength.html).

    Parameters
    ----------
    symbol : str
        Name of dataset
    data : pd.DataFrame
        DataFrame
    target : str
        Column of data to look at
    multiplicative : bool
        Boolean to indicate multiplication instead of addition
    export : str
        Format to export trend and cycle data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (4 axes are expected in the list), by default None
    """
    data = data[target]
    result, cycle, trend = qa_model.get_seasonal_decomposition(data, multiplicative)
    # Assemble raw values, the decomposition components, and the cycle/trend
    # series into one frame; merge suffixes disambiguate same-named columns.
    plot_data = pd.merge(
        data,
        result.trend,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_result.trend"),
    )
    plot_data = pd.merge(
        plot_data,
        result.seasonal,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_result.seasonal"),
    )
    plot_data = pd.merge(
        plot_data,
        result.resid,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_result.resid"),
    )
    plot_data = pd.merge(
        plot_data,
        cycle,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_cycle"),
    )
    plot_data = pd.merge(
        plot_data,
        trend,
        how="outer",
        left_index=True,
        right_index=True,
        suffixes=("", "_trend"),
    )
    # Integer re-index; original dates kept in a "date" column for tick labels.
    plot_data = reindex_dates(plot_data)
    # This plot has 4 axes
    if external_axes is None:
        fig, axes = plt.subplots(
            4,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        (ax1, ax2, ax3, ax4) = axes
    elif is_valid_axes_count(external_axes, 4):
        (ax1, ax2, ax3, ax4) = external_axes
    else:
        return
    colors = iter(theme.get_colors())
    ax1.set_title(f"{symbol} (Time-Series) {target} seasonal decomposition")
    ax1.plot(
        plot_data.index, plot_data[target].values, color=next(colors), label="Values"
    )
    ax1.set_xlim([plot_data.index[0], plot_data.index[-1]])
    ax1.legend()
    # Multiplicative model
    ax2.plot(plot_data["trend"], color=theme.down_color, label="Cyclic-Trend")
    ax2.plot(
        plot_data["trend_cycle"],
        color=theme.up_color,
        linestyle="--",
        label="Cycle component",
    )
    ax2.legend()
    ax3.plot(plot_data["trend_trend"], color=next(colors), label="Trend component")
    ax3.plot(plot_data["seasonal"], color=next(colors), label="Seasonal effect")
    ax3.legend()
    ax4.plot(plot_data["resid"], color=next(colors), label="Residuals")
    ax4.legend()
    theme.style_primary_axis(ax1)
    theme.style_primary_axis(ax2)
    theme.style_primary_axis(ax3)
    theme.style_primary_axis(
        ax4,
        data_index=plot_data.index.to_list(),
        tick_labels=plot_data["date"].to_list(),
    )
    if external_axes is None:
        fig.tight_layout(pad=theme.tight_layout_padding)
        fig.subplots_adjust(
            hspace=0.1,
        )
        theme.visualize_output(force_tight_layout=False)
    # From #https: // otexts.com/fpp2/seasonal-strength.html
    console.print("Time-Series Level is " + str(round(data.mean(), 2)))
    # BUGFIX: the max() previously closed before the division
    # (max(0, 1 - Var(R)) / Var(T + R)), which is not the fpp2 formula
    # F_T = max(0, 1 - Var(R) / Var(T + R)); Fs below already had the
    # correct parenthesization.
    Ft = max(0, 1 - np.var(result.resid) / np.var(result.trend + result.resid))
    console.print(f"Strength of Trend: {Ft:.4f}")
    Fs = max(
        0,
        1 - np.var(result.resid) / np.var(result.seasonal + result.resid),
    )
    console.print(f"Strength of Seasonality: {Fs:.4f}\n")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "summary",
        cycle.join(trend),
    )
@log_start_end(log=logger)
def display_normality(data: pd.DataFrame, target: str, export: str = "") -> None:
    """Print a table of normality-test statistics for a target column.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame
    target : str
        Column in data to look at
    export : str
        Format to export data
    """
    series = data[target]
    normal = qa_model.get_normality(series)
    # Transpose so each test becomes a row, then red-flag high p-values
    # in the second column for display.
    display_df = normal.copy().T
    display_df.iloc[:, 1] = display_df.iloc[:, 1].apply(lambda_color_red)
    print_rich_table(
        display_df,
        show_index=True,
        headers=["Statistic", "p-value"],
        floatfmt=".4f",
        title="[bold]Normality Statistics[/bold]",
    )
    # Export the raw (uncolored) statistics.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "normality",
        normal,
    )
@log_start_end(log=logger)
def display_unitroot(
    data: pd.DataFrame,
    target: str,
    fuller_reg: str = "c",
    kpss_reg: str = "c",
    export: str = "",
):
    """Print a table with ADF and KPSS unit-root test results.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame
    target : str
        Column of data to look at
    fuller_reg : str
        Type of regression of ADF test. Can be ‘c’,’ct’,’ctt’,’nc’ 'c' - Constant and t - trend order
    kpss_reg : str
        Type of regression for KPSS test. Can be ‘c’,’ct'
    export : str
        Format for exporting data
    """
    series = data[target]
    unitroot_df = qa_model.get_unitroot(series, fuller_reg, kpss_reg)
    print_rich_table(
        unitroot_df,
        show_index=True,
        headers=unitroot_df.columns.to_list(),
        title="[bold]Unit Root Calculation[/bold]",
        floatfmt=".4f",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "unitroot",
        unitroot_df,
    )
@log_start_end(log=logger)
def display_raw(
    data: pd.DataFrame,
    sortby: str = "",
    ascend: bool = False,
    limit: int = 20,
    export: str = "",
) -> None:
    """Prints table showing raw stock data

    Parameters
    ----------
    data : DataFrame
        DataFrame with historical information (a Series is also accepted)
    sortby : str
        The column to sort by
    ascend : bool
        Whether to sort descending
    limit : int
        Number of rows to show
    export : str
        Export data as CSV, JSON, XLSX
    """
    # A Series (single target column) is promoted to a one-column frame so
    # the sort/table logic below only deals with DataFrames.
    if isinstance(data, pd.Series):
        df1 = pd.DataFrame(data)
    else:
        df1 = data.copy()
    if sortby:
        try:
            # Match the requested column case- and whitespace-insensitively.
            sort_col = [x.lower().replace(" ", "") for x in df1.columns].index(
                sortby.lower().replace(" ", "")
            )
        except ValueError:
            console.print("[red]The provided column is not a valid option[/red]\n")
            return
        # BUGFIX: previously sorted by data.columns[sort_col] — a Series
        # input has no .columns, so sorting crashed; use df1's columns.
        df1 = df1.sort_values(by=df1.columns[sort_col], ascending=ascend)
    else:
        df1 = df1.sort_index(ascending=ascend)
    # NOTE(review): assumes a datetime-like index (strftime) — confirm for
    # non time-series callers.
    df1.index = [x.strftime("%Y-%m-%d") for x in df1.index]
    print_rich_table(
        df1.head(limit),
        headers=[x.title() if x != "" else "Date" for x in df1.columns],
        title="[bold]Raw Data[/bold]",
        show_index=True,
    floatfmt=".3f",
    )
    # Export the full, unsorted input rather than the truncated view.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "raw",
        data,
    )
@log_start_end(log=logger)
def display_line(
    data: pd.Series,
    title: str = "",
    log_y: bool = True,
    markers_lines: Optional[List[datetime]] = None,
    markers_scatter: Optional[List[datetime]] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Display line plot of data

    Parameters
    ----------
    data: pd.Series
        Data to plot
    title: str
        Title for plot
    log_y: bool
        Flag for showing y on log scale
    markers_lines: Optional[List[datetime]]
        List of dates to highlight using vertical lines
    markers_scatter: Optional[List[datetime]]
        List of dates to highlight using scatter
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.load("AAPL")
    >>> openbb.qa.line(data=df["Adj Close"])
    """
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        # Invalid external axes: silently skip plotting
        return
    if log_y:
        # Log-scale y-axis with plain (non-scientific) tick labels
        ax.semilogy(data.index, data.values)
        ax.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax.yaxis.set_major_locator(
            matplotlib.ticker.LogLocator(base=100, subs=[1.0, 2.0, 5.0, 10.0])
        )
        ax.ticklabel_format(style="plain", axis="y")
    else:
        ax.plot(data.index, data.values)
    if markers_lines:
        # Vertical highlight lines spanning the current y-range
        ymin, ymax = ax.get_ylim()
        ax.vlines(markers_lines, ymin, ymax, color="#00AAFF")
    if markers_scatter:
        for n, marker_date in enumerate(markers_scatter):
            # NOTE(review): Index.get_loc(..., method="nearest") was removed in
            # pandas 2.0 — confirm the pinned pandas version still supports it
            price_location_idx = data.index.get_loc(marker_date, method="nearest")
            # algo to improve text placement of highlight event number
            if (
                0 < price_location_idx < (len(data) - 1)
                and data.iloc[price_location_idx - 1]
                > data.iloc[price_location_idx]
                and data.iloc[price_location_idx + 1]
                > data.iloc[price_location_idx]
            ):
                # Point is a local minimum: place the label below it
                text_loc = (0, -20)
            else:
                text_loc = (0, 10)
            ax.annotate(
                str(n + 1),
                (mdates.date2num(marker_date), data.iloc[price_location_idx]),
                xytext=text_loc,
                textcoords="offset points",
            )
            ax.scatter(
                marker_date,
                data.iloc[price_location_idx],
                color="#00AAFF",
                s=100,
            )
    data_type = data.name
    ax.set_ylabel(data_type)
    ax.set_xlim(data.index[0], data.index[-1])
    ax.ticklabel_format(style="plain", axis="y")
    # Human-readable y-axis labels (e.g. 1.2 M instead of 1200000)
    ax.get_yaxis().set_major_formatter(
        matplotlib.ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    if title:
        ax.set_title(title)
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "line",
    )
def display_var(
    data: pd.DataFrame,
    symbol: str = "",
    use_mean: bool = False,
    adjusted_var: bool = False,
    student_t: bool = False,
    percentile: float = 99.9,
    data_range: int = 0,
    portfolio: bool = False,
) -> None:
    """Prints table showing VaR of dataframe.

    Parameters
    ----------
    data: pd.Dataframe
        Data dataframe
    symbol: str
        name of the data
    use_mean: bool
        if one should use the data mean return
    adjusted_var: bool
        if one should have VaR adjusted for skew and kurtosis (Cornish-Fisher-Expansion)
    student_t: bool
        If one should use the student-t distribution
    percentile: float
        var percentile
    data_range: int
        Number of rows you want to use VaR over
    portfolio: bool
        If the data is a portfolio
    """
    # Restrict to the trailing window when a positive range is requested
    window = data[-data_range:] if data_range > 0 else data
    df = qa_model.get_var(
        window, use_mean, adjusted_var, student_t, percentile, portfolio
    )
    # Title prefix mirrors the distribution used for the calculation
    str_title = "Adjusted " if adjusted_var else ("Student-t " if student_t else "")
    if symbol != "":
        symbol += " "
    print_rich_table(
        df,
        show_index=True,
        headers=list(df.columns),
        title=f"[bold]{symbol}{str_title}Value at Risk[/bold]",
        floatfmt=".2f",
    )
def display_es(
    data: pd.DataFrame,
    symbol: str = "",
    use_mean: bool = False,
    distribution: str = "normal",
    percentile: float = 99.9,
    portfolio: bool = False,
) -> None:
    """Prints table showing expected shortfall.

    Parameters
    ----------
    data: pd.DataFrame
        Data dataframe
    use_mean:
        if one should use the data mean return
    symbol: str
        name of the data
    distribution: str
        choose distribution to use: logistic, laplace, normal
    percentile: int
        es percentile
    portfolio: bool
        If the data is a portfolio
    """
    df = qa_model.get_es(data, use_mean, distribution, percentile, portfolio)
    # Title prefix per distribution; the gaussian case has no prefix
    title_prefixes = {
        "laplace": "Laplace ",
        "student_t": "Student-t ",
        "logistic": "Logistic ",
    }
    str_title = title_prefixes.get(distribution, "")
    label = f"{symbol} " if symbol != "" else symbol
    print_rich_table(
        df,
        show_index=True,
        headers=list(df.columns),
        title=f"[bold]{label}{str_title}Expected Shortfall[/bold]",
        floatfmt=".2f",
    )
def display_sharpe(data: pd.DataFrame, rfr: float = 0, window: float = 252) -> None:
    """Plots Calculated the sharpe ratio

    Parameters
    ----------
    data: pd.DataFrame
        selected dataframe column
    rfr: float
        risk free rate
    window: float
        length of the rolling window
    """
    ratio = qa_model.get_sharpe(data, rfr, window)
    fig, ax = plt.subplots()
    # Skip the warm-up period where the rolling window is not yet full
    ax.plot(ratio[int(window - 1) :])
    ax.set_title(f"Sharpe Ratio - over a {window} day window")
    ax.set_ylabel("Sharpe ratio")
    ax.set_xlabel("Date")
    fig.legend()
    theme.style_primary_axis(ax)
    theme.visualize_output()
def display_sortino(
    data: pd.DataFrame, target_return: float, window: float, adjusted: bool
) -> None:
    """Plots the sortino ratio

    Parameters
    ----------
    data: pd.DataFrame
        selected dataframe
    target_return: float
        target return of the asset
    window: float
        length of the rolling window
    adjusted: bool
        adjust the sortino ratio
    """
    ratio = qa_model.get_sortino(data, target_return, window, adjusted)
    str_adjusted = "Adjusted " if adjusted else ""
    fig, ax = plt.subplots()
    # Skip the warm-up period where the rolling window is not yet full
    ax.plot(ratio[int(window - 1) :])
    ax.set_title(f"{str_adjusted}Sortino Ratio - over a {window} day window")
    ax.set_ylabel("Sortino ratio")
    ax.set_xlabel("Date")
    fig.legend()
    theme.style_primary_axis(ax)
    theme.visualize_output()
def display_omega(
    data: pd.DataFrame, threshold_start: float = 0, threshold_end: float = 1.5
) -> None:
    """Plots the omega ratio

    Parameters
    ----------
    data: pd.DataFrame
        stock dataframe
    threshold_start: float
        annualized target return threshold start of plotted threshold range
    threshold_end: float
        annualized target return threshold end of plotted threshold range
    """
    df = qa_model.get_omega(data, threshold_start, threshold_end)
    # Plotting
    fig, ax = plt.subplots()
    ax.plot(df["threshold"], df["omega"])
    ax.set_title(f"Omega Curve - over last {len(data)}'s period")
    ax.set_ylabel("Omega Ratio")
    ax.set_xlabel("Threshold (%)")
    fig.legend()
    # NOTE(review): the y-limits are set to the *threshold* range, yet the y-axis
    # plots omega values, which may fall outside it — confirm this is intended
    ax.set_ylim(threshold_start, threshold_end)
    theme.style_primary_axis(ax)
    theme.visualize_output()
__docformat__ = "numpy"
import logging
import warnings
from typing import Tuple, Union
import pandas as pd
import statsmodels.api as sm
from statsmodels.tools.sm_exceptions import MissingDataError
from statsmodels.tsa.seasonal import DecomposeResult, seasonal_decompose
from statsmodels.tsa.stattools import adfuller, kpss
from scipy import stats
import numpy as np
from openbb_terminal.decorators import log_start_end
# TODO : Since these are common/ they should be independent of 'stock' info.
# df_stock should be replaced with a generic df and a column variable
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_summary(data: pd.DataFrame) -> pd.DataFrame:
    """Print summary statistics

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe to get summary statistics for

    Returns
    -------
    summary : pd.DataFrame
        Summary statistics: describe() output plus an added variance row
    """
    summary = data.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
    # Variance is the square of the standard-deviation row
    summary.loc["var"] = summary.loc["std"] ** 2
    return summary
@log_start_end(log=logger)
def get_seasonal_decomposition(
    data: pd.DataFrame, multiplicative: bool = False
) -> Tuple[DecomposeResult, pd.DataFrame, pd.DataFrame]:
    """Perform seasonal decomposition

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of targeted data
    multiplicative : bool
        Boolean to indicate multiplication instead of addition

    Returns
    -------
    Tuple[DecomposeResult, pd.DataFrame, pd.DataFrame]
        DecomposeResult class from statsmodels (observed, seasonal, trend, residual, and weights),
        Filtered cycle DataFrame,
        Filtered trend DataFrame
    """
    period = 5
    # Hodrick-Prescott smoothing parameter
    # See Ravn and Uhlig: http://home.uchicago.edu/~huhlig/papers/uhlig.ravn.res.2002.pdf
    hp_lambda = 107360000000
    decomposition = seasonal_decompose(
        data,
        model="multiplicative" if multiplicative else "additive",
        period=period,
    )
    # HP-filter the trend component only where it is defined (edges are NaN)
    trend_no_nan = decomposition.trend[decomposition.trend.notna().values]
    cycle, trend = sm.tsa.filters.hpfilter(trend_no_nan, lamb=hp_lambda)
    return decomposition, pd.DataFrame(cycle), pd.DataFrame(trend)
@log_start_end(log=logger)
def get_normality(data: pd.DataFrame) -> pd.DataFrame:
    """
    Look at the distribution of returns and generate statistics on the relation to the normal curve.
    This function calculates skew and kurtosis (the third and fourth moments) and performs both
    a Jarque-Bera and Shapiro Wilk test to determine if data is normally distributed.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of targeted data

    Returns
    -------
    pd.DataFrame
        Dataframe containing statistics of normality
    """
    # Each entry is a (statistic, p-value) pair:
    # - kurtosistest: peak height/sharpness vs a standard bell curve
    # - skewtest: asymmetry of the distribution about its mean
    # - jarque_bera: goodness of fit of skew/kurtosis to a normal distribution
    # - shapiro: null hypothesis that the data is drawn from a normal distribution
    # - kstest: one-sample comparison of the empirical distribution against the normal
    test_results = [
        stats.kurtosistest(data),
        stats.skewtest(data),
        stats.jarque_bera(data),
        stats.shapiro(data),
        stats.kstest(data, "norm"),
    ]
    return pd.DataFrame(
        [[r[0] for r in test_results], [r[1] for r in test_results]],
        columns=[
            "Kurtosis",
            "Skewness",
            "Jarque-Bera",
            "Shapiro-Wilk",
            "Kolmogorov-Smirnov",
        ],
        index=["Statistic", "p-value"],
    )
@log_start_end(log=logger)
def get_unitroot(
    data: pd.DataFrame, fuller_reg: str = "c", kpss_reg: str = "c"
) -> pd.DataFrame:
    """Calculate test statistics for unit roots

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame of target variable
    fuller_reg : str
        Type of regression of ADF test. Can be 'c', 'ct', 'ctt', 'nc'. 'c' - Constant and t - trend order
    kpss_reg : str
        Type of regression for KPSS test. Can be 'c', 'ct'

    Returns
    -------
    pd.DataFrame
        Dataframe with results of ADF test and KPSS test
    """
    # The Augmented Dickey-Fuller test
    # Used to test for a unit root in a univariate process in the presence of serial correlation.
    try:
        result = adfuller(data, regression=fuller_reg)
    except MissingDataError:
        data = data.dropna(axis=0)
        result = adfuller(data, regression=fuller_reg)
    cols = ["Test Statistic", "P-Value", "NLags", "Nobs", "ICBest"]
    vals = [result[0], result[1], result[2], result[3], result[5]]
    # BUG FIX: build the result table in a separate variable. The previous code
    # rebound ``data`` to this table and then ran the KPSS test on the ADF
    # statistics instead of on the original series.
    output = pd.DataFrame(data=vals, index=cols, columns=["ADF"])
    # Kwiatkowski-Phillips-Schmidt-Shin test
    # Test for level or trend stationarity
    # This test seems to produce an Interpolation Error which says
    # The test statistic is outside of the range of p-values available in the
    # look-up table. The actual p-value is greater than the p-value returned.
    # Wrap this in catch_warnings to prevent
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            res2 = kpss(data, regression=kpss_reg, nlags="auto")
        except ValueError:
            return pd.DataFrame()
    # KPSS yields only three values; pad the remaining rows
    vals2 = [res2[0], res2[1], res2[2], "", ""]
    output["KPSS"] = vals2
    return output
def calculate_adjusted_var(
    kurtosis: float, skew: float, ndp: float, std: float, mean: float
) -> float:
    """Calculates VaR, which is adjusted for skew and kurtosis (Cornish-Fischer-Expansion)

    Parameters
    ----------
    kurtosis: float
        kurtosis of data
    skew: float
        skew of data
    ndp: float
        normal distribution percentage number (99% -> -2.326)
    std: float
        standard deviation of data
    mean: float
        mean of data

    Returns
    -------
    float
        Real adjusted VaR
    """
    # Derived from Cornish-Fisher-Expansion
    # Formula for quantile from "Finance Compact Plus" by Zimmerman; Part 1, page 130-131
    # More material/resources:
    # - "Numerical Methods and Optimization in Finance" by Gilli, Maringer & Schumann;
    # - https://www.value-at-risk.net/the-cornish-fisher-expansion/;
    # - https://www.diva-portal.org/smash/get/diva2:442078/FULLTEXT01.pdf, Section 2.4.2, p.18;
    # - "Risk Management and Financial Institutions" by John C. Hull
    # NOTE(review): the first skew term squares (ndp**2 - 1); the canonical
    # Cornish-Fisher expansion uses (ndp**2 - 1) unsquared — confirm against the
    # cited references before changing, as callers may rely on current output.
    skew_component = skew / 6 * (ndp**2 - 1) ** 2 - skew**2 / 36 * ndp * (
        2 * ndp**2 - 5
    )
    kurtosis_component = (kurtosis - 3) / 24 * ndp * (ndp**2 - 3)
    quantile = ndp + skew_component + kurtosis_component
    log_return = mean + quantile * std
    # BUG FIX: use np.exp instead of the truncated literal 2.7182818, which
    # silently lost precision when de-logging the return
    real_return = np.exp(log_return) - 1
    return real_return
def get_var(
    data: pd.DataFrame,
    use_mean: bool = False,
    adjusted_var: bool = False,
    student_t: bool = False,
    percentile: Union[int, float] = 99.9,
    portfolio: bool = False,
) -> pd.DataFrame:
    """Gets value at risk for specified stock dataframe.

    Parameters
    ----------
    data: pd.DataFrame
        Data dataframe
    use_mean: bool
        If one should use the data mean for calculation
    adjusted_var: bool
        If one should return VaR adjusted for skew and kurtosis
    student_t: bool
        If one should use the student-t distribution
    percentile: Union[int,float]
        VaR percentile, on a 0-100 scale (e.g. 99.9)
    portfolio: bool
        If the data is a portfolio
    Returns
    -------
    pd.DataFrame
        DataFrame with Value at Risk per percentile
    """
    if not portfolio:
        # Price data: derive daily returns from the adjusted close column
        data = data[["adjclose"]].copy()
        data.loc[:, "return"] = data.adjclose.pct_change()
        data_return = data["return"]
    else:
        # Portfolio input is treated as an existing return series; the first
        # observation is dropped
        data = data[1:].copy()
        data_return = data
    # Distribution percentages: hard-coded normal z-scores for the fixed levels
    percentile = percentile / 100
    percentile_90 = -1.282
    percentile_95 = -1.645
    percentile_99 = -2.326
    percentile_custom = stats.norm.ppf(1 - percentile)
    # Mean
    if use_mean:
        mean = data_return.mean()
    else:
        mean = 0
    # Standard Deviation
    std = data_return.std(axis=0)
    if adjusted_var:
        # Kurtosis
        # Measures height and sharpness of the central peak relative to that of a standard bell curve
        k = data_return.kurtosis(axis=0)
        # Skewness
        # Measure of the asymmetry of the probability distribution of a random variable about its mean
        s = data_return.skew(axis=0)
        # Adjusted VaR (Cornish-Fisher expansion)
        var_90 = calculate_adjusted_var(k, s, percentile_90, std, mean)
        var_95 = calculate_adjusted_var(k, s, percentile_95, std, mean)
        var_99 = calculate_adjusted_var(k, s, percentile_99, std, mean)
        var_custom = calculate_adjusted_var(k, s, percentile_custom, std, mean)
    elif student_t:
        # Calculating VaR based on the Student-t distribution
        # Fitting student-t parameters to the data
        v, _, _ = stats.t.fit(data_return.fillna(0))
        if not use_mean:
            mean = 0
        # NOTE(review): the sample std is used here rather than the fitted scale
        # parameter returned by stats.t.fit — confirm this is intentional
        var_90 = np.sqrt((v - 2) / v) * stats.t.ppf(0.1, v) * std + mean
        var_95 = np.sqrt((v - 2) / v) * stats.t.ppf(0.05, v) * std + mean
        var_99 = np.sqrt((v - 2) / v) * stats.t.ppf(0.01, v) * std + mean
        var_custom = np.sqrt((v - 2) / v) * stats.t.ppf(1 - percentile, v) * std + mean
    else:
        # Regular Var
        var_90 = mean + percentile_90 * std
        var_95 = mean + percentile_95 * std
        var_99 = mean + percentile_99 * std
        var_custom = mean + percentile_custom * std
    # Sorting is not required for the quantile calls below (quantile is
    # order-independent), but is kept for parity with the original flow
    if not portfolio:
        data.sort_values("return", inplace=True, ascending=True)
        data_return = data["return"]
    else:
        data.sort_values(inplace=True, ascending=True)
        data_return = data
    # Historical VaR: empirical quantiles of the observed returns
    hist_var_90 = data_return.quantile(0.1)
    hist_var_95 = data_return.quantile(0.05)
    hist_var_99 = data_return.quantile(0.01)
    hist_var_custom = data_return.quantile(1 - percentile)
    # Values are reported in percent
    var_list = [var_90 * 100, var_95 * 100, var_99 * 100, var_custom * 100]
    hist_var_list = [
        hist_var_90 * 100,
        hist_var_95 * 100,
        hist_var_99 * 100,
        hist_var_custom * 100,
    ]
    str_hist_label = "Historical VaR [%]"
    if adjusted_var:
        str_var_label = "Adjusted VaR [%]"
    elif student_t:
        str_var_label = "Student-t VaR [%]"
    else:
        str_var_label = "Gaussian VaR [%]"
    data_dictionary = {str_var_label: var_list, str_hist_label: hist_var_list}
    df = pd.DataFrame(
        data_dictionary, index=["90.0%", "95.0%", "99.0%", f"{percentile*100}%"]
    )
    df.sort_index(inplace=True)
    # Missing values (e.g. empty tail slices) are displayed as "-"
    df = df.replace(np.nan, "-")
    return df
def get_es(
    data: pd.DataFrame,
    use_mean: bool = False,
    distribution: str = "normal",
    percentile: Union[float, int] = 99.9,
    portfolio: bool = False,
) -> pd.DataFrame:
    """Gets Expected Shortfall for specified stock dataframe.

    Parameters
    ----------
    data: pd.DataFrame
        Data dataframe
    use_mean: bool
        If one should use the data mean for calculation
    distribution: str
        Type of distribution, options: laplace, student_t, logistic, normal
    percentile: Union[float,int]
        ES percentile, on a 0-100 scale (e.g. 99.9)
    portfolio: bool
        If the data is a portfolio

    Returns
    -------
    pd.DataFrame
        DataFrame with Expected Shortfall per percentile
    """
    if not portfolio:
        # Price data: derive daily returns from the adjusted close column
        data = data[["adjclose"]].copy()
        data.loc[:, "return"] = data.adjclose.pct_change()
        data_return = data["return"]
    else:
        # Portfolio input is treated as an existing return series; the first
        # observation is dropped
        data = data[1:].copy()
        data_return = data
    # Distribution percentages: hard-coded normal z-scores for the fixed levels
    percentile = percentile / 100
    percentile_90 = -1.282
    percentile_95 = -1.645
    percentile_99 = -2.326
    percentile_custom = stats.norm.ppf(1 - percentile)
    # Mean
    if use_mean:
        mean = data_return.mean()
    else:
        mean = 0
    # Standard Deviation
    std = data_return.std(axis=0)
    if distribution == "laplace":
        # Calculating ES based on Laplace distribution
        # For formula see: https://en.wikipedia.org/wiki/Expected_shortfall#Laplace_distribution
        # Fitting b (scale parameter) to the variance of the data
        # Since variance of the Laplace dist.: var = 2*b**2
        # Thus:
        b = np.sqrt(std**2 / 2)
        # Calculation
        es_90 = -b * (1 - np.log(2 * 0.1)) + mean
        es_95 = -b * (1 - np.log(2 * 0.05)) + mean
        es_99 = -b * (1 - np.log(2 * 0.01)) + mean
        # The closed-form expression is only valid for tail levels below 0.5
        if (1 - percentile) < 0.5:
            es_custom = -b * (1 - np.log(2 * (1 - percentile))) + mean
        else:
            es_custom = 0
    elif distribution == "student_t":
        # Calculating ES based on the Student-t distribution
        # Fitting student-t parameters to the data
        v, _, scale = stats.t.fit(data_return.fillna(0))
        if not use_mean:
            mean = 0
        # Student T Distribution percentages
        percentile_90 = stats.t.ppf(0.1, v)
        percentile_95 = stats.t.ppf(0.05, v)
        percentile_99 = stats.t.ppf(0.01, v)
        percentile_custom = stats.t.ppf(1 - percentile, v)
        # Calculation
        es_90 = (
            -scale
            * (v + percentile_90**2)
            / (v - 1)
            * stats.t.pdf(percentile_90, v)
            / 0.1
            + mean
        )
        es_95 = (
            -scale
            * (v + percentile_95**2)
            / (v - 1)
            * stats.t.pdf(percentile_95, v)
            / 0.05
            + mean
        )
        es_99 = (
            -scale
            * (v + percentile_99**2)
            / (v - 1)
            * stats.t.pdf(percentile_99, v)
            / 0.01
            + mean
        )
        es_custom = (
            -scale
            * (v + percentile_custom**2)
            / (v - 1)
            * stats.t.pdf(percentile_custom, v)
            / (1 - percentile)
            + mean
        )
    elif distribution == "logistic":
        # Logistic distribution
        # For formula see: https://en.wikipedia.org/wiki/Expected_shortfall#Logistic_distribution
        # Fitting s (scale parameter) to the variance of the data
        # Since variance of the Logistic dist.: var = s**2*pi**2/3
        # Thus:
        s = np.sqrt(3 * std**2 / np.pi**2)
        # Calculation
        a = 1 - percentile
        es_90 = -s * np.log((0.9 ** (1 - 1 / 0.1)) / 0.1) + mean
        es_95 = -s * np.log((0.95 ** (1 - 1 / 0.05)) / 0.05) + mean
        es_99 = -s * np.log((0.99 ** (1 - 1 / 0.01)) / 0.01) + mean
        es_custom = -s * np.log((percentile ** (1 - 1 / a)) / a) + mean
    else:
        # Regular Expected Shortfall (gaussian)
        es_90 = std * -stats.norm.pdf(percentile_90) / 0.1 + mean
        es_95 = std * -stats.norm.pdf(percentile_95) / 0.05 + mean
        es_99 = std * -stats.norm.pdf(percentile_99) / 0.01 + mean
        es_custom = std * -stats.norm.pdf(percentile_custom) / (1 - percentile) + mean
    # Historical Expected Shortfall: mean of returns at or below each historical
    # VaR threshold.
    # BUG FIX: the previous implementation pulled thresholds from get_var(),
    # which (a) re-divided the already-scaled percentile by 100 and (b) returned
    # percent-scaled thresholds that were compared against raw returns, so the
    # historical ES was almost always NaN. The thresholds below are the same
    # empirical quantiles get_var() computes, on the raw return scale.
    hist_thresholds = [
        data_return.quantile(0.1),
        data_return.quantile(0.05),
        data_return.quantile(0.01),
        data_return.quantile(1 - percentile),
    ]
    hist_es_list = [
        data_return[data_return <= threshold].mean() * 100
        for threshold in hist_thresholds
    ]
    # Values are reported in percent
    es_list = [es_90 * 100, es_95 * 100, es_99 * 100, es_custom * 100]
    str_hist_label = "Historical ES [%]"
    if distribution == "laplace":
        str_es_label = "Laplace ES [%]"
    elif distribution == "student_t":
        str_es_label = "Student-t ES [%]"
    elif distribution == "logistic":
        str_es_label = "Logistic ES [%]"
    else:
        str_es_label = "ES [%]"
    data_dictionary = {str_es_label: es_list, str_hist_label: hist_es_list}
    df = pd.DataFrame(
        data_dictionary, index=["90.0%", "95.0%", "99.0%", f"{percentile*100}%"]
    )
    df.sort_index(inplace=True)
    # Missing values (e.g. empty tail slices) are displayed as "-"
    df = df.replace(np.nan, "-")
    return df
def get_sharpe(data: pd.DataFrame, rfr: float = 0, window: float = 252) -> pd.DataFrame:
    """Calculates the sharpe ratio

    Parameters
    ----------
    data: pd.DataFrame
        selected dataframe column
    rfr: float
        risk free rate
    window: float
        length of the rolling window

    Returns
    -------
    sharpe: pd.DataFrame
        sharpe ratio
    """
    # Rolling window return (in percent) over the risk-free rate, divided by
    # the rolling price volatility (also in percent)
    rolling_return = data.pct_change().rolling(window).sum() * 100
    rolling_std = data.rolling(window).std() / np.sqrt(252) * 100
    return (rolling_return - rfr) / rolling_std
def get_sortino(
    data: pd.DataFrame,
    target_return: float = 0,
    window: float = 252,
    adjusted: bool = False,
) -> pd.DataFrame:
    """Calculates the sortino ratio

    Parameters
    ----------
    data: pd.DataFrame
        selected dataframe
    target_return: float
        target return of the asset
    window: float
        length of the rolling window
    adjusted: bool
        adjust the sortino ratio

    Returns
    -------
    sortino: pd.DataFrame
        sortino ratio
    """
    scaled = data * 100
    # Sortino Ratio
    # For method & terminology see:
    # http://www.redrockcapital.com/Sortino__A__Sharper__Ratio_Red_Rock_Capital.pdf
    # Denominator: deviation of the negative observations within each window
    downside_deviation = scaled.rolling(window).apply(
        lambda w: (w.values[w.values < 0]).std() / np.sqrt(252) * 100
    )
    rolling_return = scaled.rolling(window).sum()
    ratio = (rolling_return - target_return) / downside_deviation
    # Dividing by sqrt(2) makes the ratio comparable to the sharpe ratio when
    # the deviation is neutral
    return ratio / np.sqrt(2) if adjusted else ratio
def get_omega_ratio(data: pd.DataFrame, threshold: float = 0) -> float:
    """Calculates the omega ratio

    Parameters
    ----------
    data: pd.DataFrame
        selected dataframe
    threshold: float
        target return threshold

    Returns
    -------
    omega_ratio: float
        omega ratio
    """
    # Omega ratio; for more information and explanation see:
    # https://en.wikipedia.org/wiki/Omega_ratio
    # Convert the annualised threshold to a daily threshold
    daily_threshold = (threshold + 1) ** np.sqrt(1 / 252) - 1
    # Returns in excess of the daily threshold
    excess = data - daily_threshold
    gains = excess[excess > 0].sum()
    losses = excess[excess < 0].sum()
    return gains / (-losses)
def get_omega(
    data: pd.DataFrame, threshold_start: float = 0, threshold_end: float = 1.5
) -> pd.DataFrame:
    """Get the omega series

    Parameters
    ----------
    data: pd.DataFrame
        stock dataframe
    threshold_start: float
        annualized target return threshold start of plotted threshold range
    threshold_end: float
        annualized target return threshold end of plotted threshold range

    Returns
    -------
    omega: pd.DataFrame
        omega series, one omega ratio per threshold
    """
    # Evaluate the omega ratio over 50 evenly spaced thresholds
    thresholds = np.linspace(threshold_start, threshold_end, 50)
    df = pd.DataFrame(thresholds, columns=["threshold"])
    df["omega"] = [get_omega_ratio(data, t) for t in thresholds]
    return df
__docformat__ = "numpy"
import logging
import pandas as pd
import pandas_ta as ta
from openbb_terminal.decorators import log_start_end
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
MAMODES = ["ema", "sma", "wma", "hma", "zlma"]
@log_start_end(log=logger)
def bbands(
    data: pd.DataFrame, window: int = 15, n_std: float = 2, mamode: str = "ema"
) -> pd.DataFrame:
    """Calculate Bollinger Bands

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    window : int
        Length of window to calculate BB
    n_std : float
        Number of standard deviations to show
    mamode : str
        Method of calculating average

    Returns
    -------
    df_ta: pd.DataFrame
        Dataframe of bollinger band data
    """
    # Only the close column is required for Bollinger Bands
    price_col = ta_helpers.check_columns(data, high=False, low=False)
    if price_col is None:
        return pd.DataFrame()
    bands = ta.bbands(
        close=data[price_col],
        length=window,
        std=n_std,
        mamode=mamode,
    )
    # Drop the warm-up rows where the bands are undefined
    return pd.DataFrame(bands).dropna()
@log_start_end(log=logger)
def donchian(
    data: pd.DataFrame,
    upper_length: int = 20,
    lower_length: int = 20,
) -> pd.DataFrame:
    """Calculate Donchian Channels

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    upper_length : int
        Length of window to calculate upper channel
    lower_length : int
        Length of window to calculate lower channel

    Returns
    -------
    pd.DataFrame
        Dataframe of upper and lower channels
    """
    # Donchian channels need High/Low only; the close column is not required
    if ta_helpers.check_columns(data, close=False) is None:
        return pd.DataFrame()
    channels = ta.donchian(
        high=data["High"],
        low=data["Low"],
        upper_length=upper_length,
        lower_length=lower_length,
    ).dropna()
    return pd.DataFrame(channels)
@log_start_end(log=logger)
def kc(
    data: pd.DataFrame,
    window: int = 20,
    scalar: float = 2,
    mamode: str = "ema",
    offset: int = 0,
) -> pd.DataFrame:
    """Keltner Channels

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe of ohlc prices
    window : int
        Length of window
    scalar: float
        Scalar value
    mamode: str
        Type of filter
    offset : int
        Offset value

    Returns
    -------
    pd.DataFrame
        Dataframe of rolling kc
    """
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return pd.DataFrame()
    channels = ta.kc(
        high=data["High"],
        low=data["Low"],
        close=data[close_col],
        length=window,
        scalar=scalar,
        mamode=mamode,
        offset=offset,
    ).dropna()
    return pd.DataFrame(channels)
@log_start_end(log=logger)
def atr(
    data: pd.DataFrame,
    window: int = 14,
    mamode: str = "ema",
    offset: int = 0,
) -> pd.DataFrame:
    """Average True Range

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of ohlc prices
    window : int
        Length of window
    mamode: str
        Type of filter
    offset : int
        Offset value

    Returns
    -------
    pd.DataFrame
        Dataframe of atr
    """
    close_col = ta_helpers.check_columns(data)
    if close_col is None:
        return pd.DataFrame()
    return pd.DataFrame(
        ta.atr(
            high=data["High"],
            low=data["Low"],
            close=data[close_col],
            length=window,
            mamode=mamode,
            offset=offset,
        ).dropna()
    )
__docformat__ = "numpy"
import logging
from typing import Any, Tuple
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def calculate_fib_levels(
    data: pd.DataFrame,
    limit: int = 120,
    start_date: Any = None,
    end_date: Any = None,
) -> Tuple[pd.DataFrame, pd.Timestamp, pd.Timestamp, float, float]:
    """Calculate Fibonacci levels

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe of prices
    limit : int
        Days to look back for retracement
    start_date : Any
        Custom start date for retracement
    end_date : Any
        Custom end date for retracement

    Returns
    -------
    df : pd.DataFrame
        Dataframe of fib levels
    min_date: pd.Timestamp
        Date of min point
    max_date: pd.Timestamp
        Date of max point
    min_pr: float
        Price at min point
    max_pr: float
        Price at max point
    """
    close_col = ta_helpers.check_columns(data, high=False, low=False)
    if close_col is None:
        # BUG FIX: pd.Timestamp() cannot be called without arguments (it raises
        # TypeError), so this error path previously crashed; return NaT sentinels
        return pd.DataFrame(), pd.NaT, pd.NaT, 0, 0
    if start_date and end_date:
        # Snap the custom dates to the nearest available index entries.
        # NOTE(review): Index.get_loc(..., method="nearest") was removed in
        # pandas 2.0 — confirm the pinned pandas version still supports it
        if start_date not in data.index:
            date0 = data.index[data.index.get_loc(start_date, method="nearest")]
            console.print(f"Start date not in data. Using nearest: {date0}")
        else:
            date0 = start_date
        if end_date not in data.index:
            date1 = data.index[data.index.get_loc(end_date, method="nearest")]
            console.print(f"End date not in data. Using nearest: {date1}")
        else:
            date1 = end_date
        data0 = data.loc[date0, close_col]
        data1 = data.loc[date1, close_col]
        min_pr = min(data0, data1)
        max_pr = max(data0, data1)
        if min_pr == data0:
            min_date = date0
            max_date = date1
        else:
            min_date = date1
            max_date = date0
    else:
        # NOTE(review): iloc[limit:] drops the FIRST `limit` rows; for a
        # "look back" window one would expect iloc[-limit:] — confirm the
        # expected row ordering of `data`
        data_to_use = data.iloc[limit:][close_col]
        min_pr = data_to_use.min()
        min_date = data_to_use.idxmin()
        max_pr = data_to_use.max()
        max_date = data_to_use.idxmax()
    # NOTE(review): canonical Fibonacci retracement uses 0.236 (not 0.235) and
    # 0.786 (not 0.65) — confirm these levels are intentional before changing
    fib_levels = [0, 0.235, 0.382, 0.5, 0.618, 0.65, 1]
    price_dif = max_pr - min_pr
    levels = [round(max_pr - price_dif * f_lev, 2) for f_lev in fib_levels]
    df = pd.DataFrame()
    df["Level"] = fib_levels
    df["Level"] = df["Level"].apply(lambda x: str(x * 100) + "%")
    df["Price"] = levels
    return df, min_date, max_date, min_pr, max_pr
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.