code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from datetime import datetime, timedelta
from typing import List, Optional, Dict, Iterable
import os
import argparse
import logging
import re
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import LogLocator, ScalarFormatter
import mplfinance as mpf
import yfinance as yf
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from openbb_terminal.stocks import stocks_helper
from openbb_terminal.forex import av_model, polygon_model
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import log_start_end
import openbb_terminal.config_terminal as cfg
from openbb_terminal.helper_funcs import (
is_valid_axes_count,
plot_autoscale,
lambda_long_number_format_y_axis,
)
# Canonical column ordering for candle dataframes.
CANDLE_SORT = [
    "adjclose",
    "open",
    "close",
    "high",
    "low",
    "volume",
    "logret",
]
# Maps internal source keys to the display names shown to the user.
# NOTE(review): "AlphaAdvantage" looks like a typo of "AlphaVantage", but it
# is a user-facing runtime string -- confirm before changing it.
FOREX_SOURCES: Dict = {
    "YahooFinance": "YahooFinance",
    "AlphaVantage": "AlphaAdvantage",
    "Oanda": "Oanda",
    "Polygon": "Polygon",
}
# User-facing interval options accepted for each data source.
SOURCES_INTERVALS: Dict = {
    "YahooFinance": [
        "1min",
        "5min",
        "15min",
        "30min",
        "60min",
        "90min",
        "1hour",
        "1day",
        "5day",
        "1week",
        "1month",
        "3month",
    ],
    "AlphaVantage": ["1min", "5min", "15min", "30min", "60min"],
}
# Translates the user-facing interval strings above into each provider's
# native identifiers: yfinance interval strings for YahooFinance, and the
# intraday minute count (int) for AlphaVantage.
INTERVAL_MAPS: Dict = {
    "YahooFinance": {
        "1min": "1m",
        "2min": "2m",
        "5min": "5m",
        "15min": "15m",
        "30min": "30m",
        "60min": "60m",
        "90min": "90m",
        "1hour": "60m",
        "1day": "1d",
        "5day": "5d",
        "1week": "1wk",
        "1month": "1mo",
        "3month": "3mo",
    },
    "AlphaVantage": {
        "1min": 1,
        "5min": 5,
        "15min": 15,
        "30min": 30,
        "60min": 60,
        "1day": 1,
    },
}
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def load(
    to_symbol: str,
    from_symbol: str,
    resolution: str = "d",
    interval: str = "1day",
    start_date: Optional[str] = None,
    source: str = "YahooFinance",
    verbose: bool = False,
) -> pd.DataFrame:
    """Load forex for two given symbols.

    Parameters
    ----------
    to_symbol : str
        The "to" currency symbol. Ex: USD, EUR, GBP, YEN
    from_symbol : str
        The "from" currency symbol. Ex: USD, EUR, GBP, YEN
    resolution : str, optional
        The resolution for the data, by default "d"
    interval : str, optional
        What interval to get data for, by default "1day"
    start_date : Optional[str], optional
        When to begin loading in data, by default one year ago ("%Y-%m-%d")
    source : str, optional
        Where to get data from, by default "YahooFinance"
    verbose : bool, optional
        Display verbose information on what was the pair that was loaded, by default False

    Returns
    -------
    pd.DataFrame
        The loaded data (empty on any validation or source error)
    """
    if start_date is None:
        # Default to one year of history.
        start_date = (datetime.now() - timedelta(days=365)).strftime("%Y-%m-%d")
    if source in ["YahooFinance", "AlphaVantage"]:
        interval_map = INTERVAL_MAPS[source]
        if interval not in interval_map and resolution != "d":
            if verbose:
                console.print(
                    f"Interval not supported by {FOREX_SOURCES[source]}."
                    " Need to be one of the following options",
                    list(interval_map.keys()),
                )
            return pd.DataFrame()
        # The interval may be given either as the user-facing key ("1day")
        # or as the provider-native value ("1d").
        if interval in interval_map:
            clean_interval = interval_map[interval]
        elif interval in interval_map.values():
            clean_interval = interval
        else:
            console.print(f"[red]'{interval}' is an invalid interval[/red]\n")
            return pd.DataFrame()
        if source == "AlphaVantage":
            if "min" in interval:
                # Minute-level intervals require the intraday endpoint.
                resolution = "i"
            df = av_model.get_historical(
                to_symbol=to_symbol,
                from_symbol=from_symbol,
                resolution=resolution,
                interval=clean_interval,
                start_date=start_date,
            )
            df.index.name = "date"
            return df
        if source == "YahooFinance":
            df = yf.download(
                f"{from_symbol}{to_symbol}=X",
                start=datetime.strptime(start_date, "%Y-%m-%d"),
                interval=clean_interval,
                progress=verbose,
            )
            df.index.name = "date"
            return df
    if source == "Polygon":
        # Interval for polygon gets broken into multiplier and timeframe,
        # e.g. "15min" -> 15, "min".
        temp = re.split(r"(\d+)", interval)
        multiplier = int(temp[1])
        timeframe = temp[2]
        if timeframe == "min":
            timeframe = "minute"
        df = polygon_model.get_historical(
            f"{from_symbol}{to_symbol}",
            multiplier=multiplier,
            timespan=timeframe,
            start_date=start_date,
        )
        df.index.name = "date"
        return df
    console.print(f"Source {source} not supported")
    return pd.DataFrame()
@log_start_end(log=logger)
def get_yf_currency_list() -> List:
    """Load the YahooFinance list of supported forex pairs from a local file.

    Returns
    -------
    List
        Sorted, de-duplicated "from" currency symbols.
    """
    path = os.path.join(os.path.dirname(__file__), "data/yahoofinance_forex.json")
    # sorted() already returns a list, so the extra list() wrapper is redundant.
    return sorted(set(pd.read_json(path)["from_symbol"]))
# Cached at import time so validators do not re-read the file per call.
YF_CURRENCY_LIST = get_yf_currency_list()
@log_start_end(log=logger)
def check_valid_yf_forex_currency(fx_symbol: str) -> str:
    """Check if given symbol is supported on Yahoo Finance.

    Parameters
    ----------
    fx_symbol : str
        Symbol to check

    Returns
    -------
    str
        Currency symbol (upper-cased)

    Raises
    ------
    argparse.ArgumentTypeError
        Symbol not valid on YahooFinance
    """
    # Use the module-level YF_CURRENCY_LIST cache instead of re-reading the
    # JSON file on every invocation (consistent with av_model.CURRENCY_LIST).
    if fx_symbol.upper() in YF_CURRENCY_LIST:
        return fx_symbol.upper()
    raise argparse.ArgumentTypeError(
        f"{fx_symbol.upper()} not found in YahooFinance supported currency codes. "
    )
@log_start_end(log=logger)
def display_candle(
    data: pd.DataFrame,
    to_symbol: str = "",
    from_symbol: str = "",
    ma: Optional[Iterable[int]] = None,
    external_axes: Optional[List[plt.Axes]] = None,
    use_matplotlib: bool = True,
    add_trend: bool = False,
    yscale: str = "linear",
):
    """Show candle plot for fx data.

    Parameters
    ----------
    data : pd.DataFrame
        Loaded fx historical data
    to_symbol : str
        To forex symbol
    from_symbol : str
        From forex symbol
    ma : Optional[Iterable[int]]
        Moving averages
    external_axes: Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list), by default None
    use_matplotlib : bool
        Render via mplfinance/matplotlib if True, otherwise via plotly
    add_trend : bool
        Overlay OC high/low trend lines (applied only to daily-or-coarser data)
    yscale : str
        y-axis scale, "linear" or "log" (matplotlib path only)
    """
    # We check if there's Volume data to avoid errors and empty subplots
    has_volume = bool(data["Volume"].sum() > 0)
    if add_trend:
        # Only fit trend lines when the bar spacing is at least one day.
        if (data.index[1] - data.index[0]).total_seconds() >= 86400:
            data = stocks_helper.find_trendline(data, "OC_High", "high")
            data = stocks_helper.find_trendline(data, "OC_Low", "low")
    if use_matplotlib:
        # Extra overlays (addplots) for the mplfinance chart.
        ap0 = []
        if add_trend:
            if "OC_High_trend" in data.columns:
                ap0.append(
                    mpf.make_addplot(
                        data["OC_High_trend"],
                        color=cfg.theme.up_color,
                        secondary_y=False,
                    ),
                )
            if "OC_Low_trend" in data.columns:
                ap0.append(
                    mpf.make_addplot(
                        data["OC_Low_trend"],
                        color=cfg.theme.down_color,
                        secondary_y=False,
                    ),
                )
        candle_chart_kwargs = {
            "type": "candle",
            "style": cfg.theme.mpf_style,
            "volume": has_volume,
            "addplot": ap0,
            "xrotation": cfg.theme.xticks_rotation,
            "scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
            "update_width_config": {
                "candle_linewidth": 0.6,
                "candle_width": 0.8,
                "volume_linewidth": 0.8,
                "volume_width": 0.8,
            },
            "warn_too_much_data": 10000,
            "yscale": yscale,
        }
        if ma:
            candle_chart_kwargs["mav"] = ma
        if external_axes is None:
            # Internal-axes mode: let mplfinance create and return the figure.
            candle_chart_kwargs["returnfig"] = True
            candle_chart_kwargs["figratio"] = (10, 7)
            candle_chart_kwargs["figscale"] = 1.10
            candle_chart_kwargs["figsize"] = plot_autoscale()
            candle_chart_kwargs["warn_too_much_data"] = 100_000
            fig, ax = mpf.plot(data, **candle_chart_kwargs)
            if has_volume:
                lambda_long_number_format_y_axis(data, "Volume", ax)
            fig.suptitle(
                f"{from_symbol}/{to_symbol}",
                x=0.055,
                y=0.965,
                horizontalalignment="left",
            )
            if ma:
                # Manually construct the chart legend
                colors = [cfg.theme.get_colors()[i] for i, _ in enumerate(ma)]
                lines = [Line2D([0], [0], color=c) for c in colors]
                labels = ["MA " + str(label) for label in ma]
                ax[0].legend(lines, labels)
            if yscale == "log":
                # Plain (non-scientific) tick labels on the log axis.
                ax[0].yaxis.set_major_formatter(ScalarFormatter())
                ax[0].yaxis.set_major_locator(
                    LogLocator(base=100, subs=[1.0, 2.0, 5.0, 10.0])
                )
                ax[0].ticklabel_format(style="plain", axis="y")
            cfg.theme.visualize_output(force_tight_layout=False)
        elif (has_volume and is_valid_axes_count(external_axes, 2)) or (
            not has_volume and is_valid_axes_count(external_axes, 1)
        ):
            # External-axes mode: draw onto caller-supplied axes (price on
            # axes[0]; volume on axes[1] when present).
            candle_chart_kwargs["ax"] = external_axes[0]
            if has_volume:
                candle_chart_kwargs["volume"] = external_axes[1]
            mpf.plot(data, **candle_chart_kwargs)
    if not use_matplotlib:
        # Plotly path: candlesticks (+volume subplot when available).
        fig = make_subplots(
            rows=2 if has_volume else 1,
            cols=1,
            shared_xaxes=True,
            vertical_spacing=0.06,
            subplot_titles=(
                f"{from_symbol}/{to_symbol}",
                "Volume" if has_volume else None,
            ),
            row_width=[0.2, 0.7] if has_volume else [1],
        )
        fig.add_trace(
            go.Candlestick(
                x=data.index,
                open=data.Open,
                high=data.High,
                low=data.Low,
                close=data.Close,
                name="OHLC",
            ),
            row=1,
            col=1,
        )
        if ma:
            plotly_colors = [
                "black",
                "teal",
                "blue",
                "purple",
                "orange",
                "gray",
                "deepskyblue",
            ]
            # NOTE(review): this path assumes the dataframe has an
            # "Adj Close" column (yfinance naming) -- confirm for other sources.
            for idx, ma_val in enumerate(ma):
                temp = data["Adj Close"].copy()
                temp[f"ma{ma_val}"] = data["Adj Close"].rolling(ma_val).mean()
                temp = temp.dropna()
                fig.add_trace(
                    go.Scatter(
                        x=temp.index,
                        y=temp[f"ma{ma_val}"],
                        name=f"MA{ma_val}",
                        mode="lines",
                        line=go.scatter.Line(
                            # Cycle through the palette when ma list is long.
                            color=plotly_colors[np.mod(idx, len(plotly_colors))]
                        ),
                    ),
                    row=1,
                    col=1,
                )
        if add_trend:
            if "OC_High_trend" in data.columns:
                fig.add_trace(
                    go.Scatter(
                        x=data.index,
                        y=data["OC_High_trend"],
                        name="High Trend",
                        mode="lines",
                        line=go.scatter.Line(color="green"),
                    ),
                    row=1,
                    col=1,
                )
            if "OC_Low_trend" in data.columns:
                fig.add_trace(
                    go.Scatter(
                        x=data.index,
                        y=data["OC_Low_trend"],
                        name="Low Trend",
                        mode="lines",
                        line=go.scatter.Line(color="red"),
                    ),
                    row=1,
                    col=1,
                )
        if has_volume:
            # Bar color encodes candle direction (red = close above open here;
            # NOTE(review): this looks inverted vs the usual convention -- confirm).
            colors = [
                "red" if row.Open < row["Adj Close"] else "green"
                for _, row in data.iterrows()
            ]
            fig.add_trace(
                go.Bar(
                    x=data.index,
                    y=data.Volume,
                    name="Volume",
                    marker_color=colors,
                ),
                row=2,
                col=1,
            )
        fig.update_layout(
            yaxis_title="Stock Price ($)",
            xaxis=dict(
                rangeselector=dict(
                    buttons=list(
                        [
                            dict(
                                count=1,
                                label="1m",
                                step="month",
                                stepmode="backward",
                            ),
                            dict(
                                count=3,
                                label="3m",
                                step="month",
                                stepmode="backward",
                            ),
                            dict(count=1, label="YTD", step="year", stepmode="todate"),
                            dict(
                                count=1,
                                label="1y",
                                step="year",
                                stepmode="backward",
                            ),
                            dict(step="all"),
                        ]
                    )
                ),
                rangeslider=dict(visible=False),
                type="date",
            ),
        )
        # Interactive linear/log y-axis toggle.
        fig.update_layout(
            updatemenus=[
                dict(
                    buttons=[
                        dict(
                            label="linear",
                            method="relayout",
                            args=[{"yaxis.type": "linear"}],
                        ),
                        dict(
                            label="log",
                            method="relayout",
                            args=[{"yaxis.type": "log"}],
                        ),
                    ]
                )
            ]
        )
        fig.show(config=dict({"scrollZoom": True}))
    return
@log_start_end(log=logger)
def parse_forex_symbol(input_symbol):
    """Parse potential forex symbols.

    Accepts "EURUSD", "EUR-USD" or "EUR/USD" and returns the normalized
    upper-case 6-character pair.

    Raises
    ------
    argparse.ArgumentTypeError
        If, after removing separators, the symbol is not 6 characters.
    """
    symbol = input_symbol
    for potential_split in ["-", "/"]:
        symbol = symbol.replace(potential_split, "")
    # Bug fix: the separator path previously returned the symbol without
    # validating its length or upper-casing it.
    if len(symbol) != 6:
        raise argparse.ArgumentTypeError("Input symbol should be 6 characters.\n ")
    return symbol.upper()
import argparse
import logging
import os
from typing import Any, Dict, List
import pandas as pd
import requests
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_currency_list() -> List:
    """Read the AlphaVantage-supported currency codes from the bundled CSV."""
    csv_path = os.path.join(os.path.dirname(__file__), "data/av_forex_currencies.csv")
    return pd.read_csv(csv_path)["currency code"].tolist()
CURRENCY_LIST = get_currency_list()
@log_start_end(log=logger)
def check_valid_forex_currency(symbol: str) -> str:
    """Check if given symbol is supported on alphavantage.

    Parameters
    ----------
    symbol : str
        Symbol to check

    Returns
    -------
    str
        Currency symbol (upper-cased)

    Raises
    ------
    argparse.ArgumentTypeError
        Symbol not valid on alphavantage
    """
    code = symbol.upper()
    if code in CURRENCY_LIST:
        return code
    raise argparse.ArgumentTypeError(
        f"{code} not found in alphavantage supported currency codes. "
    )
@log_start_end(log=logger)
def get_quote(to_symbol: str = "USD", from_symbol: str = "EUR") -> Dict[str, Any]:
    """Get current exchange rate quote from alpha vantage.

    Parameters
    ----------
    to_symbol : str
        To forex symbol
    from_symbol : str
        From forex symbol

    Returns
    -------
    Dict[str, Any]
        Dictionary of exchange rate (empty on error or no data)
    """
    url = (
        "https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE"
        + f"&from_currency={from_symbol}"
        + f"&to_currency={to_symbol}"
        + f"&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    # A timeout prevents the terminal from hanging indefinitely if the
    # AlphaVantage endpoint stalls.
    response = requests.get(url, timeout=30)
    response_json = response.json()
    result: Dict[str, Any] = {}
    if "Error Message" in response_json:
        # API signalled an error (bad symbol, bad key, ...).
        console.print(response_json["Error Message"])
        logger.error(response_json["Error Message"])
    elif not response_json:
        console.print("No data found.\n")
    else:
        result = response_json
    return result
@log_start_end(log=logger)
def get_historical(
    to_symbol: str = "USD",
    from_symbol: str = "EUR",
    resolution: str = "d",
    interval: int = 5,
    start_date: str = "",
) -> pd.DataFrame:
    """Get historical forex data.

    Parameters
    ----------
    to_symbol : str
        To forex symbol
    from_symbol : str
        From forex symbol
    resolution : str, optional
        Resolution of data. Can be "i", "d", "w", "m" for intraday, daily, weekly or monthly
    interval : int, optional
        Interval for intraday data
    start_date : str, optional
        Start date for data.

    Returns
    -------
    pd.DataFrame
        Historical data for forex pair (empty on error)
    """
    d_res = {"i": "FX_INTRADAY", "d": "FX_DAILY", "w": "FX_WEEKLY", "m": "FX_MONTHLY"}
    if resolution not in d_res:
        # Guard: previously an unknown resolution raised a bare KeyError.
        console.print(f"Invalid resolution '{resolution}'. Use one of: i, d, w, m.\n")
        return pd.DataFrame()
    url = f"https://www.alphavantage.co/query?function={d_res[resolution]}&from_symbol={from_symbol}"
    url += f"&to_symbol={to_symbol}&outputsize=full&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    if resolution == "i":
        url += f"&interval={interval}min"
    # Timeout so a stalled endpoint cannot hang the terminal.
    r = requests.get(url, timeout=30)
    # Check the HTTP status *before* parsing the body; a non-200 response
    # may not contain valid JSON at all (the original parsed first).
    if r.status_code != 200:
        return pd.DataFrame()
    response_json = r.json()
    df = pd.DataFrame()
    if "Error Message" in response_json:
        console.print(response_json["Error Message"])
    elif "Note" in response_json:
        # AlphaVantage rate-limit / informational notice.
        console.print(response_json["Note"])
    elif not response_json:
        console.print("No data found.\n")
    else:
        # Payload holds metadata plus the time series; the series is the
        # second key.
        key = list(response_json.keys())[1]
        df = pd.DataFrame.from_dict(response_json[key], orient="index")
        # Intraday data is not filtered by start_date (it only spans days).
        if start_date and resolution != "i":
            df = df[df.index > start_date]
        df = df.rename(
            columns={
                "1. open": "Open",
                "2. high": "High",
                "3. low": "Low",
                "4. close": "Close",
            }
        )
        df.index = pd.DatetimeIndex(df.index)
        # The API returns newest-first; flip to chronological order.
        df = df[::-1]
    return df.astype(float)
__docformat__ = "numpy"
import logging
from typing import Dict, Union, Optional, List
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import mplfinance as mpf
import pandas as pd
import pandas_ta as ta
import seaborn as sns
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forex.oanda.oanda_model import (
account_summary_request,
cancel_pending_order_request,
close_trades_request,
create_order_request,
fx_price_request,
get_calendar_request,
get_candles_dataframe,
open_positions_request,
open_trades_request,
order_history_request,
orderbook_plot_data_request,
pending_orders_request,
positionbook_plot_data_request,
)
from openbb_terminal.helper_funcs import plot_autoscale, is_valid_axes_count
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def get_fx_price(account: str, instrument: Union[str, None] = ""):
    """View price for loaded currency pair.

    Parameters
    ----------
    account : str
        Oanda account ID
    instrument : Union[str, None]
        Instrument code or None
    """
    data = fx_price_request(accountID=account, instrument=instrument)
    # `data` is False on request failure; success carries a "prices" list
    # with bid/ask ladders. (`data and data is not None` was redundant.)
    if data and "prices" in data:
        bid = data["prices"][0]["bids"][0]["price"]
        ask = data["prices"][0]["asks"][0]["price"]
        console.print(f"{instrument or ''} Bid: {bid}")
        console.print(f"{instrument or ''} Ask: {ask}")
    else:
        console.print("No data was retrieved.\n")
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def get_account_summary(accountID: str):
    """Print Oanda account summary.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    """
    summary = account_summary_request(accountID)
    # The request helper returns False on failure and an empty frame when
    # there is nothing to show.
    if summary is False or summary.empty:
        console.print("No data was retrieved.\n")
    else:
        console.print(summary.to_string(index=False, header=False))
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def get_order_book(
    accountID: str,
    instrument: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot the orderbook for the instrument if Oanda provides one.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    instrument : str
        The loaded currency pair
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_book = orderbook_plot_data_request(accountID=accountID, instrument=instrument)
    if df_book is False or df_book.empty:
        console.print("No data was retrieved.\n")
        return
    pd.set_option("display.max_rows", None)
    # HELP WANTED!
    # TODO:
    # An early contributor left "magic constants" in this function
    # help is needed to figure out the rationale behind these or
    # refactor it to not include the magic numbers.
    df_book = df_book.take(range(527, 727, 1))
    book_plot(df_book, instrument, "Order Book", external_axes=external_axes)
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def get_position_book(
    accountID: str, instrument: str = "", external_axes: Optional[List[plt.Axes]] = None
):
    """Plot a position book for an instrument if Oanda provides one.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    instrument : str
        The loaded currency pair
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_book = positionbook_plot_data_request(accountID=accountID, instrument=instrument)
    if df_book is False or df_book.empty:
        console.print("No data was retrieved.\n")
        return
    pd.set_option("display.max_rows", None)
    # HELP WANTED!
    # TODO:
    # An early contributor left "magic constants" in this function
    # help is needed to figure out the rationale behind these or
    # refactor it to not include the magic numbers.
    df_book = df_book.take(range(219, 300, 1))
    book_plot(df_book, instrument, "Position Book", external_axes=external_axes)
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def list_orders(accountID: str, order_state: str = "PENDING", order_count: int = 0):
    """List order history.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    order_state : str
        Filter orders by a specific state ("PENDING", "CANCELLED", etc.)
    order_count : int
        Limit the number of orders to retrieve
    """
    order_history = order_history_request(order_state, order_count, accountID)
    if order_history is False or order_history.empty:
        console.print("No data was retrieved.\n")
    else:
        console.print(order_history)
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def create_order(accountID: str, instrument: str = "", price: int = 0, units: int = 0):
    """Create a buy/sell order.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    instrument : str
        The loaded currency pair
    price : int
        The price to set for the limit order.
    units : int
        The number of units to place in the order request.
    """
    order_response = create_order_request(price, units, instrument, accountID)
    if order_response is False or order_response.empty:
        console.print("No data was returned from Oanda.\n")
    else:
        console.print(order_response.to_string(index=False))
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def cancel_pending_order(accountID: str, orderID: str = ""):
    """Cancel a Pending Order.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    orderID : str
        The pending order ID to cancel.
    """
    canceled_id = cancel_pending_order_request(orderID, accountID)
    # The request helper returns False/None when the cancel did not go through.
    if canceled_id is False or canceled_id is None:
        console.print("No data was returned from Oanda.\n")
    else:
        console.print(f"Order {canceled_id} canceled.")
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def get_open_positions(accountID: str):
    """Get information about open positions.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    """
    positions = open_positions_request(accountID)
    if positions is False or positions.empty:
        console.print("No data was returned from Oanda.\n")
    else:
        console.print(positions.to_string(index=False))
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def get_pending_orders(accountID: str):
    """Get information about pending orders.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    """
    pending = pending_orders_request(accountID)
    if pending is False:
        console.print("No data was returned from Oanda.\n")
    elif pending.empty:
        console.print("No pending orders.\n")
    else:
        console.print(pending.to_string(index=False))
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def get_open_trades(accountID: str):
    """View open trades.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    """
    trades = open_trades_request(accountID)
    if isinstance(trades, pd.DataFrame):
        if trades.empty:
            console.print("No trades were found.\n")
        else:
            console.print(trades.to_string(index=False))
    else:
        console.print("No data was returned from Oanda.\n")
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def close_trade(accountID: str, orderID: str = "", units: Union[int, None] = None):
    """Close a trade.

    Parameters
    ----------
    accountID : str
        Oanda user account ID
    orderID : str
        ID of the order to close
    units : Union[int, None]
        Number of units to close. If empty default to all.
    """
    closed = close_trades_request(orderID, units, accountID)
    if closed is False:
        console.print("No data was returned from Oanda.\n")
    elif closed.empty:
        console.print("No trades were found.\n")
    else:
        console.print(closed.to_string(index=False))
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def show_candles(
    instrument: str = "",
    granularity: str = "D",
    candlecount: int = 180,
    additional_charts: Optional[Dict[str, bool]] = None,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Show candle chart.

    Note that additional plots (ta indicators) not supported in external axis mode.

    Parameters
    ----------
    instrument : str
        The loaded currency pair
    granularity : str, optional
        The timeframe to get for the candle chart. Seconds: S5, S10, S15, S30
        Minutes: M1, M2, M4, M5, M10, M15, M30 Hours: H1, H2, H3, H4, H6, H8, H12
        Day (default): D, Week: W Month: M,
    candlecount : int, optional
        Limit for the number of data points
    additional_charts : Dict[str, bool]
        A dictionary of flags to include additional charts
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df_candles = get_candles_dataframe(instrument, granularity, candlecount)
    # Only compute indicator overlays when we actually have data and flags.
    if (
        df_candles is not False
        and not df_candles.empty
        and additional_charts is not None
    ):
        plots_to_add, legends, subplot_legends = add_plots(
            df_candles, additional_charts
        )
    else:
        plots_to_add, legends, subplot_legends = None, [], []
    candle_chart_kwargs = {
        "type": "candle",
        "style": theme.mpf_style,
        "mav": (20, 50),
        "volume": True,
        "xrotation": theme.xticks_rotation,
        "scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
        "update_width_config": {
            "candle_linewidth": 0.6,
            "candle_width": 0.8,
            "volume_linewidth": 0.8,
            "volume_width": 0.8,
        },
        "warn_too_much_data": 10000,
    }
    # This plot has 2 axes
    if not external_axes:
        candle_chart_kwargs["returnfig"] = True
        candle_chart_kwargs["figratio"] = (10, 7)
        candle_chart_kwargs["figscale"] = 1.10
        candle_chart_kwargs["figsize"] = plot_autoscale()
        if plots_to_add:
            candle_chart_kwargs["addplot"] = plots_to_add
        if isinstance(df_candles, pd.DataFrame):
            fig, ax = mpf.plot(df_candles, **candle_chart_kwargs)
            fig.suptitle(
                f"{instrument} {granularity}",
                x=0.055,
                y=0.965,
                horizontalalignment="left",
            )
            if len(legends) > 0:
                ax[0].legend(legends)
            # pylint: disable=C0200
            # subplot_legends alternates [axes index, labels, ...].
            for i in range(0, len(subplot_legends), 2):
                ax[subplot_legends[i]].legend(subplot_legends[i + 1])
            theme.visualize_output(force_tight_layout=False)
        else:
            logger.error("Data not found")
            console.print("[red]Data not found[/red]\n")
    elif is_valid_axes_count(external_axes, 2):
        # Bug fix: previously this branch plotted unconditionally, so a
        # failed request (df_candles is False) crashed inside mplfinance.
        if not isinstance(df_candles, pd.DataFrame):
            logger.error("Data not found")
            console.print("[red]Data not found[/red]\n")
            return
        ax, volume = external_axes
        candle_chart_kwargs["ax"] = ax
        candle_chart_kwargs["volume"] = volume
        mpf.plot(df_candles, **candle_chart_kwargs)
    else:
        return
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT", "OANDA_TOKEN", "OANDA_ACCOUNT_TYPE"])
def calendar(instrument: str, days: int = 7):
    """View calendar of significant events.

    Parameters
    ----------
    instrument : str
        The loaded currency pair
    days : int
        Number of days in advance
    """
    events = get_calendar_request(days, instrument)
    if events is False:
        console.print("No data was returned from Oanda.\n")
    elif events.empty:
        console.print("No calendar records were found.\n")
    else:
        console.print(events.to_string(index=False))
# Utilities
@log_start_end(log=logger)
def add_plots(df: pd.DataFrame, additional_charts: Dict[str, bool]):
    """Add additional plots to the candle chart.

    Parameters
    ----------
    df : pd.DataFrame
        The source data
    additional_charts : Dict[str, bool]
        A dictionary of flags to include additional charts

    Returns
    -------
    Tuple
        Tuple of lists containing the plots, legends and subplot legends
    """
    # Indicators that need their own panel start at panel 2
    # (0 = price, 1 = volume).
    panel_number = 2
    plots_to_add = []
    legends = []
    subplot_legends = []
    # Robustness fix: use .get() so a missing flag behaves like False
    # instead of raising KeyError when callers pass a partial dictionary.
    if additional_charts.get("ad"):
        ad = ta.ad(df["High"], df["Low"], df["Close"], df["Volume"])
        ad_plot = mpf.make_addplot(ad, panel=panel_number)
        plots_to_add.append(ad_plot)
        subplot_legends.extend([panel_number * 2, ["AD"]])
        panel_number += 1
    if additional_charts.get("bbands"):
        bbands = ta.bbands(df["Close"])
        # Drop the bandwidth column so only lower/mid/upper bands are drawn.
        bbands = bbands.drop("BBB_5_2.0", axis=1)
        bbands_plot = mpf.make_addplot(bbands, panel=0)
        plots_to_add.append(bbands_plot)
        legends.extend(["Lower BBand", "Middle BBand", "Upper BBand"])
    if additional_charts.get("cci"):
        cci = ta.cci(df["High"], df["Low"], df["Close"])
        cci_plot = mpf.make_addplot(cci, panel=panel_number)
        plots_to_add.append(cci_plot)
        subplot_legends.extend([panel_number * 2, ["CCI"]])
        panel_number += 1
    if additional_charts.get("ema"):
        ema = ta.ema(df["Close"])
        ema_plot = mpf.make_addplot(ema, panel=0)
        plots_to_add.append(ema_plot)
        legends.append("10 EMA")
    if additional_charts.get("rsi"):
        rsi = ta.rsi(df["Close"])
        rsi_plot = mpf.make_addplot(rsi, panel=panel_number)
        plots_to_add.append(rsi_plot)
        subplot_legends.extend([panel_number * 2, ["RSI"]])
        panel_number += 1
    if additional_charts.get("obv"):
        obv = ta.obv(df["Close"], df["Volume"])
        obv_plot = mpf.make_addplot(obv, panel=panel_number)
        plots_to_add.append(obv_plot)
        subplot_legends.extend([panel_number * 2, ["OBV"]])
        panel_number += 1
    if additional_charts.get("sma"):
        sma_length = [20, 50]
        for length in sma_length:
            sma = ta.sma(df["Close"], length=length)
            sma_plot = mpf.make_addplot(sma, panel=0)
            plots_to_add.append(sma_plot)
            legends.append(f"{length} SMA")
    if additional_charts.get("vwap"):
        vwap = ta.vwap(df["High"], df["Low"], df["Close"], df["Volume"])
        vwap_plot = mpf.make_addplot(vwap, panel=0)
        plots_to_add.append(vwap_plot)
        legends.append("vwap")
    return plots_to_add, legends, subplot_legends
@log_start_end(log=logger)
def book_plot(
    df: pd.DataFrame,
    instrument: str,
    book_type: str,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Render an order/position book as a mirrored horizontal bar chart.

    Parameters
    ----------
    df : pd.DataFrame
        Order book data
    instrument : str
        The loaded currency pair
    book_type : str
        Order book type
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = df.apply(pd.to_numeric)
    # Mirror the short side onto the negative x-axis.
    df["shortCountPercent"] = df["shortCountPercent"] * -1
    # Symmetric x-limits, wide enough for the larger of the two sides.
    long_peak = abs(max(df["longCountPercent"]))
    short_peak = abs(max(df["shortCountPercent"]))
    axis_origin = max(long_peak, short_peak)
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.set_xlim(-axis_origin, +axis_origin)
    # Long side first (up color), then the mirrored short side (down color).
    sns.barplot(
        x="longCountPercent",
        y="price",
        data=df,
        label="Count Percent",
        color=theme.up_color,
        orient="h",
    )
    sns.barplot(
        x="shortCountPercent",
        y="price",
        data=df,
        label="Prices",
        color=theme.down_color,
        orient="h",
    )
    ax.invert_yaxis()
    ax.yaxis.set_major_locator(mticker.MultipleLocator(10))
    ax.set_title(f"{instrument} {book_type}")
    ax.set_xlabel("Count Percent")
    ax.set_ylabel("Price")
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
__docformat__ = "numpy"
import json
import logging
from datetime import datetime
from typing import Dict, Union
import pandas as pd
from oandapyV20 import API
from oandapyV20.endpoints import (
accounts,
forexlabs,
instruments,
orders,
positions,
pricing,
trades,
)
from oandapyV20.exceptions import V20Error
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Only construct the Oanda API client when the user has configured an
# account type; otherwise leave it as None so the request helpers can
# bail out gracefully instead of calling into an unconfigured client.
if cfg.OANDA_ACCOUNT_TYPE != "REPLACE_ME":
    client = API(access_token=cfg.OANDA_TOKEN, environment=cfg.OANDA_ACCOUNT_TYPE)
else:
    client = None
account = cfg.OANDA_ACCOUNT
@log_start_end(log=logger)
def fx_price_request(
    accountID: str = account, instrument: Union[str, None] = None
) -> Union[Dict[str, str], bool]:
    """Request price for a forex pair.

    Parameters
    ----------
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT
    instrument : Union[str, None]
        The loaded currency pair, by default None

    Returns
    -------
    Union[Dict[str, str], bool]
        The currency pair price or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if instrument is None:
        console.print(
            "Error: An instrument should be loaded before running this command."
        )
        return False
    # Consistency fix: every other request helper in this module bails out
    # when the API client could not be constructed; previously this one
    # raised AttributeError on `client.request` instead.
    if client is None:
        return False
    try:
        parameters = {"instruments": instrument}
        request = pricing.PricingInfo(accountID=accountID, params=parameters)
        response = client.request(request)
        return response
    except V20Error as e:
        logger.exception(str(e))
        d_error = json.loads(e.msg)
        console.print(d_error["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def account_summary_request(accountID: str = account) -> Union[pd.DataFrame, bool]:
    """Request Oanda account summary.

    Parameters
    ----------
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Account summary data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if client is None:
        return False
    try:
        request = accounts.AccountSummary(accountID=accountID)
        response = client.request(request)
        acct = response["account"]
        # Display label -> response key, in presentation order.
        summary_fields = [
            ("Balance", "balance"),
            ("NAV", "NAV"),
            ("Unrealized P/L", "unrealizedPL"),
            ("Total P/L", "pl"),
            ("Open Trade Count", "openTradeCount"),
            ("Margin Available", "marginAvailable"),
            ("Margin Used", "marginUsed"),
            ("Margin Closeout", "marginCloseoutNAV"),
            ("Margin Closeout Percent", "marginCloseoutPercent"),
            ("Margin Closeout Position Value", "marginCloseoutPositionValue"),
        ]
        df_summary = pd.DataFrame(
            [{"Type": label, "Value": acct[key]} for label, key in summary_fields]
        )
        return df_summary
    except V20Error as e:
        logger.exception(str(e))
        d_error = json.loads(e.msg)
        console.print(d_error["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def orderbook_plot_data_request(
    instrument: Union[str, None] = None, accountID: str = account
) -> Union[pd.DataFrame, bool]:
    """Request order book data for plotting.

    Parameters
    ----------
    instrument : Union[str, None]
        The loaded currency pair, by default None
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Order book data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if instrument is None:
        console.print(
            "Error: An instrument should be loaded before running this command."
        )
        return False
    if client is None:
        return False
    try:
        # Request buckets with the finest available width for a detailed plot.
        book_request = instruments.InstrumentsOrderBook(
            instrument=instrument, params={"bucketWidth": "1"}
        )
        response = client.request(book_request)
        return pd.DataFrame.from_dict(response["orderBook"]["buckets"])
    except V20Error as e:
        logger.exception(str(e))
        console.print(json.loads(e.msg)["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def positionbook_plot_data_request(
    instrument: Union[str, None] = None, accountID: str = account
) -> Union[pd.DataFrame, bool]:
    """Request position book data for plotting.

    Parameters
    ----------
    instrument : Union[str, None]
        The loaded currency pair, by default None
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Position book data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if instrument is None:
        console.print(
            "Error: An instrument should be loaded before running this command."
        )
        return False
    if client is None:
        return False
    try:
        book_request = instruments.InstrumentsPositionBook(instrument=instrument)
        response = client.request(book_request)
        return pd.DataFrame.from_dict(response["positionBook"]["buckets"])
    except V20Error as e:
        logger.exception(str(e))
        console.print(json.loads(e.msg)["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def order_history_request(
    order_state: str = "PENDING", order_count: int = 0, accountID: str = account
) -> Union[pd.DataFrame, bool]:
    """Request the orders list from Oanda.

    Parameters
    ----------
    order_state : str
        Filter orders by a specific state ("PENDING", "CANCELLED", etc.)
    order_count : int
        Limit the number of orders to retrieve
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if client is None:
        return False
    parameters: Dict[str, Union[str, int]] = {
        "state": order_state,
        "count": order_count,
    }
    try:
        response = client.request(orders.OrderList(accountID, parameters))
        df_orders = pd.DataFrame.from_dict(response["orders"])
        # Keep only the columns shown to the user, in a fixed order.
        return df_orders[["id", "instrument", "units", "price", "state", "type"]]
    except KeyError:
        # The response carries no "orders" key when nothing matches the filter.
        logger.exception("No orders were found")
        console.print("No orders were found\n")
        return False
    except V20Error as e:
        logger.exception(str(e))
        console.print(json.loads(e.msg)["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def create_order_request(
    price: float = 0,
    units: int = 0,
    instrument: Union[str, None] = None,
    accountID: str = account,
) -> Union[pd.DataFrame, bool]:
    """Request creation of buy/sell trade order.

    Parameters
    ----------
    instrument : Union[str, None]
        The loaded currency pair, by default None
    price : float
        The price to set for the limit order.
    units : int
        The number of units to place in the order request.
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Orders data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if instrument is None:
        console.print(
            "Error: An instrument should be loaded before running this command."
        )
        return False
    # Oanda quotes pairs involving JPY/THB/HUF to 3 decimal places; all other
    # pairs are quoted to 5.  (Annotation fixed: price is a decimal FX quote,
    # not an int.)
    if any(ccy in instrument for ccy in ("JPY", "THB", "HUF")):
        price = round(price, 3)
    else:
        price = round(price, 5)
    data = {
        "order": {
            "price": price,
            "instrument": instrument,
            "units": units,
            "type": "LIMIT",
            "timeInForce": "GTC",
            "positionFill": "DEFAULT",
        }
    }
    if client is None:
        return False
    try:
        request = orders.OrderCreate(accountID, data)
        response = client.request(request)
        order_data = [
            {
                "Order ID": response["orderCreateTransaction"]["id"],
                "Instrument": response["orderCreateTransaction"]["instrument"],
                "Price": response["orderCreateTransaction"]["price"],
                "Units": response["orderCreateTransaction"]["units"],
            }
        ]
        df_orders = pd.DataFrame.from_dict(order_data)
        return df_orders
    except V20Error as e:
        logger.exception(str(e))
        d_error = json.loads(e.msg)
        console.print(d_error["errorMessage"], "\n")
        return False
    except Exception as e:
        # Broad catch kept deliberately: order placement must never crash the
        # terminal; the error is logged and surfaced to the user instead.
        logger.exception(str(e))
        console.print(e)
        return False
@log_start_end(log=logger)
def cancel_pending_order_request(
    orderID: str, accountID: str = account
) -> Union[str, bool]:
    """Request cancellation of a pending order.

    Parameters
    ----------
    orderID : str
        The pending order ID to cancel.
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if client is None:
        return False
    try:
        cancel_request = orders.OrderCancel(accountID, orderID)
        response = client.request(cancel_request)
        # Oanda echoes back the ID of the order it cancelled.
        return response["orderCancelTransaction"]["orderID"]
    except V20Error as e:
        logger.exception(str(e))
        console.print(json.loads(e.msg)["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def open_positions_request(accountID: str = account) -> Union[pd.DataFrame, bool]:
    """Request information on open positions.

    Parameters
    ----------
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Open positions data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if client is None:
        return False
    try:
        request = positions.OpenPositions(accountID)
        response = client.request(request)
        position_data = [
            {
                "Instrument": position["instrument"],
                "Long Units": position["long"]["units"],
                # BUG FIX: previously repeated the long units; the total long
                # P/L lives under the "pl" key, mirroring the short side below.
                "Total Long P/L": position["long"]["pl"],
                "Unrealized Long P/L": position["long"]["unrealizedPL"],
                "Short Units": position["short"]["units"],
                "Total Short P/L": position["short"]["pl"],
                "Short Unrealized P/L": position["short"]["unrealizedPL"],
            }
            for position in response["positions"]
        ]
        df_positions = pd.DataFrame.from_dict(position_data)
        return df_positions
    except V20Error as e:
        logger.exception(str(e))
        d_error = json.loads(e.msg)
        console.print(d_error["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def pending_orders_request(accountID: str = account) -> Union[pd.DataFrame, bool]:
    """Request information on pending orders.

    Parameters
    ----------
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Pending orders data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if client is None:
        return False
    try:
        response = client.request(orders.OrdersPending(accountID))
        pending_data = []
        for order in response["orders"]:
            # Oanda returns RFC3339 timestamps; keep just "YYYY-MM-DD HH:MM:SS".
            created = order["createTime"]
            pending_data.append(
                {
                    "Order ID": order["id"],
                    "Instrument": order["instrument"],
                    "Price": order["price"],
                    "Units": order["units"],
                    "Time Created": f"{created[:10]} {created[11:19]}",
                    "Time In Force": order["timeInForce"],
                }
            )
        if not pending_data:
            return pd.DataFrame()
        return pd.DataFrame.from_dict(pending_data)
    except V20Error as e:
        logger.exception(str(e))
        console.print(json.loads(e.msg)["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def open_trades_request(accountID: str = account) -> Union[pd.DataFrame, bool]:
    """Request open trades data.

    Parameters
    ----------
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Open trades data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    if client is None:
        return False
    try:
        response = client.request(trades.OpenTrades(accountID))
        open_trades = response.get("trades", [])
        if not open_trades:
            return pd.DataFrame()
        # Map raw Oanda field names to user-facing column labels; the dict
        # order also fixes the column order of the result.
        column_map = {
            "id": "ID",
            "instrument": "Instrument",
            "initialUnits": "Initial Units",
            "currentUnits": "Current Units",
            "price": "Entry Price",
            "unrealizedPL": "Unrealized P/L",
        }
        df_trades = pd.DataFrame.from_dict(open_trades)[list(column_map)]
        return df_trades.rename(columns=column_map)
    except V20Error as e:
        logger.exception(str(e))
        console.print(json.loads(e.msg)["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def close_trades_request(
    orderID: str, units: Union[int, None] = 0, accountID: str = account
) -> Union[pd.DataFrame, bool]:
    """Close a trade.

    Parameters
    ----------
    orderID : str
        ID of the order to close
    units : Union[int, None]
        Number of units to close. If empty default to all.
    accountID : str, optional
        Oanda account ID, by default cfg.OANDA_ACCOUNT

    Returns
    -------
    Union[pd.DataFrame, bool]
        Close trades data or False
    """
    if accountID == "REPLACE_ME":
        console.print("Error: Oanda account credentials are required.")
        return False
    data = {}
    # Omitting "units" entirely tells Oanda to close the whole trade.
    if units is not None:
        data["units"] = units
    if client is None:
        return False
    try:
        request = trades.TradeClose(accountID, orderID, data)
        response = client.request(request)
        # A successful close always yields exactly one summary row, so the
        # previous "empty list" check was unreachable and has been removed.
        close_data = [
            {
                "OrderID": response["orderCreateTransaction"]["tradeClose"]["tradeID"],
                "Instrument": response["orderFillTransaction"]["instrument"],
                "Units": response["orderCreateTransaction"]["units"],
                "Price": response["orderFillTransaction"]["price"],
                "P/L": response["orderFillTransaction"]["pl"],
            }
        ]
        return pd.DataFrame.from_dict(close_data)
    except V20Error as e:
        logger.exception(str(e))
        d_error = json.loads(e.msg)
        console.print(d_error["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def get_candles_dataframe(
    instrument: Union[str, None] = None, granularity: str = "D", candlecount: int = 180
) -> Union[pd.DataFrame, bool]:
    """Request data for candle chart.

    Parameters
    ----------
    instrument : str
        Loaded currency pair code
    granularity : str, optional
        Data granularity, by default "D"
    candlecount : int, optional
        Limit for the number of data points, by default 180

    Returns
    -------
    Union[pd.DataFrame, bool]
        Candle chart data or False
    """
    if instrument is None:
        console.print(
            "Error: An instrument should be loaded before running this command."
        )
        return False
    parameters = {
        "granularity": granularity,
        "count": candlecount,
    }
    if client is None:
        return False
    try:
        response = client.request(
            instruments.InstrumentsCandles(instrument, params=parameters)
        )
        candles_data = []
        for candle in response["candles"]:
            mid = candle["mid"]
            timestamp = candle["time"]
            candles_data.append(
                {
                    # Keep just "YYYY-MM-DD HH:MM:SS" from the RFC3339 stamp.
                    "Date": f"{timestamp[:10]} {timestamp[11:19]}",
                    "Open": float(mid["o"]),
                    "High": float(mid["h"]),
                    "Low": float(mid["l"]),
                    "Close": float(mid["c"]),
                    "Volume": candle["volume"],
                }
            )
        if not candles_data:
            return pd.DataFrame()
        df_candles = pd.DataFrame(candles_data)
        df_candles.set_index("Date", inplace=True)
        df_candles.index = pd.to_datetime(df_candles.index)
        return df_candles
    except V20Error as e:
        logger.exception(str(e))
        console.print(json.loads(e.msg)["errorMessage"], "\n")
        return False
@log_start_end(log=logger)
def get_calendar_request(
    days: int = 14, instrument: Union[str, None] = None
) -> Union[pd.DataFrame, bool]:
    """Request data of significant events calendar.

    Parameters
    ----------
    instrument : Union[str, None]
        The loaded currency pair, by default None
    days : int
        Number of days in advance

    Returns
    -------
    Union[pd.DataFrame, bool]
        Calendar events data or False
    """
    if instrument is None:
        console.print(
            "Error: An instrument should be loaded before running this command."
        )
        return False
    # The forexlabs calendar endpoint takes a period in seconds; the negative
    # sign matches the original behavior of this module.
    parameters = {"instrument": instrument, "period": str(days * 86400 * -1)}
    if client is None:
        return False
    try:
        request = forexlabs.Calendar(params=parameters)
        response = client.request(request)
    except V20Error as e:
        logger.exception(str(e))
        d_error = json.loads(e.msg)
        console.print(d_error["message"], "\n")
        return False

    def _value_with_unit(event: dict, key: str) -> str:
        # Missing fields render as empty strings; "Index" units are implied
        # and therefore not appended.  Replaces four copies of this logic.
        if key not in event:
            return ""
        value = event[key]
        if event["unit"] != "Index":
            value += event["unit"]
        return value

    # Iterate events directly instead of the old `for i in enumerate(response)`
    # / `response[i[0]]` pattern, which indexed via the enumerate tuple.
    l_data = [
        {
            "Title": event["title"],
            "Time": datetime.fromtimestamp(event["timestamp"]),
            "Impact": event.get("impact", ""),
            "Forecast": _value_with_unit(event, "forecast"),
            "Market Forecast": _value_with_unit(event, "market"),
            "Currency": event["currency"],
            "Region": event["region"],
            "Actual": _value_with_unit(event, "actual"),
            "Previous": _value_with_unit(event, "previous"),
        }
        for event in response
    ]
    if len(l_data) == 0:
        return pd.DataFrame()
    return pd.DataFrame(l_data)
__docformat__ = "numpy"
import argparse
import logging
from typing import List, Union
from openbb_terminal import config_terminal as cfg
from openbb_terminal import feature_flags as obbff
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forex import av_model, forex_helper
from openbb_terminal.forex.forex_helper import FOREX_SOURCES
from openbb_terminal.forex.oanda import oanda_view
from openbb_terminal.helper_funcs import (
check_non_negative_float,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
# Module-level logger for the Oanda controller.
logger = logging.getLogger(__name__)
# Default Oanda account ID from terminal configuration; the sentinel value
# "REPLACE_ME" indicates credentials have not been configured.
account = cfg.OANDA_ACCOUNT
class OandaController(BaseController):
    """Oanda menu controller."""
    # Commands exposed by this sub-menu; used for routing and autocompletion.
    CHOICES_COMMANDS = [
        "from",
        "to",
        "price",
        "summary",
        "list",
        "orderbook",
        "positionbook",
        "order",
        "cancel",
        "positions",
        "closetrade",
        "trades",
        "candles",
        "pending",
        "calendar",
    ]
    PATH = "/forex/oanda/"
    CHOICES_GENERATION = True
    def __init__(self, queue: Union[List[str], None] = None):
        """Construct Data."""
        super().__init__(queue)
        # Currency pair state; `instrument` is the Oanda-formatted "FROM_TO"
        # code and stays None until both symbols are selected.
        self.from_symbol = ""
        self.to_symbol = ""
        self.source = "Oanda"
        self.instrument: Union[str, None] = None
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            # TODO: We currently use the Alpha Vantage currency list for autocompletion
            # This leads to messages like `USD_EUR is not a valid instrument.`
            # In Oanda they have their own list of available instruments. It would be
            # Great to fetch these lists and store them locally like it's done for
            # other currency codes (see ./av_forex_currencies.csv and how it's handled).
            self.completer = NestedCompleter.from_nested_dict(choices)
    def print_help(self):
        """Print help."""
        mt = MenuText("forex/oanda/")
        mt.add_cmd("summary")
        mt.add_cmd("calendar")
        mt.add_cmd("list")
        mt.add_cmd("pending")
        mt.add_cmd("cancel")
        mt.add_cmd("positions")
        mt.add_cmd("trades")
        mt.add_cmd("closetrade")
        mt.add_raw("\n")
        mt.add_cmd("from")
        mt.add_cmd("to")
        mt.add_raw("\n")
        mt.add_param("_loaded", self.instrument or "")
        mt.add_param("_from", self.from_symbol)
        mt.add_param("_to", self.to_symbol)
        mt.add_param("_source", FOREX_SOURCES[self.source])
        mt.add_raw("\n")
        # The commands below are only enabled once an instrument is loaded
        # (add_cmd's second argument acts as the enable flag).
        mt.add_cmd("candles", self.instrument)
        mt.add_cmd("price", self.instrument)
        mt.add_cmd("order", self.instrument)
        mt.add_cmd("orderbook", self.instrument)
        mt.add_cmd("positionbook", self.instrument)
        console.print(text=mt.menu_text, menu="Forex - Oanda")
    @log_start_end(log=logger)
    def call_to(self, other_args: List[str]):
        """Process 'to' command."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="to",
            description='Select the "to" currency symbol in a forex pair',
        )
        parser.add_argument(
            "-n",
            "--name",
            help="To currency",
            required="-h" not in other_args,
            type=av_model.check_valid_forex_currency,
            dest="to_symbol",
            choices=forex_helper.YF_CURRENCY_LIST,
            metavar="TO_SYMBOL",
        )
        # Allow the shorthand `to EUR` without an explicit -n/--name flag.
        if (
            other_args
            and "-n" not in other_args[0]
            and "--name" not in other_args[0]
            and "-h" not in other_args
        ):
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            self.to_symbol = ns_parser.to_symbol.upper()
            # Rebuild the Oanda instrument code whenever either side changes.
            self.instrument = f"{self.from_symbol}_{self.to_symbol}"
            console.print(
                f"\nSelected pair\nFrom: {self.from_symbol}\n"
                f"To: {self.to_symbol}\n"
                f"Source: {FOREX_SOURCES[self.source]}\n\n"
            )
    @log_start_end(log=logger)
    def call_from(self, other_args: List[str]):
        """Process 'from' command."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="from",
            description='Select the "from" currency symbol in a forex pair',
        )
        parser.add_argument(
            "-n",
            "--name",
            help="From currency",
            required="-h" not in other_args,
            type=av_model.check_valid_forex_currency,
            dest="from_symbol",
            choices=forex_helper.YF_CURRENCY_LIST,
            metavar="FROM_SYMBOL",
        )
        # Allow the shorthand `from USD` without an explicit -n/--name flag.
        if (
            other_args
            and "-n" not in other_args[0]
            and "--name" not in other_args[0]
            and "-h" not in other_args
        ):
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            self.from_symbol = ns_parser.from_symbol.upper()
            self.instrument = f"{self.from_symbol}_{self.to_symbol}"
            console.print(
                f"\nSelected pair\nFrom: {self.from_symbol}\n"
                f"To: {self.to_symbol}\n"
                f"Source: {FOREX_SOURCES[self.source]}\n\n"
            )
    @log_start_end(log=logger)
    def call_price(self, other_args):
        """Process Price Command."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="price",
            description="Get price for selected instrument.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.get_fx_price(account, self.instrument)
    @log_start_end(log=logger)
    def call_summary(self, other_args):
        """Process account summary command."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="summary",
            description="Print some information about your account.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.get_account_summary(account)
    @log_start_end(log=logger)
    def call_orderbook(self, other_args):
        """Process Oanda Order Book."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="orderbook",
            description="Plot an orderbook for an instrument if Oanda provides one.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.get_order_book(account, self.instrument)
    @log_start_end(log=logger)
    def call_positionbook(self, other_args):
        """Process Oanda Position Book."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="positionbook",
            description="Plot a position book for an instrument if Oanda provides one.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.get_position_book(account, self.instrument)
    @log_start_end(log=logger)
    def call_list(self, other_args: List[str]):
        """Process list orders command."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="list",
            description="List order history",
        )
        parser.add_argument(
            "-s",
            "--state",
            dest="state",
            action="store",
            default="ALL",
            type=str,
            required=False,
            help="List orders that have a specific state.",
        )
        parser.add_argument(
            "-l",
            "--limit",
            dest="limit",
            action="store",
            default=20,
            type=int,
            required=False,
            help="Limit the number of orders to retrieve.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Oanda expects upper-case order states (e.g. "PENDING", "FILLED").
            order_state = ns_parser.state.upper()
            order_count = ns_parser.limit
            oanda_view.list_orders(account, order_state, order_count)
    @log_start_end(log=logger)
    def call_order(self, other_args: List[str]):
        """Place limit order."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="order",
            description="Create order",
        )
        parser.add_argument(
            "-u",
            "--unit",
            dest="units",
            action="store",
            type=int,
            required="-h" not in other_args,
            help="The number of units to place in the order request. Positive for "
            + "a long position and negative for a short position.",
        )
        parser.add_argument(
            "-p",
            "--price",
            dest="price",
            action="store",
            type=check_non_negative_float,
            required="-h" not in other_args,
            help="The price to set for the limit order.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            price = ns_parser.price
            units = ns_parser.units
            oanda_view.create_order(account, self.instrument, price, units)
    @log_start_end(log=logger)
    def call_cancel(self, other_args: List[str]):
        """Cancel pending order by ID."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="cancel",
            description="Cancel a Pending Order.",
        )
        parser.add_argument(
            "-i",
            "--id",
            dest="orderID",
            action="store",
            required="-h" not in other_args,
            type=str,
            help="The pending order ID to cancel.",
        )
        # Allow the shorthand `cancel <id>` without an explicit -i flag.
        if other_args:
            if "-" not in other_args[0]:
                other_args.insert(0, "-i")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            orderID = ns_parser.orderID
            oanda_view.cancel_pending_order(account, orderID)
    @log_start_end(log=logger)
    def call_positions(self, other_args):
        """Get Open Positions."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="positions",
            description="Get information about open positions.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.get_open_positions(account)
    @log_start_end(log=logger)
    def call_pending(self, other_args):
        """See up to 25 pending orders."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="pending",
            description="Get information about pending orders.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.get_pending_orders(account)
    @log_start_end(log=logger)
    def call_trades(self, other_args):
        """List open trades."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="trades",
            description="Get information about open trades.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.get_open_trades(account)
    @log_start_end(log=logger)
    def call_closetrade(self, other_args: List[str]):
        """Close a trade by id."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="closetrade",
            description="Close a trade by id.",
        )
        parser.add_argument(
            "-i",
            "--id",
            dest="orderID",
            action="store",
            type=str,
            required=False,
            help="The Trade ID to close. ",
        )
        parser.add_argument(
            "-u",
            "--units",
            dest="units",
            action="store",
            required=False,
            help="The number of units on the trade to close. If not set it "
            + "defaults to all units. ",
        )
        # Allow the shorthand `closetrade <id>` without an explicit -i flag.
        if other_args:
            if "-i" not in other_args[0] and "-h" not in other_args[0]:
                other_args.insert(0, "-i")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            orderID = ns_parser.orderID
            units = ns_parser.units
            oanda_view.close_trade(account, orderID, units)
    @log_start_end(log=logger)
    def call_candles(self, other_args: List[str]):
        """Plot candle chart for a loaded currency pair."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="candles",
            description="Display Candle Data",
        )
        parser.add_argument(
            "-g",
            "--granularity",
            dest="granularity",
            action="store",
            type=str,
            default="D",
            required=False,
            help="The timeframe to get for the candle chart (Seconds: S5, S10, S15, S30 "
            + "Minutes: M1, M2, M4, M5, M10, M15, M30 Hours: H1, H2, H3, H4, H6, H8, H12 "
            + "Day (default): D, Week: W Month: M",
        )
        parser.add_argument(
            "-l",
            "--limit",
            dest="candlecount",
            action="store",
            default=180,
            type=int,
            required=False,
            help="The number of candles to retrieve. Default:180 ",
        )
        # Optional technical-indicator overlays, one boolean flag each.
        parser.add_argument(
            "-a",
            "--ad",
            dest="ad",
            action="store_true",
            help="Adds ad (Accumulation/Distribution Indicator) to the chart",
        )
        parser.add_argument(
            "-b",
            "--bbands",
            dest="bbands",
            action="store_true",
            help="Adds Bollinger Bands to the chart",
        )
        parser.add_argument(
            "-c",
            "--cci",
            dest="cci",
            action="store_true",
            help="Adds cci (Commodity Channel Index) to the chart",
        )
        parser.add_argument(
            "-e",
            "--ema",
            dest="ema",
            action="store_true",
            help="Adds ema (Exponential Moving Average) to the chart",
        )
        parser.add_argument(
            "-o",
            "--obv",
            dest="obv",
            action="store_true",
            help="Adds obv (On Balance Volume) to the chart",
        )
        parser.add_argument(
            "-r",
            "--rsi",
            dest="rsi",
            action="store_true",
            help="Adds rsi (Relative Strength Index) to the chart",
        )
        parser.add_argument(
            "-s",
            "--sma",
            dest="sma",
            action="store_true",
            help="Adds sma (Simple Moving Average) to the chart",
        )
        parser.add_argument(
            "-v",
            "--vwap",
            dest="vwap",
            action="store_true",
            help="Adds vwap (Volume Weighted Average Price) to the chart",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            oanda_view.show_candles(
                self.instrument,
                granularity=ns_parser.granularity.upper(),
                candlecount=ns_parser.candlecount,
                additional_charts={
                    "ad": ns_parser.ad,
                    "bbands": ns_parser.bbands,
                    "cci": ns_parser.cci,
                    "ema": ns_parser.ema,
                    "obv": ns_parser.obv,
                    "rsi": ns_parser.rsi,
                    "sma": ns_parser.sma,
                    "vwap": ns_parser.vwap,
                },
            )
    @log_start_end(log=logger)
    def call_calendar(self, other_args: List[str]):
        """Call calendar."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="calendar",
            description="Show Calendar Data",
        )
        parser.add_argument(
            "-d",
            "--days",
            dest="days",
            action="store",
            type=int,
            default=7,
            required=False,
            help="The number of days to search for, up to 30 forward or backward "
            + "use negative numbers to search back. ",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            days = ns_parser.days
            oanda_view.calendar(self.instrument, days)
__docformat__ = "numpy"
import argparse
import logging
from typing import List
import numpy as np
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.common.quantitative_analysis import qa_view, rolling_view
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
check_proportion_range,
check_list_dates,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import CryptoBaseController
from openbb_terminal.rich_config import console, MenuText
# Module-level logger for the forex QA controller.
logger = logging.getLogger(__name__)
class QaController(CryptoBaseController):
"""Quantitative Analysis Controller class"""
CHOICES_COMMANDS = [
"pick",
"raw",
"summary",
"line",
"hist",
"cdf",
"bw",
"rolling",
"decompose",
"cusum",
"acf",
"spread",
"quantile",
"skew",
"kurtosis",
"normality",
"qqplot",
"unitroot",
"unitroot",
]
FULLER_REG = ["c", "ct", "ctt", "nc"]
KPS_REG = ["c", "ct"]
PATH = "/forex/qa/"
CHOICES_GENERATION = True
def __init__(
self,
from_symbol: str,
to_symbol: str,
data: pd.DataFrame,
queue: List[str] = None,
):
"""Constructor"""
super().__init__(queue)
data["Returns"] = data["Close"].pct_change()
data["LogRet"] = np.log(data["Close"]) - np.log(data["Close"].shift(1))
data = data.dropna()
self.data = data
self.from_symbol = from_symbol
self.to_symbol = to_symbol
self.ticker = f"{from_symbol}/{to_symbol}"
self.target = "Close"
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = self.choices_default
choices["pick"].update({c: {} for c in list(data.columns)})
choices["load"] = {
"--interval": {
c: {}
for c in [
"1",
"5",
"15",
"30",
"60",
"240",
"1440",
"10080",
"43200",
]
},
"-i": "--interval",
"--exchange": {c: {} for c in self.exchanges},
"--source": {c: {} for c in ["CCXT", "YahooFinance", "CoingGecko"]},
"--vs": {c: {} for c in ["usd", "eur"]},
"--start": None,
"-s": "--start",
"--end": None,
"-e": "--end",
}
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
mt = MenuText("forex/qa/")
mt.add_cmd("pick")
mt.add_raw("\n")
mt.add_param("_pair", self.ticker)
mt.add_param("_target", self.target)
mt.add_raw("\n")
mt.add_info("_statistics_")
mt.add_cmd("summary")
mt.add_cmd("normality")
mt.add_cmd("unitroot")
mt.add_info("_plots_")
mt.add_cmd("line")
mt.add_cmd("hist")
mt.add_cmd("cdf")
mt.add_cmd("bw")
mt.add_cmd("acf")
mt.add_cmd("qqplot")
mt.add_info("_rolling_metrics_")
mt.add_cmd("rolling")
mt.add_cmd("spread")
mt.add_cmd("quantile")
mt.add_cmd("skew")
mt.add_cmd("kurtosis")
mt.add_info("_other_")
mt.add_cmd("raw")
mt.add_cmd("decompose")
mt.add_cmd("cusum")
console.print(text=mt.menu_text, menu="Forex - Quantitative Analysis")
def custom_reset(self):
"""Class specific component of reset command"""
if self.ticker:
return [
"forex",
f"from {self.from_symbol}",
f"to {self.to_symbol}",
"load",
"qa",
]
return []
@log_start_end(log=logger)
def call_pick(self, other_args: List[str]):
"""Process pick command"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
prog="pick",
description="""
Change target variable
""",
)
parser.add_argument(
"-t",
"--target",
dest="target",
choices=list(self.data.columns),
help="Select variable to analyze",
)
if other_args and "-t" not in other_args and "-h" not in other_args:
other_args.insert(0, "-t")
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.target = ns_parser.target
@log_start_end(log=logger)
def call_raw(self, other_args: List[str]):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
prog="raw",
description="""
Print raw data to console
""",
)
parser.add_argument(
"-l",
"--limit",
help="Number to show",
type=check_positive,
default=20,
dest="limit",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
default=False,
help=(
"Data is sorted in descending order by default. "
"Reverse flag will sort it in an ascending way. "
"Only works when raw data is displayed."
),
)
parser.add_argument(
"-s",
"--sortby",
help="The column to sort by",
type=str.lower,
choices=[x.lower().replace(" ", "") for x in self.data.columns],
dest="sortby",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
qa_view.display_raw(
data=self.data,
limit=ns_parser.limit,
sortby=ns_parser.sortby,
ascend=ns_parser.reverse,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_summary(self, other_args: List[str]):
"""Process summary command"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
prog="summary",
description="""
Summary statistics
""",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
qa_view.display_summary(data=self.data, export=ns_parser.export)
    @log_start_end(log=logger)
    def call_line(self, other_args: List[str]):
        """Process line command"""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="line",
            description="Show line plot of selected data or highlight specific datetimes.",
        )
        parser.add_argument(
            "--log",
            help="Plot with y on log scale",
            dest="log",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "--ml",
            help="Draw vertical line markers to highlight certain events",
            dest="ml",
            type=check_list_dates,
            default="",
        )
        parser.add_argument(
            "--ms",
            help="Draw scatter markers to highlight certain events",
            dest="ms",
            type=check_list_dates,
            default="",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
        )
        if ns_parser:
            # NOTE(review): figure export is allowed above but ns_parser.export
            # is not forwarded to display_line — confirm this is intended.
            qa_view.display_line(
                self.data[self.target],
                title=f"{self.ticker} {self.target}",
                log_y=ns_parser.log,
                markers_lines=ns_parser.ml,
                markers_scatter=ns_parser.ms,
            )
@log_start_end(log=logger)
def call_hist(self, other_args: List[str]):
"""Process hist command"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
prog="hist",
description="""
Histogram with density and rug
""",
)
parser.add_argument(
"-b",
"--bins",
type=check_positive,
default=15,
dest="n_bins",
choices=range(10, 100),
metavar="BINS",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
qa_view.display_hist(
symbol=self.ticker,
data=self.data,
target=self.target,
bins=ns_parser.n_bins,
)
@log_start_end(log=logger)
def call_cdf(self, other_args: List[str]):
"""Process cdf command"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
prog="cdf",
description="""
Cumulative distribution function
""",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
qa_view.display_cdf(
symbol=self.ticker,
data=self.data,
target=self.target,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_bw(self, other_args: List[str]):
        """Process bw command"""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="bw",
            description="""
                Box and Whisker plot
            """,
        )
        parser.add_argument(
            "-y",
            "--yearly",
            action="store_true",
            default=False,
            dest="year",
            help="Flag to show yearly bw plot",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            qa_view.display_bw(
                symbol=self.ticker,
                data=self.data,
                target=self.target,
                yearly=ns_parser.year,
            )
    @log_start_end(log=logger)
    def call_decompose(self, other_args: List[str]):
        """Process decompose command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="decompose",
            description="""
            Decompose time series as:
            - Additive Time Series = Level + CyclicTrend + Residual + Seasonality
            - Multiplicative Time Series = Level * CyclicTrend * Residual * Seasonality
            """,
        )
        parser.add_argument(
            "-m",
            "--multiplicative",
            action="store_true",
            default=False,
            dest="multiplicative",
            help="decompose using multiplicative model instead of additive",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            qa_view.display_seasonal(
                symbol=self.ticker,
                data=self.data,
                target=self.target,
                multiplicative=ns_parser.multiplicative,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_cusum(self, other_args: List[str]):
        """Process cusum command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="cusum",
            description="""
                Cumulative sum algorithm (CUSUM) to detect abrupt changes in data
            """,
        )
        parser.add_argument(
            "-t",
            "--threshold",
            dest="threshold",
            type=float,
            # Default scales with the observed range of the current target
            # series (range / 40), computed when the parser is built.
            default=(
                max(self.data[self.target].values) - min(self.data[self.target].values)
            )
            / 40,
            help="threshold",
        )
        parser.add_argument(
            "-d",
            "--drift",
            dest="drift",
            type=float,
            # Default drift is half the default threshold (range / 80).
            default=(
                max(self.data[self.target].values) - min(self.data[self.target].values)
            )
            / 80,
            help="drift",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            qa_view.display_cusum(
                data=self.data,
                target=self.target,
                threshold=ns_parser.threshold,
                drift=ns_parser.drift,
            )
@log_start_end(log=logger)
def call_acf(self, other_args: List[str]):
"""Process acf command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="acf",
description="""
Auto-Correlation and Partial Auto-Correlation Functions for diff and diff diff forex data
""",
)
parser.add_argument(
"-l",
"--lags",
dest="lags",
type=check_positive,
default=15,
help="maximum lags to display in plots",
choices=range(5, 100),
metavar="LAGS",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
if self.target != "Close":
console.print(
"Target not Close. For best results, use `pick Close` first."
)
qa_view.display_acf(
symbol=self.ticker,
data=self.data,
target=self.target,
lags=ns_parser.lags,
)
@log_start_end(log=logger)
def call_rolling(self, other_args: List[str]):
"""Process rolling command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="rolling",
description="""
Rolling mean and std deviation
""",
)
parser.add_argument(
"-w",
"--window",
action="store",
dest="n_window",
type=check_positive,
default=14,
help="Window length",
choices=range(5, 100),
metavar="N_WINDOW",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
rolling_view.display_mean_std(
symbol=self.ticker,
data=self.data,
target=self.target,
window=ns_parser.n_window,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_spread(self, other_args: List[str]):
        """Process spread command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="spread",
            description="""Shows rolling spread measurement
            """,
        )
        parser.add_argument(
            "-w",
            "--window",
            action="store",
            dest="n_window",
            type=check_positive,
            default=14,
            help="Window length",
            choices=range(5, 100),
            metavar="N_WINDOW",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            rolling_view.display_spread(
                symbol=self.ticker,
                data=self.data,
                target=self.target,
                window=ns_parser.n_window,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_quantile(self, other_args: List[str]):
        """Process quantile command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="quantile",
            description="""
                The quantiles are values which divide the distribution such that
                there is a given proportion of observations below the quantile.
                For example, the median is a quantile. The median is the central
                value of the distribution, such that half the points are less than
                or equal to it and half are greater than or equal to it.
                By default, q is set at 0.5, which effectively is median. Change q to
                get the desired quantile (0<q<1).
            """,
        )
        parser.add_argument(
            "-w",
            "--window",
            action="store",
            dest="n_window",
            type=check_positive,
            default=14,
            help="window length",
            choices=range(5, 100),
            metavar="N_QUANTILE" if False else "N_WINDOW",
        )
        parser.add_argument(
            "-q",
            "--quantile",
            action="store",
            dest="f_quantile",
            type=check_proportion_range,
            default=0.5,
            help="quantile",
            # NOTE(review): float choices built with np.arange may reject
            # user values off the exact 0.01 grid due to float representation,
            # and 0.0 is included despite the stated 0<q<1 range — confirm.
            choices=np.arange(0.0, 1.0, 0.01).tolist(),
            metavar="N_QUANTILE",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            rolling_view.display_quantile(
                symbol=self.ticker,
                data=self.data,
                target=self.target,
                window=ns_parser.n_window,
                quantile=ns_parser.f_quantile,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_skew(self, other_args: List[str]):
"""Process skew command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="skew",
description="""
Skewness is a measure of asymmetry or distortion of symmetric
distribution. It measures the deviation of the given distribution
of a random variable from a symmetric distribution, such as normal
distribution. A normal distribution is without any skewness, as it is
symmetrical on both sides. Hence, a curve is regarded as skewed if
it is shifted towards the right or the left.
""",
)
parser.add_argument(
"-w",
"--window",
action="store",
dest="n_window",
type=check_positive,
default=14,
help="window length",
choices=range(5, 100),
metavar="N_WINDOW",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
rolling_view.display_skew(
symbol=self.ticker,
data=self.data,
target=self.target,
window=ns_parser.n_window,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_kurtosis(self, other_args: List[str]):
        """Process kurtosis command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="kurtosis",
            description="""
                Kurtosis is a measure of the "tailedness" of the probability distribution
                of a real-valued random variable. Like skewness, kurtosis describes the shape
                of a probability distribution and there are different ways of quantifying it
                for a theoretical distribution and corresponding ways of estimating it from
                a sample from a population. Different measures of kurtosis may have different
                interpretations.
            """,
        )
        parser.add_argument(
            "-w",
            "--window",
            action="store",
            dest="n_window",
            type=check_positive,
            default=14,
            help="window length",
            choices=range(5, 100),
            metavar="N_WINDOW",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            rolling_view.display_kurtosis(
                symbol=self.ticker,
                data=self.data,
                target=self.target,
                window=ns_parser.n_window,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_normality(self, other_args: List[str]):
"""Process normality command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="normality",
description="""
Normality tests
""",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
qa_view.display_normality(
data=self.data, target=self.target, export=ns_parser.export
)
@log_start_end(log=logger)
def call_qqplot(self, other_args: List[str]):
"""Process qqplot command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="qqplot",
description="""
Display QQ plot vs normal quantiles
""",
)
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
qa_view.display_qqplot(
symbol=self.ticker, data=self.data, target=self.target
)
    @log_start_end(log=logger)
    def call_unitroot(self, other_args: List[str]):
        """Process unitroot command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="unitroot",
            description="""
                Unit root test / stationarity (ADF, KPSS)
            """,
        )
        # ADF (augmented Dickey-Fuller) regression type; valid values come
        # from the controller's FULLER_REG attribute.
        parser.add_argument(
            "-r",
            "--fuller_reg",
            help="Type of regression. Can be ‘c’,’ct’,’ctt’,’nc’ 'c' - Constant and t - trend order",
            choices=self.FULLER_REG,
            default="c",
            type=str,
            dest="fuller_reg",
        )
        # KPSS regression type; valid values come from the controller's
        # KPS_REG attribute.
        parser.add_argument(
            "-k",
            "--kps_reg",
            help="Type of regression. Can be ‘c’,’ct'",
            choices=self.KPS_REG,
            type=str,
            dest="kpss_reg",
            default="c",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            qa_view.display_unitroot(
                data=self.data,
                target=self.target,
                fuller_reg=ns_parser.fuller_reg,
                kpss_reg=ns_parser.kpss_reg,
                export=ns_parser.export,
            )
__docformat__ = "numpy"
# pylint:disable=too-many-lines
import argparse
import logging
from datetime import datetime
from typing import List
import numpy as np
import pandas as pd
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common.technical_analysis import (
custom_indicators_view,
momentum_view,
overlap_view,
trend_indicators_view,
volatility_view,
volume_view,
volatility_model,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
check_positive,
check_positive_list,
check_positive_float,
valid_date,
)
from openbb_terminal.forex.forex_helper import FOREX_SOURCES
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import StockBaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class TechnicalAnalysisController(StockBaseController):
    """Technical Analysis Controller class"""
    # Commands available in this menu (overlap, momentum, trend,
    # volatility and custom indicator commands).
    CHOICES_COMMANDS = [
        "ema",
        "sma",
        "zlma",
        "cci",
        "macd",
        "rsi",
        "stoch",
        "fisher",
        "cg",
        "adx",
        "aroon",
        "bbands",
        "donchian",
        "fib",
    ]
    # Menu path used by the terminal for routing and help display.
    PATH = "/forex/ta/"
    # NOTE(review): presumably tells the parent class to auto-generate
    # completer choices from the argparse parsers — confirm in base class.
    CHOICES_GENERATION = True
    def __init__(
        self,
        ticker: str,
        source: str,
        start: datetime,
        interval: str,
        data: pd.DataFrame,
        queue: List[str] = None,
    ):
        """Constructor

        Parameters
        ----------
        ticker : str
            Forex pair being analyzed (e.g. "USD/EUR")
        source : str
            Data source key, looked up in FOREX_SOURCES for display
        start : datetime
            Start date of the loaded data
        interval : str
            Interval of the loaded data
        data : pd.DataFrame
            Price data; must contain a "Close" column
        queue : List[str]
            Queue of pending terminal commands
        """
        super().__init__(queue)
        self.ticker = ticker
        self.source = source
        self.start = start
        self.interval = interval
        self.data = data
        # Several TA commands below read "Adj Close"; forex data has no
        # adjusted close, so mirror "Close" into that column.
        self.data["Adj Close"] = data["Close"]
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)
    def print_help(self):
        """Print help"""
        # Build the menu text: current pair/source parameters first,
        # then commands grouped by indicator family.
        mt = MenuText("forex/ta/")
        mt.add_param(
            "_currency", f"{self.ticker} (from {self.start.strftime('%Y-%m-%d')})"
        )
        mt.add_param("_source", FOREX_SOURCES[self.source])
        mt.add_raw("\n")
        mt.add_info("_overlap_")
        mt.add_cmd("ema")
        mt.add_cmd("sma")
        mt.add_cmd("zlma")
        mt.add_info("_momentum_")
        mt.add_cmd("cci")
        mt.add_cmd("macd")
        mt.add_cmd("rsi")
        mt.add_cmd("stoch")
        mt.add_cmd("fisher")
        mt.add_cmd("cg")
        mt.add_info("_trend_")
        mt.add_cmd("adx")
        mt.add_cmd("aroon")
        mt.add_info("_volatility_")
        mt.add_cmd("bbands")
        mt.add_cmd("donchian")
        mt.add_info("_custom_")
        mt.add_cmd("fib")
        console.print(text=mt.menu_text, menu="Forex - Technical Analysis")
def custom_reset(self):
"""Class specific component of reset command"""
if self.ticker:
from_symbol, to_symbol = self.ticker.split("/")
return ["forex", f"from {from_symbol}", f"to {to_symbol}", "load", "ta"]
return []
# TODO: Go through all models and make sure all needed columns are in dfs.
# This todo is a duplicated in crypto/ta_controller
@log_start_end(log=logger)
def call_ema(self, other_args: List[str]):
"""Process ema command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="ema",
description="""
The Exponential Moving Average is a staple of technical
analysis and is used in countless technical indicators. In a Simple Moving
Average, each value in the time period carries equal weight, and values outside
of the time period are not included in the average. However, the Exponential
Moving Average is a cumulative calculation, including all data. Past values have
a diminishing contribution to the average, while more recent values have a greater
contribution. This method allows the moving average to be more responsive to changes
in the data.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive_list,
default=[20, 50],
help="Window lengths. Multiple values indicated as comma separated values.",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=int,
default=0,
help="offset",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
overlap_view.view_ma(
ma_type="EMA",
symbol=self.ticker,
data=self.data["Close"],
window=ns_parser.n_length,
offset=ns_parser.n_offset,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_sma(self, other_args: List[str]):
        """Process sma command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="sma",
            description="""
                Moving Averages are used to smooth the data in an array to
                help eliminate noise and identify trends. The Simple Moving Average is literally
                the simplest form of a moving average. Each output value is the average of the
                previous n values. In a Simple Moving Average, each value in the time period carries
                equal weight, and values outside of the time period are not included in the average.
                This makes it less responsive to recent changes in the data, which can be useful for
                filtering out those changes.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive_list,
            default=[20, 50],
            help="Window lengths. Multiple values indicated as comma separated values. ",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=int,
            default=0,
            help="offset",
        )
        # Support the `sma 20,50` shorthand: a bare first token becomes -l.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            overlap_view.view_ma(
                ma_type="SMA",
                symbol=self.ticker,
                data=self.data["Close"],
                window=ns_parser.n_length,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_zlma(self, other_args: List[str]):
        """Process zlma command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="zlma",
            description="""
                The zero lag exponential moving average (ZLEMA) indicator
                was created by John Ehlers and Ric Way. The idea is do a
                regular exponential moving average (EMA) calculation but
                on a de-lagged data instead of doing it on the regular data.
                Data is de-lagged by removing the data from "lag" days ago
                thus removing (or attempting to) the cumulative effect of
                the moving average.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive_list,
            default=[20],
            help="Window lengths. Multiple values indicated as comma separated values.",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=int,
            default=0,
            help="offset",
        )
        # Support the `zlma 20` shorthand: a bare first token becomes -l.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            overlap_view.view_ma(
                ma_type="ZLMA",
                symbol=self.ticker,
                data=self.data["Close"],
                window=ns_parser.n_length,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_cci(self, other_args: List[str]):
"""Process cci command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="cci",
description="""
The CCI is designed to detect beginning and ending market trends.
The range of 100 to -100 is the normal trading range. CCI values outside of this
range indicate overbought or oversold conditions. You can also look for price
divergence in the CCI. If the price is making new highs, and the CCI is not,
then a price correction is likely.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive,
default=14,
help="length",
)
parser.add_argument(
"-s",
"--scalar",
action="store",
dest="n_scalar",
type=check_positive,
default=0.015,
help="scalar",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
momentum_view.display_cci(
symbol=self.ticker,
data=self.data,
window=ns_parser.n_length,
scalar=ns_parser.n_scalar,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_macd(self, other_args: List[str]):
        """Process macd command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="macd",
            description="""
                The Moving Average Convergence Divergence (MACD) is the difference
                between two Exponential Moving Averages. The Signal line is an Exponential Moving
                Average of the MACD. \n \n The MACD signals trend changes and indicates the start
                of new trend direction. High values indicate overbought conditions, low values
                indicate oversold conditions. Divergence with the price indicates an end to the
                current trend, especially if the MACD is at extreme high or low values. When the MACD
                line crosses above the signal line a buy signal is generated. When the MACD crosses
                below the signal line a sell signal is generated. To confirm the signal, the MACD
                should be above zero for a buy, and below zero for a sell.
            """,
        )
        parser.add_argument(
            "--fast",
            action="store",
            dest="n_fast",
            type=check_positive,
            default=12,
            help="The short period.",
        )
        parser.add_argument(
            "--slow",
            action="store",
            dest="n_slow",
            type=check_positive,
            default=26,
            help="The long period.",
        )
        parser.add_argument(
            "--signal",
            action="store",
            dest="n_signal",
            type=check_positive,
            default=9,
            help="The signal period.",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_macd(
                symbol=self.ticker,
                # "Adj Close" mirrors "Close" (set in __init__).
                data=self.data["Adj Close"],
                n_fast=ns_parser.n_fast,
                n_slow=ns_parser.n_slow,
                n_signal=ns_parser.n_signal,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_rsi(self, other_args: List[str]):
        """Process rsi command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rsi",
            description="""
                The Relative Strength Index (RSI) calculates a ratio of the
                recent upward price movements to the absolute price movement. The RSI ranges
                from 0 to 100. The RSI is interpreted as an overbought/oversold indicator when
                the value is over 70/below 30. You can also look for divergence with price. If
                the price is making new highs/lows, and the RSI is not, it indicates a reversal.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        parser.add_argument(
            "-d",
            "--drift",
            action="store",
            dest="n_drift",
            type=check_positive,
            default=1,
            help="drift",
        )
        # Support the `rsi 14` shorthand: a bare first token becomes -l.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_rsi(
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                drift=ns_parser.n_drift,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_stoch(self, other_args: List[str]):
        """Process stoch command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="stoch",
            description="""
                The Stochastic Oscillator measures where the close is in relation
                to the recent trading range. The values range from zero to 100. %D values over 75
                indicate an overbought condition; values under 25 indicate an oversold condition.
                When the Fast %D crosses above the Slow %D, it is a buy signal; when it crosses
                below, it is a sell signal. The Raw %K is generally considered too erratic to use
                for crossover signals.
            """,
        )
        parser.add_argument(
            "-k",
            "--fastkperiod",
            action="store",
            dest="n_fastkperiod",
            type=check_positive,
            default=14,
            help="The time period of the fastk moving average",
        )
        parser.add_argument(
            "-d",
            "--slowdperiod",
            action="store",
            dest="n_slowdperiod",
            type=check_positive,
            default=3,
            help="The time period of the slowd moving average",
        )
        parser.add_argument(
            "--slowkperiod",
            action="store",
            dest="n_slowkperiod",
            type=check_positive,
            default=3,
            help="The time period of the slowk moving average",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_stoch(
                symbol=self.ticker,
                data=self.data,
                fastkperiod=ns_parser.n_fastkperiod,
                slowdperiod=ns_parser.n_slowdperiod,
                slowkperiod=ns_parser.n_slowkperiod,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_fisher(self, other_args: List[str]):
"""Process fisher command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="fisher",
description="""
The Fisher Transform is a technical indicator created by John F. Ehlers
that converts prices into a Gaussian normal distribution.1 The indicator
highlights when prices have moved to an extreme, based on recent prices.
This may help in spotting turning points in the price of an asset. It also
helps show the trend and isolate the price waves within a trend.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive,
default=14,
help="length",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
momentum_view.display_fisher(
symbol=self.ticker,
data=self.data,
window=ns_parser.n_length,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_cg(self, other_args: List[str]):
        """Process cg command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="cg",
            description="""
                The Center of Gravity indicator, in short, is used to anticipate future price movements
                and to trade on price reversals as soon as they happen. However, just like other oscillators,
                the COG indicator returns the best results in range-bound markets and should be avoided when
                the price is trending. Traders who use it will be able to closely speculate the upcoming
                price change of the asset.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
        )
        # Support the `cg 14` shorthand: a bare first token becomes -l.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_cg(
                symbol=self.ticker,
                data=self.data["Adj Close"],
                window=ns_parser.n_length,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_adx(self, other_args: List[str]):
        """Process adx command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="adx",
            description="""
                The ADX is a Welles Wilder style moving average of the Directional Movement Index (DX).
                The values range from 0 to 100, but rarely get above 60. To interpret the ADX, consider
                a high number to be a strong trend, and a low number, a weak trend.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        parser.add_argument(
            "-d",
            "--drift",
            action="store",
            dest="n_drift",
            type=check_positive,
            default=1,
            help="drift",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            trend_indicators_view.display_adx(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                drift=ns_parser.n_drift,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_aroon(self, other_args: List[str]):
        """Process aroon command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="aroon",
            description="""
                The word aroon is Sanskrit for "dawn's early light." The Aroon
                indicator attempts to show when a new trend is dawning. The indicator consists
                of two lines (Up and Down) that measure how long it has been since the highest
                high/lowest low has occurred within an n period range. \n \n When the Aroon Up is
                staying between 70 and 100 then it indicates an upward trend. When the Aroon Down
                is staying between 70 and 100 then it indicates an downward trend. A strong upward
                trend is indicated when the Aroon Up is above 70 while the Aroon Down is below 30.
                Likewise, a strong downward trend is indicated when the Aroon Down is above 70 while
                the Aroon Up is below 30. Also look for crossovers. When the Aroon Down crosses above
                the Aroon Up, it indicates a weakening of the upward trend (and vice versa).
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=25,
            help="length",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        # NOTE(review): -o/--offset is parsed but n_offset is never forwarded
        # to display_aroon below — confirm whether the view accepts an offset.
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            type=check_positive,
            default=0,
            help="offset",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            trend_indicators_view.display_aroon(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_bbands(self, other_args: List[str]):
        """Process bbands command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="bbands",
            description="""
                Bollinger Bands consist of three lines. The middle band is a simple
                moving average (generally 20 periods) of the typical price (TP). The upper and lower
                bands are F standard deviations (generally 2) above and below the middle band.
                The bands widen and narrow when the volatility of the price is higher or lower,
                respectively. \n \nBollinger Bands do not, in themselves, generate buy or sell signals;
                they are an indicator of overbought or oversold conditions. When the price is near the
                upper or lower band it indicates that a reversal may be imminent. The middle band
                becomes a support or resistance level. The upper and lower bands can also be
                interpreted as price targets. When the price bounces off of the lower band and crosses
                the middle band, then the upper band becomes the price target.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=5,
            help="length",
        )
        parser.add_argument(
            "-s",
            "--std",
            action="store",
            dest="n_std",
            type=check_positive_float,
            default=2,
            help="std",
            # NOTE(review): float choices from np.arange may reject values off
            # the exact 0.25 grid due to float representation — confirm.
            choices=np.arange(0.0, 10, 0.25).tolist(),
            metavar="N_STD",
        )
        parser.add_argument(
            "-m",
            "--mamode",
            action="store",
            dest="s_mamode",
            default="sma",
            choices=volatility_model.MAMODES,
            help="mamode",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volatility_view.display_bbands(
                symbol=self.ticker,
                data=self.data,
                window=ns_parser.n_length,
                n_std=ns_parser.n_std,
                mamode=ns_parser.s_mamode,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_donchian(self, other_args: List[str]):
        """Process donchian command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="donchian",
            description="""
                Donchian Channels are three lines generated by moving average
                calculations that comprise an indicator formed by upper and lower
                bands around a midrange or median band. The upper band marks the
                highest price of a security over N periods while the lower band
                marks the lowest price of a security over N periods. The area
                between the upper and lower bands represents the Donchian Channel.
            """,
        )
        # Window used for the upper (highest-high) band.
        parser.add_argument(
            "-u",
            "--length_upper",
            action="store",
            dest="n_length_upper",
            type=check_positive,
            default=20,
            help="length",
        )
        # Window used for the lower (lowest-low) band.
        parser.add_argument(
            "-l",
            "--length_lower",
            action="store",
            dest="n_length_lower",
            type=check_positive,
            default=20,
            help="length",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volatility_view.display_donchian(
                symbol=self.ticker,
                data=self.data,
                upper_length=ns_parser.n_length_upper,
                lower_length=ns_parser.n_length_lower,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_ad(self, other_args: List[str]):
    """Process ad command"""
    parser = argparse.ArgumentParser(
        prog="ad",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
The Accumulation/Distribution Line is similar to the On Balance
Volume (OBV), which sums the volume times +1/-1 based on whether the close is
higher than the previous close. The Accumulation/Distribution indicator, however
multiplies the volume by the close location value (CLV). The CLV is based on the
movement of the issue within a single bar and can be +1, -1 or zero. \n \n
The Accumulation/Distribution Line is interpreted by looking for a divergence in
the direction of the indicator relative to price. If the Accumulation/Distribution
Line is trending upward it indicates that the price may follow. Also, if the
Accumulation/Distribution Line becomes flat while the price is still rising (or falling)
then it signals an impending flattening of the price.
""",
    )
    # Optional flag: use the open value of the data in the calculation.
    parser.add_argument(
        "--open",
        dest="b_use_open",
        action="store_true",
        default=False,
        help="uses open value of data",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not ns_parser:
        return
    volume_view.display_ad(
        symbol=self.ticker,
        data=self.data,
        use_open=ns_parser.b_use_open,
        export=ns_parser.export,
    )
@log_start_end(log=logger)
def call_obv(self, other_args: List[str]):
    """Process obv command"""
    parser = argparse.ArgumentParser(
        prog="obv",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
The On Balance Volume (OBV) is a cumulative total of the up and
down volume. When the close is higher than the previous close, the volume is added
to the running total, and when the close is lower than the previous close, the volume
is subtracted from the running total. \n \n To interpret the OBV, look for the OBV
to move with the price or precede price moves. If the price moves before the OBV,
then it is a non-confirmed move. A series of rising peaks, or falling troughs, in the
OBV indicates a strong trend. If the OBV is flat, then the market is not trending.
""",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not ns_parser:
        return
    # OBV takes no extra flags; it is computed from close and volume only.
    volume_view.display_obv(
        symbol=self.ticker,
        data=self.data,
        export=ns_parser.export,
    )
@log_start_end(log=logger)
def call_fib(self, other_args: List[str]):
    """Process fib command"""
    parser = argparse.ArgumentParser(
        prog="fib",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Calculates the fibonacci retracement levels",
    )
    # Look-back window in days, constrained to a sane range.
    parser.add_argument(
        "-p",
        "--period",
        dest="period",
        type=int,
        default=120,
        choices=range(1, 960),
        metavar="PERIOD",
        help="Days to look back for retracement",
    )
    # --start and --end are a pair: each becomes mandatory exactly when
    # the other was given on the command line.
    parser.add_argument(
        "--start",
        dest="start",
        type=valid_date,
        required="--end" in other_args,
        help="Starting date to select",
    )
    parser.add_argument(
        "--end",
        dest="end",
        type=valid_date,
        required="--start" in other_args,
        help="Ending date to select",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not ns_parser:
        return
    custom_indicators_view.fibonacci_retracement(
        symbol=self.ticker,
        data=self.data,
        limit=ns_parser.period,
        start_date=ns_parser.start,
        end_date=ns_parser.end,
        export=ns_parser.export,
    )
__docformat__ = "numpy"
import logging
import os
from PIL import Image
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import finviz_model
from openbb_terminal.helper_funcs import export_data, print_rich_table
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_performance_map(period: str = "1d", map_filter: str = "sp500"):
    """Opens Finviz map website in a browser. [Source: Finviz]

    Parameters
    ----------
    period : str
        Performance period. Available periods are 1d, 1w, 1m, 3m, 6m, 1y.
    map_filter : str
        Map filter. Available map filters are sp500, world, full, etf.
    """
    # Pure delegation: the model layer is responsible for opening the browser.
    finviz_model.get_performance_map(period, map_filter)
@log_start_end(log=logger)
def display_valuation(
    group: str = "sector",
    sortby: str = "Name",
    ascend: bool = True,
    export: str = "",
):
    """Display group (sectors, industry or country) valuation data. [Source: Finviz]

    Parameters
    ----------
    group : str
        Group by category. Available groups can be accessed through get_groups().
    sortby : str
        Column to sort by
    ascend : bool
        Flag to sort in ascending order
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file
    """
    valuation_df = finviz_model.get_valuation_data(group, sortby, ascend)
    # Nothing to render or export if the request came back empty.
    if valuation_df.empty:
        return
    print_rich_table(
        valuation_df,
        headers=valuation_df.columns.tolist(),
        show_index=False,
        title="Group Valuation Data",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "valuation",
        valuation_df,
    )
@log_start_end(log=logger)
def display_performance(
    group: str = "sector",
    sortby: str = "Name",
    ascend: bool = True,
    export: str = "",
):
    """View group (sectors, industry or country) performance data. [Source: Finviz]

    Parameters
    ----------
    group : str
        Group by category. Available groups can be accessed through get_groups().
    sortby : str
        Column to sort by
    ascend : bool
        Flag to sort in ascending order
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file
    """
    df_group = finviz_model.get_performance_data(group, sortby, ascend)
    # Nothing to render or export if the request came back empty.
    if df_group.empty:
        return
    print_rich_table(
        df_group,
        show_index=False,
        # Pass a plain list (not a pandas Index), matching the sibling
        # display_* functions in this module.
        headers=list(df_group.columns),
        title="Group Performance Data",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "performance",
        df_group,
    )
@log_start_end(log=logger)
def display_spectrum(group: str = "sector", export: str = ""):
    """Display finviz spectrum in system viewer [Source: Finviz]

    Parameters
    ----------
    group: str
        Group by category. Available groups can be accessed through get_groups().
    export: str
        Format to export data
    """
    # Downloads the spectrum image; the model saves it as "<group>.jpg".
    finviz_model.get_spectrum_data(group)
    # Translate the friendly group name into the filename stem the model used.
    group = finviz_model.GROUPS[group]
    # NOTE(review): the image handle is never closed and the downloaded .jpg
    # remains on disk — confirm this is intended.
    img = Image.open(group + ".jpg")
    # NOTE(review): unlike the other display_* functions, no dataframe is
    # passed to export_data here — presumably there is nothing tabular to
    # export; verify export_data tolerates the missing argument.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "spectrum",
    )
    img.show()
@log_start_end(log=logger)
def display_future(
    future_type: str = "Indices",
    sortby: str = "ticker",
    ascend: bool = False,
    export: str = "",
):
    """Display table of a particular future type. [Source: Finviz]

    Parameters
    ----------
    future_type : str
        From the following: Indices, Energy, Metals, Meats, Grains, Softs, Bonds, Currencies
    sortby : str
        Column to sort by
    ascend : bool
        Flag to sort in ascending order
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file
    """
    futures_df = finviz_model.get_futures(future_type, sortby, ascend)
    # The same three columns are shown regardless of the future type.
    print_rich_table(
        futures_df,
        headers=["prevClose", "last", "change (%)"],
        show_index=True,
        title="Future Table [Source: FinViz]",
    )
    # Exported file is named after the future type (e.g. "indices").
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        future_type.lower(),
        futures_df,
    )
__docformat__ = "numpy"
import logging
import pandas as pd
import requests
from alpha_vantage.sectorperformance import SectorPerformances
from openbb_terminal.rich_config import console
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_sector_data() -> pd.DataFrame:
    """Get real-time performance sector data

    Returns
    -------
    df_sectors : pd.Dataframe
        Real-time performance data
    """
    performances = SectorPerformances(
        key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas"
    )
    sectors_df, _ = performances.get_sector()
    # pylint: disable=invalid-sequence-index
    # The API reports fractions; convert the real-time ranking to percent.
    realtime = sectors_df["Rank A: Real-Time Performance"] * 100
    realtime = realtime.to_frame().reset_index()
    realtime.columns = ["Sector", "% Chg"]
    return realtime
@log_start_end(log=logger)
def get_real_gdp(
    interval: str = "q",
    start_year: int = 2010,
) -> pd.DataFrame:
    """Get annual or quarterly Real GDP for US

    Parameters
    ----------
    interval : str, optional
        Interval for GDP: "q" for quarterly, anything else for annual, by default "q"
    start_year : int, optional
        Start year for plot, by default 2010

    Returns
    -------
    pd.DataFrame
        Dataframe of GDP
    """
    s_interval = "quarterly" if interval == "q" else "annual"
    url = (
        "https://www.alphavantage.co/query?function=REAL_GDP"
        + f"&interval={s_interval}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    r = requests.get(url, headers={"User-Agent": get_user_agent()})
    if r.status_code != 200:
        console.print(f"Request error. Response code: {str(r.status_code)}.\n")
        return pd.DataFrame()
    payload = r.json()
    # Successful requests
    if "data" in payload:
        if payload["data"]:
            data = pd.DataFrame(payload["data"])
            data["date"] = pd.to_datetime(data["date"])
            data["GDP"] = data["value"].astype(float)
            data = data.drop(columns=["value"])
            # Keep only observations from start_year onward.
            return data[data["date"] >= f"{start_year}-01-01"]
        # "data" key present but empty: fall through to the error checks below.
        console.print(f"No data found for {interval}.\n")
    # Invalid API Keys
    if "Error Message" in payload:
        console.print(payload["Error Message"])
    # Premium feature, API plan is not authorized
    if "Information" in payload:
        console.print(payload["Information"])
    return pd.DataFrame()
@log_start_end(log=logger)
def get_gdp_capita(start_year: int = 2010) -> pd.DataFrame:
    """Real GDP per Capita for United States

    Parameters
    ----------
    start_year : int, optional
        Start year for plot, by default 2010

    Returns
    -------
    pd.DataFrame
        DataFrame of GDP per Capita
    """
    url = (
        "https://www.alphavantage.co/query?function=REAL_GDP_PER_CAPITA"
        + f"&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    r = requests.get(url, headers={"User-Agent": get_user_agent()})
    if r.status_code != 200:
        console.print(f"Request error. Response code: {str(r.status_code)}.\n")
        return pd.DataFrame()
    payload = r.json()
    # Successful requests: reshape into [date, GDP] and filter by start year.
    if "data" in payload:
        if payload["data"]:
            data = pd.DataFrame(payload["data"])
            data["date"] = pd.to_datetime(data["date"])
            data["GDP"] = data["value"].astype(float)
            data = data.drop(columns=["value"])
            return data[data["date"] >= f"{start_year}-01-01"]
        # "data" key present but empty: fall through to the error checks below.
        console.print("No data found.\n")
    # Invalid API Keys
    if "Error Message" in payload:
        console.print(payload["Error Message"])
    # Premium feature, API plan is not authorized
    if "Information" in payload:
        console.print(payload["Information"])
    return pd.DataFrame()
@log_start_end(log=logger)
def get_inflation(start_year: int = 2010) -> pd.DataFrame:
    """Get historical Inflation for United States from AlphaVantage

    Parameters
    ----------
    start_year : int, optional
        Start year for plot, by default 2010

    Returns
    -------
    pd.DataFrame
        DataFrame of inflation rates
    """
    url = (
        "https://www.alphavantage.co/query?function=INFLATION"
        + f"&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    r = requests.get(url, headers={"User-Agent": get_user_agent()})
    if r.status_code != 200:
        console.print(f"Request error. Response code: {str(r.status_code)}.\n")
        return pd.DataFrame()
    payload = r.json()
    # Successful requests: reshape into [date, Inflation] and filter by start year.
    if "data" in payload:
        if payload["data"]:
            data = pd.DataFrame(payload["data"])
            data["date"] = pd.to_datetime(data["date"])
            data["Inflation"] = data["value"].astype(float)
            data = data.drop(columns=["value"])
            return data[data["date"] >= f"{start_year}-01-01"]
        # "data" key present but empty: fall through to the error checks below.
        console.print("No data found.\n")
    # Invalid API Keys
    if "Error Message" in payload:
        console.print(payload["Error Message"])
    # Premium feature, API plan is not authorized
    if "Information" in payload:
        console.print(payload["Information"])
    return pd.DataFrame()
@log_start_end(log=logger)
def get_cpi(interval: str = "m", start_year: int = 2010) -> pd.DataFrame:
    """Get Consumer Price Index from Alpha Vantage

    Parameters
    ----------
    interval : str
        Interval for data. Either "m" or "s" for monthly or semiannual
    start_year : int, optional
        Start year for plot, by default 2010

    Returns
    -------
    pd.DataFrame
        Dataframe of CPI
    """
    # Anything other than "s" is treated as monthly.
    s_interval = "semiannual" if interval == "s" else "monthly"
    url = (
        f"https://www.alphavantage.co/query?function=CPI&interval={s_interval}"
        + f"&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    r = requests.get(url, headers={"User-Agent": get_user_agent()})
    if r.status_code != 200:
        console.print(f"Request error. Response code: {str(r.status_code)}.\n")
        return pd.DataFrame()
    payload = r.json()
    # Successful requests
    if "data" in payload:
        if payload["data"]:
            data = pd.DataFrame(payload["data"])
            data["date"] = pd.to_datetime(data["date"])
            data["CPI"] = data["value"].astype(float)
            data = data.drop(columns=["value"])
            return data[data["date"] >= f"{start_year}-01-01"]
        # "data" key present but empty: fall through to the error checks below.
        console.print(f"No data found for {interval}.\n")
    # Invalid API Keys
    if "Error Message" in payload:
        console.print(payload["Error Message"])
    # Premium feature, API plan is not authorized
    if "Information" in payload:
        console.print(payload["Information"])
    return pd.DataFrame()
@log_start_end(log=logger)
def get_treasury_yield(
    interval: str = "m", maturity: str = "10y", start_date: str = "2010-01-01"
) -> pd.DataFrame:
    """Get historical yield for a given maturity

    Parameters
    ----------
    interval : str
        Interval for data. Can be "d","w","m" for daily, weekly or monthly, by default "m"
    maturity : str
        Maturity timeline. Can be "3m","5y","10y" or "30y", by default "10y"
    start_date: str
        Start date for data. Should be in YYYY-MM-DD format, by default "2010-01-01"

    Returns
    -------
    pd.DataFrame
        Dataframe of historical yields
    """
    # Translate the short flags into the API's expected query values.
    d_interval = {"d": "daily", "w": "weekly", "m": "monthly"}
    d_maturity = {"3m": "3month", "5y": "5year", "10y": "10year", "30y": "30year"}
    url = (
        "https://www.alphavantage.co/query?function=TREASURY_YIELD"
        + f"&interval={d_interval[interval]}"
        + f"&maturity={d_maturity[maturity]}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    r = requests.get(url, headers={"User-Agent": get_user_agent()})
    if r.status_code != 200:
        console.print(f"Request error. Response code: {str(r.status_code)}.\n")
        return pd.DataFrame()
    payload = r.json()
    # Successful requests
    if "data" in payload:
        if payload["data"]:
            data = pd.DataFrame(payload["data"])
            data["date"] = pd.to_datetime(data["date"])
            data["Yield"] = data["value"].astype(float)
            data = data.drop(columns=["value"])
            return data[data["date"] >= start_date]
        # "data" key present but empty: fall through to the error checks below.
        console.print(f"No data found for {interval}.\n")
    # Invalid API Keys
    if "Error Message" in payload:
        console.print(payload["Error Message"])
    # Premium feature, API plan is not authorized
    if "Information" in payload:
        console.print(payload["Information"])
    return pd.DataFrame()
@log_start_end(log=logger)
def get_unemployment(start_year: int = 2010) -> pd.DataFrame:
    """Get historical unemployment for United States

    Parameters
    ----------
    start_year : int, optional
        Start year for plot, by default 2010

    Returns
    -------
    pd.DataFrame
        Dataframe of historical unemployment rates
    """
    url = f"https://www.alphavantage.co/query?function=UNEMPLOYMENT&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    r = requests.get(url, headers={"User-Agent": get_user_agent()})
    if r.status_code != 200:
        # Match the sibling endpoints in this module: surface the HTTP
        # failure instead of silently returning an empty frame.
        console.print(f"Request error. Response code: {str(r.status_code)}.\n")
        return pd.DataFrame()
    payload = r.json()
    # Successful requests
    if "data" in payload:
        if payload["data"]:
            data = pd.DataFrame(payload["data"])
            data["date"] = pd.to_datetime(data["date"])
            data["unemp"] = data["value"].astype(float)
            data = data.drop(columns=["value"])
            return data[data["date"] >= f"{start_year}-01-01"]
        # "data" key present but empty: fall through to the error checks below.
        console.print("No data found.\n")
    # Invalid API Keys
    if "Error Message" in payload:
        console.print(payload["Error Message"])
    # Premium feature, API plan is not authorized
    if "Information" in payload:
        console.print(payload["Information"])
    return pd.DataFrame()
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import wsj_model
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_overview(export: str = ""):
    """Market overview with daily change. [Source: Wall St. Journal]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    overview_df = wsj_model.market_overview()
    # Tell the user when the source returned nothing rather than failing later.
    if overview_df.empty:
        console.print("No overview data available\n")
        return
    print_rich_table(
        overview_df,
        headers=overview_df.columns.tolist(),
        show_index=False,
        title="Market Overview",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "overview",
        overview_df,
    )
@log_start_end(log=logger)
def display_indices(export: str = ""):
    """US indices. [Source: Wall St. Journal]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    indices_df = wsj_model.us_indices()
    # Tell the user when the source returned nothing rather than failing later.
    if indices_df.empty:
        console.print("No indices data available\n")
        return
    print_rich_table(
        indices_df,
        headers=indices_df.columns.tolist(),
        show_index=False,
        title="US Indices",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "indices",
        indices_df,
    )
@log_start_end(log=logger)
def display_futures(export: str = ""):
    """Futures/Commodities. [Source: Wall St. Journal]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    commodities_df = wsj_model.top_commodities()
    # Tell the user when the source returned nothing rather than failing later.
    if commodities_df.empty:
        console.print("No futures/commodities data available\n")
        return
    print_rich_table(
        commodities_df,
        headers=commodities_df.columns.tolist(),
        show_index=False,
        title="Futures/Commodities [Source: Wall St. Journal]",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "futures",
        commodities_df,
    )
@log_start_end(log=logger)
def display_usbonds(export: str = ""):
    """US bonds. [Source: Wall St. Journal]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    bonds_df = wsj_model.us_bonds()
    # Tell the user when the source returned nothing rather than failing later.
    if bonds_df.empty:
        console.print("No US bonds data available\n")
        return
    print_rich_table(
        bonds_df,
        headers=bonds_df.columns.tolist(),
        show_index=False,
        title="US Bonds",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "usbonds",
        bonds_df,
    )
@log_start_end(log=logger)
def display_glbonds(export: str = ""):
    """Global bonds. [Source: Wall St. Journal]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    bonds_df = wsj_model.global_bonds()
    # Tell the user when the source returned nothing rather than failing later.
    if bonds_df.empty:
        console.print("No global bonds data available\n")
        return
    print_rich_table(
        bonds_df,
        headers=bonds_df.columns.tolist(),
        show_index=False,
        title="Global Bonds",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "glbonds",
        bonds_df,
    )
@log_start_end(log=logger)
def display_currencies(export: str = ""):
    """Display currencies. [Source: Wall St. Journal]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    currencies_df = wsj_model.global_currencies()
    # Tell the user when the source returned nothing rather than failing later.
    if currencies_df.empty:
        console.print("No currencies data available\n")
        return
    print_rich_table(
        currencies_df,
        headers=currencies_df.columns.tolist(),
        show_index=False,
        title="Currencies",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "currencies",
        currencies_df,
    )
__docformat__ = "numpy"
# pylint: disable=no-member
import logging
from typing import Dict, Any, Optional, Tuple, Union
from urllib.error import HTTPError
from datetime import datetime
import pandas as pd
import pandas_datareader.data as web
import requests
import yfinance as yf
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.helpers_denomination import transform as transform_by_denomination
logger = logging.getLogger(__name__)
# Country name (underscore-separated, title-cased) -> two-letter country
# code used to build EconDB ticker symbols (e.g. "RGDP" + "US").
# NOTE(review): "Hong Kong" is keyed with a space while other multi-word
# names use underscores — confirm callers normalize accordingly.
COUNTRY_CODES = {
    "Albania": "AL",
    "Argentina": "AR",
    "Australia": "AU",
    "Austria": "AT",
    "Azerbaijan": "AZ",
    "Bangladesh": "BD",
    "Belarus": "BY",
    "Belgium": "BE",
    "Bhutan": "BT",
    "Bosnia_and_Herzegovina": "BA",
    "Botswana": "BW",
    "Brazil": "BR",
    "Bulgaria": "BG",
    "Cambodia": "KH",
    "Cameroon": "CM",
    "Canada": "CA",
    "Chile": "CL",
    "China": "CN",
    "Colombia": "CO",
    "Croatia": "HR",
    "Cyprus": "CY",
    "Czechia": "CZ",
    "Denmark": "DK",
    "Dominican_Republic": "DO",
    "Egypt": "EG",
    "Estonia": "EE",
    "European_Union": "EU",
    "Finland": "FI",
    "France": "FR",
    "Germany": "DE",
    "Greece": "GR",
    "Honduras": "HN",
    "Hong Kong": "HK",
    "Hungary": "HU",
    "India": "IN",
    "Indonesia": "ID",
    "Iran": "IR",
    "Ireland": "IE",
    "Israel": "IL",
    "Italy": "IT",
    "Japan": "JP",
    "Kazakhstan": "KZ",
    "Laos": "LA",
    "Latvia": "LV",
    "Lebanon": "LB",
    "Lithuania": "LT",
    "Luxembourg": "LU",
    "Macedonia": "MK",
    "Malaysia": "MY",
    "Malta": "MT",
    "Mexico": "MX",
    "Mongolia": "MN",
    "Netherlands": "NL",
    "New_Zealand": "NZ",
    "Nigeria": "NG",
    "Norway": "NO",
    "Oman": "OM",
    "Pakistan": "PK",
    "Panama": "PA",
    "Paraguay": "PY",  # was "PYG" (the currency code); ISO 3166-1 alpha-2 is "PY"
    "Peru": "PE",
    "Philippines": "PH",
    "Poland": "PL",
    "Portugal": "PT",
    "Qatar": "QA",
    "Romania": "RO",
    "Russia": "RU",
    "Saudi_Arabia": "SA",
    "Serbia": "RS",
    "Singapore": "SG",
    "Slovakia": "SK",
    "Slovenia": "SI",
    "South_Africa": "ZA",
    "South_Korea": "KR",
    "Spain": "ES",
    "Sweden": "SE",
    "Switzerland": "CH",
    "Taiwan": "TW",
    "Thailand": "TH",
    "Tunisia": "TN",
    "Turkey": "TR",
    "Ukraine": "UA",
    "United_Arab_Emirates": "AE",
    "United_Kingdom": "UK",
    "United_States": "US",
    "Uzbekistan": "UZ",
    "Venezuela": "VE",
    "Vietnam": "VN",
}
# Country name -> ISO 4217 currency code, used when requesting or
# converting macro series in a country's local currency.
# NOTE(review): "Belarus": "BYR" and "Venezuela": "VEF" are superseded
# codes (BYN since 2016, VES since 2018) — confirm what the data source
# expects before changing them.
COUNTRY_CURRENCIES = {
    "Albania": "ALL",
    "Argentina": "ARS",
    "Australia": "AUD",
    "Austria": "EUR",
    "Azerbaijan": "AZN",
    "Bangladesh": "BDT",
    "Belarus": "BYR",
    "Belgium": "EUR",
    "Bhutan": "BTN",
    "Bosnia_and_Herzegovina": "BAM",
    "Botswana": "BWP",
    "Brazil": "BRL",
    "Bulgaria": "BGN",
    "Cambodia": "KHR",
    "Cameroon": "XAF",
    "Canada": "CAD",
    "Chile": "CLP",
    "China": "CNY",
    "Colombia": "COP",
    "Croatia": "HRK",
    "Cyprus": "EUR",
    "Czechia": "CZK",
    "Denmark": "DKK",
    "Dominican_Republic": "DOP",
    "Egypt": "EGP",
    "Estonia": "EUR",
    "European_Union": "EUR",
    "Finland": "EUR",
    "France": "EUR",
    "Germany": "EUR",
    "Greece": "EUR",
    "Honduras": "HNL",
    "Hong Kong": "HKD",
    "Hungary": "HUF",
    "India": "INR",
    "Indonesia": "IDR",
    "Iran": "IRR",
    "Ireland": "EUR",
    "Israel": "ILS",
    "Italy": "EUR",
    "Japan": "JPY",
    "Kazakhstan": "KZT",
    "Laos": "LAK",
    "Latvia": "EUR",
    "Lebanon": "LBP",
    "Lithuania": "EUR",
    "Luxembourg": "EUR",
    "Macedonia": "MKD",
    "Malaysia": "MYR",
    "Malta": "EUR",
    "Mexico": "MXN",
    "Mongolia": "MNT",
    "Netherlands": "EUR",
    "New_Zealand": "NZD",
    "Nigeria": "NGN",
    "Norway": "NOK",
    "Oman": "OMR",
    "Pakistan": "PKR",
    "Panama": "PAB",
    "Paraguay": "PYG",
    "Peru": "PEN",
    "Philippines": "PHP",
    "Poland": "PLN",
    "Portugal": "EUR",
    "Qatar": "QAR",
    "Romania": "RON",
    "Russia": "RUB",
    "Saudi_Arabia": "SAR",
    "Serbia": "RSD",
    "Singapore": "SGD",
    "Slovakia": "EUR",
    "Slovenia": "EUR",
    "South_Africa": "ZAR",
    "South_Korea": "KRW",
    "Spain": "EUR",
    "Sweden": "SEK",
    "Switzerland": "CHF",
    "Taiwan": "TWD",
    "Thailand": "THB",
    "Tunisia": "TND",
    "Turkey": "TRY",
    "Ukraine": "UAH",
    "United_Arab_Emirates": "AED",
    "United_Kingdom": "GBP",
    "United_States": "USD",
    "Uzbekistan": "UZS",
    "Venezuela": "VEF",
    "Vietnam": "VND",
}
# Catalogue of supported macro indicators: short ticker code -> display
# name, reporting period and a description shown to the user.
PARAMETERS = {
    "RGDP": {
        "name": "Real gross domestic product",
        "period": "Quarterly",
        "description": "Inflation-adjusted measure that reflects the value of all goods and services produced by "
        "an economy in a given year (chain-linked series).",
    },
    "RPRC": {
        "name": "Real private consumption",
        "period": "Quarterly",
        "description": "All purchases made by consumers adjusted by inflation (chain-linked series).",
    },
    "RPUC": {
        "name": "Real public consumption",
        "period": "Quarterly",
        "description": "All purchases made by the government adjusted by inflation (chain-linked series).",
    },
    "RGFCF": {
        "name": "Real gross fixed capital formation",
        "period": "Quarterly",
        "description": "The acquisition of produced assets adjusted by inflation (chain-linked series).",
    },
    "REXP": {
        "name": "Real exports of goods and services",
        "period": "Quarterly",
        "description": "Transactions in goods and services from residents to non-residents adjusted for "
        "inflation (chain-linked series)",
    },
    "RIMP": {
        "name": "Real imports of goods and services",
        "period": "Quarterly",
        "description": "Transactions in goods and services to residents from non-residents adjusted for "
        "inflation (chain-linked series)",
    },
    "GDP": {
        "name": "Gross domestic product",
        "period": "Quarterly",
        "description": "Measure that reflects the value of all goods and services produced by "
        "an economy in a given year (chain-linked series).",
    },
    "PRC": {
        "name": "Private consumption",
        "period": "Quarterly",
        "description": "All purchases made by consumers (chain-linked series).",
    },
    "PUC": {
        "name": "Public consumption",
        "period": "Quarterly",
        "description": "All purchases made by the government (chain-linked series)",
    },
    "GFCF": {
        "name": "Gross fixed capital formation",
        "period": "Quarterly",
        "description": "The acquisition of produced assets (chain-linked series).",
    },
    "EXP": {
        "name": "Exports of goods and services",
        "period": "Quarterly",
        "description": "Transactions in goods and services from residents to non-residents (chain-linked series)",
    },
    "IMP": {
        "name": "Imports of goods and services",
        "period": "Quarterly",
        "description": "Transactions in goods and services to residents from non-residents (chain-linked series)",
    },
    "CPI": {
        "name": "Consumer price index",
        "period": "Monthly",
        "description": "Purchasing power defined with base 2015 for Europe with varying bases for others. See: "
        "https://www.econdb.com/main-indicators",
    },
    "PPI": {
        "name": "Producer price index",
        "period": "Monthly",
        "description": "Change in selling prices with base 2015 for Europe with varying bases for others. See: "
        "https://www.econdb.com/main-indicators",
    },
    "CORE": {
        "name": "Core consumer price index",
        "period": "Monthly",
        "description": "Purchasing power excluding food and energy defined with base 2015 for Europe with varying "
        "bases for others. See: https://www.econdb.com/main-indicators",
    },
    "URATE": {
        "name": "Unemployment",
        "period": "Monthly",
        "description": "Monthly average % of the working-age population that is unemployed.",
    },
    "EMP": {
        "name": "Employment",
        "period": "Quarterly",
        "description": "The employed population within a country (in thousands).",
    },
    "ACOIO": {
        "name": "Active population",
        "period": "Quarterly",
        "description": "The active population, unemployed and employed, in thousands.",
    },
    "EMRATIO": {
        "name": "Employment to working age population",
        "period": "Quarterly",
        "description": "Unlike the unemployment rate, the employment-to-population ratio includes unemployed "
        "people not looking for jobs.",
    },
    "RETA": {
        "name": "Retail trade",
        "period": "Monthly",
        "description": "Turnover of sales in wholesale and retail trade",
    },
    "CONF": {
        "name": "Consumer confidence index",
        "period": "Monthly",
        "description": "Measures how optimistic or pessimistic consumers are regarding their expected financial "
        "situation.",
    },
    "IP": {
        "name": "Industrial production",
        "period": "Monthly",
        "description": "Measures monthly changes in the price-adjusted output of industry.",
    },
    "CP": {
        "name": "Construction production",
        "period": "Monthly",
        "description": "Measures monthly changes in the price-adjusted output of construction.",
    },
    "GBAL": {
        "name": "Government balance",
        "period": "Quarterly",
        "description": "The government balance (or EMU balance) is the overall difference between government "
        "revenues and spending.",
    },
    "GREV": {
        "name": "General government total revenue",
        "period": "Quarterly",
        "description": "The total amount of revenues collected by governments is determined by past and "
        "current political decisions.",
    },
    "GSPE": {
        "name": "General government total expenditure",
        "period": "Quarterly",
        "description": "Total expenditure consists of total expense and the net acquisition of "
        "non-financial assets. ",
    },
    "GDEBT": {
        "name": "Government debt",
        "period": "Quarterly",
        "description": "The financial liabilities of the government.",
    },
    "CA": {
        "name": "Current account balance",
        "period": "Monthly",
        "description": "A record of a country's international transactions with the rest of the world",
    },
    "TB": {
        "name": "Trade balance",
        "period": "Monthly",
        "description": "The difference between the monetary value of a nation's exports and imports over a "
        "certain time period.",
    },
    "NIIP": {
        "name": "Net international investment position",
        "period": "Quarterly",
        "description": "Measures the gap between a nation's stock of foreign assets and a foreigner's stock "
        "of that nation's assets",
    },
    "IIPA": {
        "name": "Net international investment position (Assets)",
        "period": "Quarterly",
        "description": "A nation's stock of foreign assets.",
    },
    "IIPL": {
        "name": "Net international investment position (Liabilities)",
        "period": "Quarterly",
        "description": "A foreigner's stock of the nation's assets.",
    },
    "Y10YD": {
        "name": "Long term yield (10-year)",
        "period": "Monthly",
        "description": "The 10-year yield is used as a proxy for mortgage rates. It's also seen as a "
        "sign of investor sentiment about the country's economy.",
    },
    "M3YD": {
        "name": "3 month yield",
        "period": "Monthly",
        "description": "The yield received for investing in a government issued treasury security "
        "that has a maturity of 3 months",
    },
    "HOU": {
        "name": "House price index",
        "period": "Monthly",
        "description": "House price index defined with base 2015 for Europe with varying "
        "bases for others. See: https://www.econdb.com/main-indicators",
    },
    "OILPROD": {
        "name": "Oil production",
        "period": "Monthly",
        "description": "The amount of oil barrels produced per day in a month within a country.",
    },
    "POP": {
        "name": "Population",
        "period": "Monthly",
        "description": "The population of a country. This can be in thousands or, "
        "when relatively small, in actual units.",
    },
}
# Transformation suffixes accepted in a series request -> human description.
TRANSFORM = {
    "": "No transformation",
    "TPOP": "total percentage change on period",
    "TOYA": "total percentage since 1 year ago",
    "TUSD": "level USD",
    "TPGP": "Percentage of GDP",
    "TNOR": "Start = 100",
}
# Scale label (as reported by the data source) -> multiplier to raw units.
SCALES = {
    "Thousands": 1_000,
    "Tens of thousands": 10_000,
    "Millions": 1_000_000,
    "Hundreds of millions": 100_000_000,
    "Billions": 1_000_000_000,
    "Units": 1,
}
# Treasury series metadata: frequency labels mapped to provider-specific
# numeric ids (presumably query identifiers — TODO confirm against the
# data source), plus instrument identifiers with their supported maturities.
TREASURIES: Dict = {
    "frequencies": {
        "annually": 203,
        "monthly": 129,
        "weekly": 21,
        "daily": 9,
    },
    "instruments": {
        "nominal": {
            "identifier": "TCMNOM",
            "maturities": {
                "1m": "1-month",
                "3m": "3-month",
                "6m": "6-month",
                "1y": "1-year",
                "2y": "2-year",
                "3y": "3-year",
                "5y": "5-year",
                "7y": "7-year",
                "10y": "10-year",
                "20y": "20-year",
                "30y": "30-year",
            },
        },
        "inflation": {
            "identifier": "TCMII",
            "maturities": {
                "5y": "5-year",
                "7y": "7-year",
                "10y": "10-year",
                "20y": "20-year",
                "30y": "30-year",
            },
        },
        "average": {
            "identifier": "LTAVG",
            "maturities": {
                "Longer than 10-year": "Longer than 10-year",
            },
        },
        "secondary": {
            "identifier": "TB",
            "maturities": {
                "4w": "4-week",
                "3m": "3-month",
                "6m": "6-month",
                "1y": "1-year",
            },
        },
    },
}
@log_start_end(log=logger)
def get_macro_data(
    parameter: str,
    country: str,
    transform: str = "",
    start_date: str = "1900-01-01",
    end_date: Optional[str] = None,
    symbol: str = "",
) -> Tuple[pd.Series, Union[str, Any]]:
    """Query the EconDB database to find specific macro data about a country [Source: EconDB]

    Parameters
    ----------
    parameter: str
        The type of data you wish to display
    country : str
        the selected country
    transform : str
        select data transformation from:
            '' - no transformation
            'TPOP' - total percentage change on period,
            'TOYA' - total percentage since 1 year ago,
            'TUSD' - level USD,
            'TPGP' - Percentage of GDP,
            'TNOR' - Start = 100
    start_date : str
        The starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : Optional[str]
        The end date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.
    symbol : str
        In what currency you wish to convert all values.

    Returns
    -------
    Tuple[pd.Series, Union[str, Any]]
        A series with the requested macro data of the chosen country,
        The units of the macro data, e.g. 'Bbl/day" for oil.
    """
    if end_date is None:
        end_date = datetime.today().strftime("%Y-%m-%d")

    df, units = pd.DataFrame(), ""

    # Normalise the country name to Title_Case_With_Underscores — the key
    # format used by COUNTRY_CODES / COUNTRY_CURRENCIES.
    country = country.replace("_", " ").title().replace(" ", "_")
    parameter = parameter.upper()

    if country not in COUNTRY_CODES:
        console.print(f"No data available for the country {country}.")
        return pd.Series(dtype=float), ""
    if parameter not in PARAMETERS:
        console.print(f"The parameter {parameter} is not an option for {country}.")
        return pd.Series(dtype=float), ""
    if transform not in TRANSFORM:
        console.print(f"The transform {transform} is not a valid option.")
        return pd.Series(dtype=float), ""

    country_code = COUNTRY_CODES[country]
    country_currency = COUNTRY_CURRENCIES[country]

    try:
        code = f"{parameter}{country_code}"
        if transform:
            code += f"~{transform}"

        # NOTE(review): no request timeout — a hung endpoint blocks here.
        r = requests.get(f"https://www.econdb.com/series/context/?tickers={code}")
        res_json = r.json()

        if res_json:
            data = res_json[0]
            scale = data["td"]["scale"]
            units = data["td"]["units"]
            df = pd.DataFrame(data["dataarray"])
            # Rescale to actual units using the API-reported scale label.
            df = df.set_index(pd.to_datetime(df["date"]))[code] * SCALES[scale]
            df = df.sort_index().dropna()
            # Since a percentage is done through differences, the first value is NaN
            if transform in ["TPOP", "TOYA"]:
                df = df.iloc[1:]

        if not res_json or df.empty:
            console.print(
                f"No data available for {parameter} ({PARAMETERS[parameter]['name']}) "
                f"of country {country.replace('_', ' ')}"
            )
            return pd.Series(dtype=float), ""

        if start_date or end_date:
            try:
                dt_start = pd.to_datetime(start_date)
                dt_end = pd.to_datetime(end_date)
                df = df.loc[dt_start:dt_end]
            except TypeError:
                console.print("[red]Invalid date sent. Format as YYYY-MM-DD[/red]\n")
                # BUGFIX: return an empty Series (not DataFrame) to match the
                # declared (pd.Series, str) return shape used everywhere else.
                return pd.Series(dtype=float), "NA/NA"

        # Convert to the requested currency when the series is denominated in
        # a known currency different from `symbol`.
        if (
            symbol
            and country_currency != symbol
            and units in COUNTRY_CURRENCIES.values()
        ):
            # BUGFIX: removed a redundant inner re-check of
            # `units in COUNTRY_CURRENCIES.values()` — the condition above
            # already guarantees it.
            units = symbol

            currency_data = yf.download(
                f"{country_currency}{symbol}=X",
                start=df.index[0],
                end=df.index[-1],
                progress=False,
            )["Adj Close"]

            merged_df = pd.merge_asof(
                df, currency_data, left_index=True, right_index=True
            )
            df = merged_df[code] * merged_df["Adj Close"]

            if pd.isna(df).any():
                df_old_oldest, df_old_newest = df.index[0].date(), df.index[-1].date()
                df = df.dropna()
                df_new_oldest, df_new_newest = df.index[0].date(), df.index[-1].date()
                console.print(
                    f"Due to missing exchange values, some data was dropped from {parameter} of {country}. "
                    f"Consider using the native currency if you want to prevent this. \n"
                    f"OLD: {df_old_oldest} - {df_old_newest}\n"
                    f"NEW: {df_new_oldest} - {df_new_newest}"
                )
    except HTTPError:
        # BUGFIX: previously this branch returned `console.print(...)`, i.e.
        # None, which crashed callers that unpack the (series, units) tuple.
        console.print(
            f"There is no data available for the combination {parameter} and {country}."
        )
        return pd.Series(dtype=float), ""

    return df, units
@log_start_end(log=logger)
def get_macro_transform() -> Dict[str, str]:
    """Return the mapping of EconDB transform codes to their descriptions.

    Returns
    -------
    Dict[str, str]
        Transform code (e.g. "TPOP") mapped to a human-readable description.
    """
    return TRANSFORM
@log_start_end(log=logger)
def get_macro_parameters() -> Dict[str, Dict[str, str]]:
    """Return the macro parameters supported by the EconDB helpers.

    Returns
    -------
    Dict[str, Dict[str, str]]
        Parameter code mapped to its name, period and description.
    """
    return PARAMETERS
@log_start_end(log=logger)
def get_macro_countries() -> Dict[str, str]:
    """Return the supported countries together with their native currencies.

    Returns
    -------
    Dict[str, str]
        Country name mapped to its currency code.
    """
    return COUNTRY_CURRENCIES
@log_start_end(log=logger)
def get_aggregated_macro_data(
    parameters: Optional[list] = None,
    countries: Optional[list] = None,
    transform: str = "",
    start_date: str = "1900-01-01",
    end_date: Optional[str] = None,
    symbol: str = "",
) -> Tuple[pd.DataFrame, Dict[Any, Dict[Any, Any]], str]:
    """This functions groups the data queried from the EconDB database [Source: EconDB]

    Parameters
    ----------
    parameters: Optional[list]
        The type of data you wish to download; defaults to ["CPI"].
        Available parameters can be accessed through economy.macro_parameters().
    countries : Optional[list]
        The selected country or countries; defaults to ["United_States"].
        Available countries can be accessed through economy.macro_countries().
    transform : str
        The selected transform. Available transforms can be accessed through get_macro_transform().
    start_date : str
        The starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : Optional[str]
        The end date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.
    symbol : str
        In what currency you wish to convert all values.

    Returns
    -------
    Tuple[pd.DataFrame, Dict[Any, Dict[Any, Any]], str]
        A DataFrame with the requested macro data of all chosen countries,
        A dictionary containing the units of each country's parameter (e.g. EUR),
        A string denomination which can be Trillions, Billions, Millions, Thousands

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> macro_df = openbb.economy.macro()
    """
    if end_date is None:
        end_date = datetime.today().strftime("%Y-%m-%d")
    # Defaults are created per call — never share mutable defaults.
    if parameters is None:
        parameters = ["CPI"]
    if countries is None:
        countries = ["United_States"]

    country_data: Dict[Any, Dict[Any, pd.Series]] = {}
    units: Dict[Any, Dict[Any, Any]] = {}

    for country in countries:
        country_data[country] = {}
        units[country] = {}
        for parameter in parameters:
            (
                country_data[country][parameter],
                units[country][parameter],
            ) = get_macro_data(
                parameter, country, transform, start_date, end_date, symbol
            )
            # Drop (country, parameter) pairs that returned no data so they
            # do not produce empty columns in the aggregated frame.
            if country_data[country][parameter].empty:
                del country_data[country][parameter]
                del units[country][parameter]

    # Flatten the nested dict into a frame with (country, parameter) columns.
    country_data_df = (
        pd.DataFrame.from_dict(country_data, orient="index").stack().to_frame()
    )
    country_data_df = pd.DataFrame(
        country_data_df[0].values.tolist(), index=country_data_df.index
    ).T

    (df_rounded, denomination) = transform_by_denomination(country_data_df)

    return (
        df_rounded,
        units,
        f" [in {denomination}]" if denomination != "Units" else "",
    )
@log_start_end(log=logger)
def get_treasuries(
    instruments: Optional[list] = None,
    maturities: Optional[list] = None,
    frequency: str = "monthly",
    start_date: str = "1900-01-01",
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Get U.S. Treasury rates [Source: EconDB]

    Parameters
    ----------
    instruments: list
        Type(s) of treasuries, nominal, inflation-adjusted (long term average) or secondary market.
        Available options can be accessed through economy.treasury_maturities().
        Defaults to ["nominal"].
    maturities : list
        Treasury maturities to get. Available options can be accessed through economy.treasury_maturities().
        Defaults to ["10y"].
    frequency : str
        Frequency of the data, this can be annually, monthly, weekly or daily.
    start_date : str
        Starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : Optional[str]
        End date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.

    Returns
    -------
    treasury_data: pd.Dataframe
        Holds data of the selected types and maturities

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.economy.treasury()
    """
    if end_date is None:
        end_date = datetime.today().strftime("%Y-%m-%d")
    if instruments is None:
        instruments = ["nominal"]
    if maturities is None:
        maturities = ["10y"]

    # Nested result: {instrument display name: {maturity label: series}}.
    treasury_data: Dict[Any, Dict[Any, pd.Series]] = {}

    for instrument in instruments:
        if instrument not in TREASURIES["instruments"]:
            console.print(
                f"{instrument} is not an option. Please choose between: "
                f"{', '.join(TREASURIES['instruments'].keys())}"
            )
        else:
            instrument_identifier = TREASURIES["instruments"][instrument]["identifier"]
            # NOTE(review): an unknown `frequency` raises KeyError here rather
            # than printing a friendly message like the instrument check above.
            frequency_number = TREASURIES["frequencies"][frequency]
            # Build the EconDB FRB_H15 query string for pandas-datareader.
            df = web.DataReader(
                "&".join(
                    [
                        "dataset=FRB_H15",
                        "v=Instrument",
                        "h=TIME",
                        f"instrument=[{instrument_identifier}]",
                        f"from={start_date}",
                        f"to={end_date}",
                        # NOTE(review): unlike the other bracketed parameters
                        # this one has no closing "]" — confirm against the
                        # EconDB query syntax whether that is intentional.
                        f"freq=[{frequency_number}",
                        "UNIT=[PERCENT:_PER_YEAR]",
                    ]
                ),
                "econdb",
            )

            # The "average" instrument has a single fixed maturity; user-given
            # maturities are ignored for it.
            if instrument == "average":
                maturities_list = ["Longer than 10-year"]
                type_string = "Long-term average"
            else:
                maturities_list = maturities
                type_string = instrument.capitalize()

            treasury_data[type_string] = {}

            for maturity in maturities_list:
                if maturity not in TREASURIES["instruments"][instrument]["maturities"]:
                    console.print(
                        f"The maturity {maturity} is not an option for {instrument}. Please choose between "
                        f"{', '.join(TREASURIES['instruments'][instrument]['maturities'].keys())}"
                    )
                else:
                    maturity_string = TREASURIES["instruments"][instrument][
                        "maturities"
                    ][maturity]

                    for column in df.columns:
                        # check if type inside the name and maturity inside the maturity string
                        if (
                            type_string.lower() in column[2].lower()
                            and maturity_string in column[3]
                        ):
                            treasury_data[type_string][maturity_string] = df[
                                column
                            ].dropna()
                            break

                    if maturity_string not in treasury_data[type_string]:
                        console.print(
                            f"No data found for the combination {instrument} and {maturity}."
                        )

    # Flatten the nested dict into a frame with "<type>_<maturity>" columns.
    df = pd.DataFrame.from_dict(treasury_data, orient="index").stack().to_frame()
    df = pd.DataFrame(df[0].values.tolist(), index=df.index).T
    df.columns = ["_".join(column) for column in df.columns]

    return df
@log_start_end(log=logger)
def get_treasury_maturities() -> pd.DataFrame:
    """Build a table of the available maturities per treasury instrument. [Source: EconDB]

    Returns
    -------
    df: pd.DataFrame
        Indexed by instrument name, with one "Maturities" column listing the
        maturity codes as a comma-separated string.
    """
    options = {
        name: ", ".join(spec["maturities"])
        for name, spec in TREASURIES["instruments"].items()
    }
    df = pd.DataFrame.from_dict(options, orient="index")
    # "average" has a single fixed maturity chosen inside get_treasuries.
    df.loc["average"] = "Defined by function"
    df.index.name = "Instrument"
    df.columns = ["Maturities"]
    return df
__docformat__ = "numpy"
import logging
import os
import textwrap
from typing import Optional, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import fred_model
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def format_units(num: int) -> str:
    """Abbreviate a power of ten with a K/M/B/T suffix. Number will be in form of 10^n."""
    magnitude = int(np.log10(num))
    if magnitude < 3:
        return str(num)
    # (upper magnitude bound, divisor, suffix) in increasing order.
    for bound, divisor, suffix in (
        (6, 1_000, "K"),
        (9, 1_000_000, "M"),
        (12, 1_000_000_000, "B"),
        (15, 1_000_000_000_000, "T"),
    ):
        if magnitude < bound:
            return f"{int(num / divisor)}{suffix}"
    return f"10^{magnitude}"
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def notes(search_query: str, limit: int = 10):
    """Search and print FRED series notes. [Source: FRED]

    Parameters
    ----------
    search_query : str
        Text query to search on fred series notes database
    limit : int
        Maximum number of series notes to display
    """
    results = fred_model.get_series_notes(search_query, limit)
    if results.empty:
        return
    print_rich_table(
        results[["id", "title", "notes"]],
        title=f"[bold]Search results for {search_query}[/bold]",
        show_index=False,
        headers=["Series ID", "Title", "Description"],
    )
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def display_fred_series(
    series_ids: List[str],
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    limit: int = 10,
    get_data: bool = False,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display (multiple) series from https://fred.stlouisfed.org. [Source: FRED]

    Parameters
    ----------
    series_ids : List[str]
        FRED Series ID from https://fred.stlouisfed.org. For multiple series use: series1,series2,series3
    start_date : Optional[str]
        Starting date (YYYY-MM-DD) of data
    end_date : Optional[str]
        Ending date (YYYY-MM-DD) of data
    limit : int
        Number of data points to display.
    get_data : bool
        If True, return the (data, detail) tuple after displaying.
    raw : bool
        Output only raw data
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Returns
    -------
    Optional tuple of (pd.DataFrame, dict) when `get_data` is True; otherwise None.
    """
    data, detail = fred_model.get_aggregated_series_data(
        series_ids, start_date, end_date
    )

    if data.empty:
        logger.error("No data")
        console.print("[red]No data available.[/red]\n")
    else:
        # Try to get everything onto the same 0-10 scale.
        # To do so, think in scientific notation. Divide the data by whatever the E would be
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return None

        for s_id, sub_dict in detail.items():
            data_to_plot, title = format_data_to_plot(data[s_id], sub_dict)
            # Wrap long titles only when few series are shown, so the legend
            # stays readable without growing too tall.
            ax.plot(
                data_to_plot.index,
                data_to_plot,
                label="\n".join(textwrap.wrap(title, 80))
                if len(series_ids) < 5
                else title,
            )

        ax.legend(
            bbox_to_anchor=(0, 0.40, 1, -0.52),
            loc="upper right",
            mode="expand",
            borderaxespad=0,
            prop={"size": 9},
        )

        ax.set_xlim(data.index[0], data.index[-1])
        theme.style_primary_axis(ax)

        if external_axes is None:
            theme.visualize_output()

        # Stringify the index for table display and export.
        data.index = [x.strftime("%Y-%m-%d") for x in data.index]
        if raw:
            print_rich_table(
                data.tail(limit),
                headers=list(data.columns),
                show_index=True,
                index_name="Date",
            )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "fred",
            data,
        )
        if get_data:
            return data, detail

    return None
def format_data_to_plot(data: pd.DataFrame, detail: dict) -> Tuple[pd.DataFrame, str]:
    """Rescale a FRED series to a ~0-10 range and build its legend title.

    Parameters
    ----------
    data : pd.DataFrame
        Observations for one series; NaNs are dropped before scaling.
        NOTE(review): used as a 1-D series here (`max()` must be a scalar) —
        confirm callers pass a single column, e.g. `data[s_id]`.
    detail : dict
        Series metadata; must contain "title" and "units" keys.

    Returns
    -------
    Tuple[pd.DataFrame, str]
        Rescaled data (datetime index) and the legend title, including a
        "x 1K/1M/..." multiplier when the data was divided down.
    """
    data_to_plot = data.dropna()
    # Divide by the data's order of magnitude so multiple series share a
    # comparable scale. Assumes max() > 0 — np.log10 fails otherwise;
    # TODO confirm upstream guarantees positive data.
    exponent = int(np.log10(data_to_plot.max()))
    data_to_plot /= 10**exponent
    multiplier = f"x {format_units(10**exponent)}" if exponent > 0 else ""
    title = f"{detail['title']} ({detail['units']}) {'['+multiplier+']' if multiplier else ''}"
    data_to_plot.index = pd.to_datetime(data_to_plot.index)
    return data_to_plot, title
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def display_yield_curve(
    date: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
    raw: bool = False,
    export: str = "",
):
    """Plot the US Treasury yield curve for a specified date.

    Parameters
    ----------
    date: str
        Date to get curve for. If None, gets most recent date (format yyyy-mm-dd)
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    raw : bool
        Output only raw data
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file
    """
    curve, date_of_yield = fred_model.get_yield_curve(date, True)
    if curve.empty:
        console.print(f"[red]Yield data not found for {date_of_yield}.[/red]\n")
        return

    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(curve["Maturity"], curve["Rate"], "-o")
    ax.set_xlabel("Maturity")
    ax.set_ylabel("Rate (%)")
    theme.style_primary_axis(ax)

    # Title only makes sense on our own figure; external axes keep theirs.
    if external_axes is None:
        ax.set_title(f"US Yield Curve for {date_of_yield} ")
        theme.visualize_output()

    if raw:
        print_rich_table(
            curve,
            headers=list(curve.columns),
            show_index=False,
            title=f"United States Yield Curve for {date_of_yield}",
            floatfmt=".3f",
        )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ycrv",
        curve,
    )
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.decorators import check_api_key
from openbb_terminal.economy import finnhub_model
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def economy_calendar_events(country: str, limit: int, impact: str, export: str):
    """Print economy calendar impact events for a country. [Source: Finnhub]

    Parameters
    ----------
    country : str
        Country from where to get economy calendar impact events
    limit : int
        Number economy calendar impact events to display
    impact : str
        Impact of the economy event ("all" disables the impact filter)
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    events = finnhub_model.get_economy_calendar_events()
    if events.empty:
        return

    calendar = events[events["country"] == country].sort_values(
        "time", ascending=True
    )
    if calendar.empty:
        console.print(
            "No latest economy calendar events found in the specified country\n"
        )
        return

    if impact != "all":
        calendar = calendar[calendar["impact"] == impact]
        if calendar.empty:
            console.print(
                "No latest economy calendar events found in the specified country with this impact\n"
            )
            return

    calendar = calendar.fillna("").head(n=limit)

    column_names = {
        "actual": "Actual release",
        "prev": "Previous release",
        "country": "Country",
        "unit": "Unit",
        "estimate": "Estimate",
        "event": "Event",
        "impact": "Impact Level",
        "time": "Release time",
    }
    calendar = calendar[
        ["time", "event", "impact", "prev", "estimate", "actual", "unit"]
    ].rename(columns=column_names)

    # Drop columns that are entirely empty after the rename.
    calendar = calendar.replace("", float("NaN")).dropna(how="all", axis=1)

    print_rich_table(
        calendar,
        headers=list(calendar.columns),
        show_index=False,
        title="Economy Calendar",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "events",
        calendar,
    )
__docformat__ = "numpy"
import logging
from ast import literal_eval
import webbrowser
from typing import List
import pandas as pd
import requests
from finvizfinance.group import performance, spectrum, valuation
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
# pylint: disable=unsupported-assignment-operation
logger = logging.getLogger(__name__)
# pylint: disable=unsupported-assignment-operation
# User-facing group keys -> Finviz screener group names, used by the
# get_*_data helpers below and validated in each of them.
# NOTE(review): "real_Estate" breaks the all-lowercase convention of every
# other key, so callers must type the capital E. Renaming it would change the
# public API — confirm whether it is intentional before touching it.
GROUPS = {
    "sector": "Sector",
    "industry": "Industry",
    "basic_materials": "Industry (Basic Materials)",
    "communication_services": "Industry (Communication Services)",
    "consumer_cyclical": "Industry (Consumer Cyclical)",
    "consumer_defensive": "Industry (Consumer Defensive)",
    "energy": "Industry (Energy)",
    "financial": "Industry (Financial)",
    "healthcare": "Industry (Healthcare)",
    "industrials": "Industry (Industrials)",
    "real_Estate": "Industry (Real Estate)",
    "technology": "Industry (Technology)",
    "utilities": "Industry (Utilities)",
    "country": "Country (U.S. listed stocks only)",
    "capitalization": "Capitalization",
}
@log_start_end(log=logger)
def get_performance_map(period: str = "1d", map_filter: str = "sp500"):
    """Opens Finviz map website in a browser. [Source: Finviz]

    Parameters
    ----------
    period : str
        Performance period. Available periods are 1d, 1w, 1m, 3m, 6m, 1y.
    map_filter : str
        Map filter. Available map filters are sp500, world, full, etf.
    """
    # Conversion from period and type, to fit url requirements
    d_period = {"1d": "", "1w": "w1", "1m": "w4", "3m": "w13", "6m": "w26", "1y": "w52"}
    d_type = {"sp500": "sec", "world": "geo", "full": "sec_all", "etf": "etf"}
    url = f"https://finviz.com/map.ashx?t={d_type[map_filter]}&st={d_period[period]}"
    webbrowser.open(url)
@log_start_end(log=logger)
def get_groups() -> List[str]:
    """Return the group keys accepted by the group-data helpers."""
    return list(GROUPS)
@log_start_end(log=logger)
def get_valuation_data(
    group: str = "sector", sortby: str = "Name", ascend: bool = True
) -> pd.DataFrame:
    """Get group (sectors, industry or country) valuation data. [Source: Finviz]

    Parameters
    ----------
    group : str
        Group by category. Available groups can be accessed through get_groups().
    sortby : str
        Column to sort by
    ascend : bool
        Flag to sort in ascending order

    Returns
    -------
    pd.DataFrame
        dataframe with valuation/performance data
    """
    if group not in GROUPS:
        console.print(
            f"[red]Group {group} not found. Check available groups through get_groups().[/red]\n"
        )
        return pd.DataFrame()
    try:
        # Translate the user-facing key into the Finviz group name.
        group = GROUPS[group]
        df_group = valuation.Valuation().screener_view(group=group)
        # Market Cap arrives as a suffixed string; normalize to billions.
        # NOTE(review): assumes every value ends in "B" or "M" — any other
        # suffix would be mis-parsed; confirm against finvizfinance output.
        df_group["Market Cap"] = df_group["Market Cap"].apply(
            lambda x: float(x.strip("B"))
            if x.endswith("B")
            else float(x.strip("M")) / 1000
        )
        # Remove spaces from column names so sortby keys are space-free.
        df_group.columns = [col.replace(" ", "") for col in df_group.columns]
        df_group = df_group.sort_values(by=sortby, ascending=ascend)
        df_group["Volume"] = df_group["Volume"] / 1_000_000
        df_group = df_group.rename(columns={"Volume": "Volume [1M]"})
        df_group.fillna("", inplace=True)
        return df_group
    except IndexError:
        # Raised by finvizfinance when the scrape yields no rows.
        console.print("Data not found.\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_performance_data(
    group: str = "sector", sortby: str = "Name", ascend: bool = True
) -> pd.DataFrame:
    """Get group (sectors, industry or country) performance data. [Source: Finviz]

    Parameters
    ----------
    group : str
        Group by category. Available groups can be accessed through get_groups().
    sortby : str
        Column to sort by
    ascend : bool
        Flag to sort in ascending order

    Returns
    -------
    pd.DataFrame
        dataframe with performance data
    """
    if group not in GROUPS:
        console.print(
            f"[red]Group {group} not found. Check available groups through get_groups().[/red]\n"
        )
        return pd.DataFrame()
    try:
        # Translate the user-facing key into the Finviz group name.
        group = GROUPS[group]
        df_group = performance.Performance().screener_view(group=group)
        df_group = df_group.rename(
            columns={
                "Perf Week": "Week",
                "Perf Month": "Month",
                "Perf Quart": "3Month",
                "Perf Half": "6Month",
                "Perf Year": "1Year",
                "Perf YTD": "YTD",
                "Avg Volume": "AvgVolume",
                "Rel Volume": "RelVolume",
            }
        )
        # NOTE(review): only the "Week" column is converted from a "%"-string
        # to a fraction — the other performance columns are left as-is;
        # confirm whether finvizfinance already returns those as numbers.
        df_group["Week"] = df_group["Week"].apply(lambda x: float(x.strip("%")) / 100)
        df_group = df_group.sort_values(by=sortby, ascending=ascend)
        df_group["Volume"] = df_group["Volume"] / 1_000_000
        df_group["AvgVolume"] = df_group["AvgVolume"] / 1_000_000
        df_group = df_group.rename(
            columns={"Volume": "Volume [1M]", "AvgVolume": "AvgVolume [1M]"}
        )
        df_group.fillna("", inplace=True)
        return df_group
    except IndexError:
        # Raised by finvizfinance when the scrape yields no rows.
        console.print("Data not found.\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_spectrum_data(group: str = "sector"):
    """Generate the Finviz spectrum view for a group. [Source: Finviz]

    Parameters
    ----------
    group : str
        Group by category. Available groups can be accessed through get_groups().
    """
    if group not in GROUPS:
        console.print(
            f"[red]Group {group} not found. Check available groups through get_groups().[/red]\n"
        )
        return
    spectrum.Spectrum().screener_view(group=GROUPS[group])
@log_start_end(log=logger)
def get_futures(
    future_type: str = "Indices", sortby: str = "ticker", ascend: bool = False
) -> pd.DataFrame:
    """Get futures data. [Source: Finviz]

    Scrapes the JavaScript ``groups``/``tiles`` literals embedded in the
    Finviz futures page and assembles the requested category into a frame.

    Parameters
    ----------
    future_type : str
        From the following: Indices, Energy, Metals, Meats, Grains, Softs, Bonds, Currencies
    sortby : str
        Column to sort by
    ascend : bool
        Flag to sort in ascending order

    Returns
    -------
    pd.Dataframe
        Indices, Energy, Metals, Meats, Grains, Softs, Bonds, Currencies
    """
    source = requests.get(
        "https://finviz.com/futures.ashx", headers={"User-Agent": get_user_agent()}
    ).text

    slice_source = source[
        source.find("var groups = ") : source.find(  # noqa: E203
            "\r\n\r\n groups.forEach(function(group) "
        )
    ]
    # BUGFIX: the previous code used str.strip("var groups = ") to remove the
    # JS assignment prefix, but strip() removes a *set* of characters from
    # both ends — not a prefix — and could silently eat payload characters.
    # Slice from the literal's opening bracket instead.
    raw_groups = slice_source[: slice_source.find("\r\n var tiles = ") - 1]
    groups = literal_eval(raw_groups[raw_groups.find("[") :])  # noqa: E203
    raw_tiles = slice_source[
        slice_source.find("\r\n var tiles = ") : -1  # noqa: E203
    ]
    titles = literal_eval(raw_tiles[raw_tiles.find("{") :])  # noqa: E203

    # Group label -> list of per-contract dicts (looked up via the tiles map).
    d_futures: dict = {}
    for future in groups:
        d_futures[future["label"]] = []
        for ticker in future["contracts"]:
            d_futures[future["label"]].append(titles[ticker["ticker"]])

    df = pd.DataFrame(d_futures[future_type])
    df = df.set_index("label")
    df = df.sort_values(by=sortby, ascending=ascend)
    df = df[["prevClose", "last", "change"]].fillna("")
    return df
__docformat__ = "numpy"
from typing import List, Optional
import logging
import os
import matplotlib
import matplotlib.pyplot as plt
from openbb_terminal.decorators import check_api_key
from openbb_terminal.config_terminal import theme
from openbb_terminal import config_plot as cfp
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import alphavantage_model
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def realtime_performance_sector(
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display Real-Time Performance sector. [Source: AlphaVantage]

    Parameters
    ----------
    raw : bool
        Output only raw data
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_rtp = alphavantage_model.get_sector_data()

    # pylint: disable=E1101

    if df_rtp.empty:
        return

    if raw:
        print_rich_table(
            df_rtp,
            show_index=False,
            headers=df_rtp.columns,
            title="Real-Time Performance",
        )

    else:
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return

        df_rtp.set_index("Sector", inplace=True)
        # Collapse the single remaining column to a Series for plotting.
        df_rtp = df_rtp.squeeze(axis=1)

        # Color bars by sign: gains use the theme up color, losses down color.
        colors = [theme.up_color if x > 0 else theme.down_color for x in df_rtp.values]
        df_rtp.plot(kind="barh", color=colors, ax=ax)
        theme.style_primary_axis(ax)
        ax.set_title("Real Time Performance (%) per Sector")
        ax.tick_params(axis="x", labelrotation=90)
        ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.2f"))

        if external_axes is None:
            theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "rtps",
        df_rtp,
    )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_real_gdp(
    interval: str = "q",
    start_year: int = 2010,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot US real GDP from AlphaVantage.

    Parameters
    ----------
    interval : str
        Interval for GDP. Either "a" or "q", by default "q"
    start_year : int, optional
        Start year for plot, by default 2010
    raw : bool, optional
        Flag to show raw data, by default False
    export : str, optional
        Format to export data, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    gdp = alphavantage_model.get_real_gdp(interval, start_year)
    if gdp.empty:
        return

    interval_label = "Annual" if interval == "a" else "Quarterly"
    # Annual titles use the requested start year; quarterly ones use the
    # year of the last row of the returned data.
    title_year = (
        str(start_year) if interval == "a" else str(list(gdp["date"])[-1].year)
    )

    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(gdp["date"], gdp["GDP"], marker="o")
    ax.set_title(f"{interval_label} US GDP ($B) from {title_year}")
    ax.set_ylabel("US GDP ($B) ")
    theme.style_primary_axis(ax)

    if external_axes is None:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "gdp",
        gdp,
    )
    if raw:
        print_rich_table(
            gdp.head(20), headers=["Date", "GDP"], show_index=False, title="US GDP"
        )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_gdp_capita(
    start_year: int = 2010,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot US GDP per capita from AlphaVantage.

    Parameters
    ----------
    start_year : int, optional
        Start year for plot, by default 2010
    raw : bool, optional
        Flag to show raw data, by default False
    export : str, optional
        Format to export data, by default
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    gdp = alphavantage_model.get_gdp_capita(start_year)
    if gdp.empty:
        console.print("Error getting data. Check API Key")
        return

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(gdp["date"], gdp["GDP"], marker="o")
    ax.set_title(f"US GDP per Capita (Chained 2012 USD) from {start_year}")
    ax.set_ylabel("US GDP (Chained 2012 USD) ")
    theme.style_primary_axis(ax)

    if external_axes is None:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "gdpc",
        gdp,
    )
    if raw:
        print_rich_table(
            gdp.head(20),
            headers=["Date", "GDP"],
            show_index=False,
            title="US GDP Per Capita",
        )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_inflation(
    start_year: int = 2010,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot US inflation from AlphaVantage.

    Parameters
    ----------
    start_year : int, optional
        Start year for plot, by default 2010
    raw : bool, optional
        Flag to show raw data, by default False
    export : str, optional
        Format to export data, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    inflation = alphavantage_model.get_inflation(start_year)
    if inflation.empty:
        console.print("Error getting data. Check API Key")
        return

    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(inflation["date"], inflation["Inflation"], marker="o")
    ax.set_title(f"US Inflation from {list(inflation['date'])[-1].year}")
    ax.set_ylabel("Inflation (%)")
    theme.style_primary_axis(ax)

    if external_axes is None:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "inf",
        inflation,
    )
    if raw:
        print_rich_table(
            inflation.head(20),
            headers=["Date", "Inflation"],
            show_index=False,
            title="US Inflation",
        )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_cpi(
    interval: str = "m",
    start_year: int = 2010,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot the US consumer price index (CPI) from AlphaVantage.

    Parameters
    ----------
    interval : str
        Interval for GDP. Either "m" or "s"
    start_year : int, optional
        Start year for plot, by default 2010
    raw : bool, optional
        Flag to show raw data, by default False
    export : str, optional
        Format to export data, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    cpi_data = alphavantage_model.get_cpi(interval, start_year)
    if cpi_data.empty:
        console.print("Error getting data. Check API Key")
        return

    cadence = "Semi-Annual" if interval == "s" else "Monthly"
    # Title year comes from the last row of the returned data.
    last_year = str(list(cpi_data["date"])[-1].year)

    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(cpi_data["date"], cpi_data["CPI"], marker="o")
    ax.set_title(f"{cadence} Consumer Price Index from {last_year}")
    ax.set_ylabel("CPI")
    theme.style_primary_axis(ax)

    if external_axes is None:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cpi",
        cpi_data,
    )
    if raw:
        print_rich_table(
            cpi_data.head(20), headers=["Date", "CPI"], show_index=False, title="US CPI"
        )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_treasury_yield(
    interval: str = "m",
    maturity: str = "10y",
    start_date: str = "2010-01-01",
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display historical treasury yield for given maturity

    Parameters
    ----------
    interval : str
        Interval for data. Can be "d","w","m" for daily, weekly or monthly, by default "m"
    maturity : str
        Maturity timeline. Can be "3m","5y","10y" or "30y", by default "10y"
    start_date: str
        Start date for data. Should be in YYYY-MM-DD format, by default "2010-01-01"
    raw : bool, optional
        Flag to display raw data, by default False
    export : str, optional
        Format to export data, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Maps the short maturity flag to the label used in the chart title;
    # an unknown maturity would raise KeyError at set_title below
    d_maturity = {"3m": "3month", "5y": "5year", "10y": "10year", "30y": "30year"}
    yld = alphavantage_model.get_treasury_yield(interval, maturity, start_date)
    # An empty frame is how the model signals a failed request (e.g. bad key)
    if yld.empty:
        console.print("Error getting data. Check API Key")
        return
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.plot(yld["date"], yld["Yield"], marker="o")
    ax.set_title(f"{d_maturity[maturity]} Treasury Yield")
    ax.set_ylabel("Yield (%)")
    theme.style_primary_axis(ax)
    # Only render when this function owns the figure; external callers display it
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "tyld",
        yld,
    )
    if raw:
        print_rich_table(
            yld.head(20),
            headers=["Date", "Yield"],
            title="Historical Treasury Yield",
            show_index=False,
        )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_unemployment(
    start_year: int = 2010,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display US unemployment AlphaVantage

    Parameters
    ----------
    start_year : int, optional
        Start year for plot, by default 2010
    raw : bool, optional
        Flag to show raw data, by default False
    export : str, optional
        Format to export data, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    un = alphavantage_model.get_unemployment(start_year)
    # An empty frame is how the model signals a failed request (e.g. bad key)
    if un.empty:
        console.print("Error getting data. Check API Key")
        return
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.plot(un["date"], un["unemp"], marker="o")
    ax.set_title(f"US Unemployment from {start_year}")
    ax.set_ylabel("US Unemployment (%)")
    theme.style_primary_axis(ax)
    # Only render when this function owns the figure; external callers display it
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "unemp",
        un,
    )
    if raw:
        print_rich_table(
            un.head(20),
            # Fixed copy-paste bug: headers previously read ["Date", "GDP"]
            # even though the table shows unemployment figures.
            headers=["Date", "Unemployment"],
            title="US Unemployment",
            show_index=False,
        )
import os
from typing import Optional, List, Dict, Any
from textwrap import fill
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.helper_funcs import (
plot_autoscale,
export_data,
print_rich_table,
)
def show_plot(
    dataset_yaxis_1,
    dataset_yaxis_2,
    export,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """
    The ability to plot any data coming from EconDB, FRED or Yahoo Finance.

    Parameters
    ----------
    dataset_yaxis_1: pd.DataFrame
        Data you wish to plot on the first y-axis.
    dataset_yaxis_2 : pd.DataFrame
        Data you wish to plot on the second y-axis.
    export : str
        Format to export the combined data to; an empty string disables export.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on (two axes expected: primary and twin).

    Returns
    -------
    Plots the data.
    """
    if external_axes is None:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        # NOTE(review): when dataset_yaxis_2 is non-empty, ax2 is re-created
        # below via ax1.twinx(), shadowing the axis supplied here — confirm
        # whether the provided second axis was meant to be used instead.
        ax1, ax2 = external_axes
    color_palette = theme.get_colors()
    n_colors = len(color_palette)
    # Axis 1 walks the palette forward from the start, axis 2 backward from
    # the end, so the two axes pick visually distinct colors first.
    ax_1_coloring = 0
    ax_2_coloring = -1
    dataset_yaxis_1 = dataset_yaxis_1.dropna()
    for column in dataset_yaxis_1:
        ax1.plot(
            dataset_yaxis_1[column],
            label=column.replace("_", " "),
            # Wrap around the palette so plotting more series than there are
            # colors can no longer raise IndexError.
            color=color_palette[ax_1_coloring % n_colors],
        )
        ax_1_coloring += 1
    ax1.legend(
        [fill(column, 45) for column in dataset_yaxis_1.columns],
        bbox_to_anchor=(0, 0.40, 1, -0.52),
        loc="upper right",
        mode="expand",
        borderaxespad=0,
        prop={"size": 9},
    )
    theme.style_primary_axis(ax1)
    if not dataset_yaxis_2.empty:
        ax2 = ax1.twinx()
        dataset_yaxis_2 = dataset_yaxis_2.dropna()
        for column in dataset_yaxis_2:
            ax2.plot(
                dataset_yaxis_2[column],
                label=column,
                # Same wrap-around protection; -1 % n_colors starts at the
                # last palette entry, matching the original behavior.
                color=color_palette[ax_2_coloring % n_colors],
            )
            ax_2_coloring += -1
        theme.style_twin_axis(ax2)
        ax2.legend(
            [fill(column, 45) for column in dataset_yaxis_2.columns],
            bbox_to_anchor=(0.55, 0.40, 1, -0.52),
            loc="upper left",
            mode="expand",
            borderaxespad=0,
            prop={"size": 9},
        )
    # Only render when this function owns the figure; external callers display it
    if external_axes is None:
        theme.visualize_output()
    if export:
        df = pd.concat([dataset_yaxis_1, dataset_yaxis_2], axis=1)
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "plot_macro_data",
            df,
        )
def show_options(
    datasets: Dict[Any, pd.DataFrame], raw: str = "", limit: int = 10, export: str = ""
):
    """
    Show the available options of, or the raw data behind, loaded datasets
    coming from EconDB, FRED or Yahoo Finance.

    Parameters
    ----------
    raw : str
        Non-empty when the raw data should be shown (used as a truthy flag).
    limit: int
        The amount of rows you wish to show.
    export: str
        Export format; a non-empty string triggers an export of the data.

    Returns
    -------
    Prints either the raw data or the options available to plot.
    """
    if raw or export:
        # Join all loaded datasets column-wise in a single pass instead of
        # repeatedly re-concatenating a growing frame.
        df = pd.concat(datasets.values(), axis=1) if datasets else pd.DataFrame()
        if raw:
            print_rich_table(
                df.tail(limit),
                show_index=True,
                headers=list(df.columns),
                title="Macro data",
            )
        if export:
            export_data(
                export,
                os.path.dirname(os.path.abspath(__file__)),
                "dataset",
                df,
            )
    else:
        # Map each command to a comma-separated list of its loaded columns
        options = {
            command: ", ".join(values.keys()) for command, values in datasets.items()
        }
        print_rich_table(
            pd.DataFrame.from_dict(options, orient="index", columns=["Options"]),
            show_index=True,
            index_name="Command",
            title="Options available to plot",
        )
__docformat__ = "numpy"
# pylint:disable=too-many-arguments
import logging
import os
from textwrap import fill
from typing import Optional, List
from matplotlib import pyplot as plt
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import econdb_model
from openbb_terminal.helper_funcs import (
plot_autoscale,
print_rich_table,
export_data,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def show_macro_data(
    parameters: Optional[list] = None,
    countries: Optional[list] = None,
    transform: str = "",
    start_date: str = "1900-01-01",
    end_date: Optional[str] = None,
    symbol: str = "",
    raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
    export: str = "",
):
    """Show the received macro data about a company [Source: EconDB]

    Parameters
    ----------
    parameters: list
        The type of data you wish to display. Available parameters can be accessed through get_macro_parameters().
    countries : list
        The selected country or countries. Available countries can be accessed through get_macro_countries().
    transform : str
        select data transformation from:
            '' - no transformation
            'TPOP' - total percentage change on period,
            'TOYA' - total percentage since 1 year ago,
            'TUSD' - level USD,
            'TPGP' - Percentage of GDP,
            'TNOR' - Start = 100
    start_date : str
        The starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : Optional[str]
        The end date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.
    symbol : str
        In what currency you wish to convert all values.
    raw : bool
        Whether to display the raw output.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file

    Returns
    -------
    Plots the Series.
    """
    # Avoid mutable defaults: fill in the single-series defaults here
    if parameters is None:
        parameters = ["CPI"]
    if countries is None:
        countries = ["United_States"]
    df_rounded, units, denomination = econdb_model.get_aggregated_macro_data(
        parameters, countries, transform, start_date, end_date, symbol
    )
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    legend = []
    # Columns are (country, parameter) tuples; title/legend layout depends on
    # how many of each dimension was requested.
    for column in df_rounded.columns:
        parameter_units = f"Units: {units[column[0]][column[1]]}"
        country_label = column[0].replace("_", " ")
        parameter_label = econdb_model.PARAMETERS[column[1]]["name"]
        # Every branch plots the series; only title/legend text differs.
        ax.plot(df_rounded[column])
        if len(parameters) > 1 and len(countries) > 1:
            ax.set_title(f"Macro data{denomination}", wrap=True, fontsize=12)
            legend.append(f"{country_label} [{parameter_label}, {parameter_units}]")
        elif len(parameters) > 1:
            ax.set_title(f"{country_label}{denomination}", wrap=True, fontsize=12)
            legend.append(f"{parameter_label} [{parameter_units}]")
        elif len(countries) > 1:
            ax.set_title(f"{parameter_label}{denomination}", wrap=True, fontsize=12)
            legend.append(f"{country_label} [{parameter_units}]")
        else:
            ax.set_title(
                f"{parameter_label} of {country_label}{denomination} [{parameter_units}]",
                wrap=True,
                fontsize=12,
            )
    # A legend is only useful when several series share the axis
    if len(parameters) > 1 or len(countries) > 1:
        ax.legend(
            [fill(label.title(), 45) for label in legend],
            bbox_to_anchor=(0, 0.40, 1, -0.52),
            loc="upper right",
            mode="expand",
            prop={"size": 9},
            ncol=2,
        )
    # Flatten the (country, parameter) MultiIndex columns for table/export
    df_rounded.columns = ["_".join(column) for column in df_rounded.columns]
    if raw:
        print_rich_table(
            df_rounded.fillna("-").iloc[-10:],
            headers=list(df_rounded.columns),
            show_index=True,
            title=f"Macro Data {denomination}",
        )
    if export:
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "macro_data",
            df_rounded,
        )
    theme.style_primary_axis(ax)
    # Only render when this function owns the figure; external callers display it
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def show_treasuries(
    instruments: list = None,
    maturities: list = None,
    frequency: str = "monthly",
    start_date: str = "1900-01-01",
    end_date: Optional[str] = None,
    raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
    export: str = "",
):
    """Display U.S. Treasury rates [Source: EconDB]

    Parameters
    ----------
    instruments: list
        Type(s) of treasuries, nominal, inflation-adjusted or secondary market.
        Available options can be accessed through economy.treasury_maturities().
    maturities : list
        Treasury maturities to display. Available options can be accessed through economy.treasury_maturities().
    frequency : str
        Frequency of the data, this can be daily, weekly, monthly or annually
    start_date : str
        Starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : Optional[str]
        End date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.
    raw : bool
        Whether to display the raw output.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file

    Returns
    -------
    Plots the Treasury Series.
    """
    # Avoid mutable defaults: a 10-year nominal treasury is the fallback
    if instruments is None:
        instruments = ["nominal"]
    if maturities is None:
        maturities = ["10y"]
    rates = econdb_model.get_treasuries(
        instruments, maturities, frequency, start_date, end_date
    )
    if external_axes is None:
        _, axis = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        axis = external_axes[0]
    # Column names follow the "<instrument>_<maturity>" convention
    for column in rates.columns:
        parts = column.split("_")
        axis.plot(rates[column], label=f"{parts[0]} [{parts[1]}]")
    axis.set_title("U.S. Treasuries")
    axis.set_ylabel("Yield (%)")
    axis.legend(
        bbox_to_anchor=(0, 0.40, 1, -0.52),
        loc="upper right",
        mode="expand",
        borderaxespad=0,
        prop={"size": 9},
        ncol=3,
    )
    theme.style_primary_axis(axis)
    # Only render when this function owns the figure; external callers display it
    if external_axes is None:
        theme.visualize_output()
    if raw:
        print_rich_table(
            rates.iloc[-10:],
            headers=list(rates.columns),
            show_index=True,
            title="U.S. Treasuries",
        )
    if export:
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "treasuries_data",
            rates,
        )
@log_start_end(log=logger)
def show_treasury_maturities():
    """Get treasury maturity options [Source: EconDB]

    Returns
    -------
    A table containing the instruments and maturities.
    """
    instrument_maturities = econdb_model.get_treasury_maturities()
    print_rich_table(
        instrument_maturities,
        # Plain list literal; the previous list([...]) wrapper was redundant
        headers=["Maturities"],
        show_index=True,
        index_name="Instrument",
        title="Maturity options per instrument",
    )
__docformat__ = "numpy"
# pylint:disable=too-many-arguments
import logging
import os
from typing import Optional, List
from matplotlib import pyplot as plt
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy.yfinance_model import (
get_indices,
get_search_indices,
INDICES,
)
from openbb_terminal.helper_funcs import (
plot_autoscale,
print_rich_table,
export_data,
reindex_dates,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def show_indices(
    indices: list,
    interval: str = "1d",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    column: str = "Adj Close",
    returns: bool = False,
    raw: bool = False,
    external_axes: Optional[List[plt.axes]] = None,
    export: str = "",
):
    """Load (and show) the selected indices over time [Source: Yahoo Finance]

    Parameters
    ----------
    indices: list
        A list of indices you wish to load (and plot).
        Available indices can be accessed through economy.available_indices().
    interval: str
        Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
        Intraday data cannot extend last 60 days
    start_date : str
        The starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : str
        The end date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.
    column : str
        Which column to load in, by default this is the Adjusted Close.
    returns: bool
        Flag to show cumulative returns on index
    raw : bool
        Whether to display the raw output.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    export : str
        Export data to csv,json,xlsx or png,jpg,pdf,svg file

    Returns
    -------
    Plots the Series.
    """
    # Annotations fixed: start/end dates are "YEAR-MONTH-DAY" strings per the
    # docstring, not ints as previously declared.
    indices_data = get_indices(indices, interval, start_date, end_date, column, returns)
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    for index in indices:
        # Use the friendly name for known tickers, the raw argument otherwise
        if index.lower() in INDICES:
            label = INDICES[index.lower()]["name"]
        else:
            label = index
        if not indices_data[index].empty:
            if returns:
                indices_data.index.name = "date"
                # get_indices returns growth factors; convert to percentages
                data_to_percent = 100 * (indices_data[index].values - 1)
                plot_data = reindex_dates(indices_data)
                ax.plot(plot_data.index, data_to_percent, label=label)
            else:
                ax.plot(indices_data.index, indices_data[index], label=label)
    ax.set_title("Indices")
    if returns:
        ax.set_ylabel("Performance (%)")
    ax.legend(
        bbox_to_anchor=(0, 0.40, 1, -0.52),
        loc="upper right",
        mode="expand",
        borderaxespad=0,
        prop={"size": 9},
        ncol=2,
    )
    if returns:
        theme.style_primary_axis(
            ax,
            data_index=plot_data.index.to_list(),
            tick_labels=plot_data["date"].to_list(),
        )
        ax.set_xlim(plot_data.index[0], plot_data.index[-1])
    else:
        theme.style_primary_axis(ax)
    # Only render when this function owns the figure; external callers display it
    if external_axes is None:
        theme.visualize_output()
    if raw:
        print_rich_table(
            indices_data.fillna("-").iloc[-10:],
            headers=list(indices_data.columns),
            show_index=True,
            title=f"Indices [column: {column}]",
        )
    if export:
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "index_data",
            indices_data,
        )
    return indices_data
@log_start_end(log=logger)
def search_indices(query: list, limit: int = 10):
    """Search the available indices by keyword [Source: Yahoo Finance]

    Parameters
    ----------
    query: list
        The keyword you wish to search for. This can include spaces.
    limit: int
        The amount of views you want to show, by default this is set to 10.

    Returns
    -------
    Shows a rich table with the available options.
    """
    keyword, results = get_search_indices(query, limit)
    print_rich_table(
        results,
        show_index=True,
        index_name="ticker",
        headers=results.columns,
        title=f"Queried Indices with keyword {keyword}",
    )
__docformat__ = "numpy"
# pylint:disable=too-many-lines,R1710,R0904,C0415,too-many-branches,unnecessary-dict-index-lookup
import argparse
import logging
import os
import itertools
from datetime import date, datetime as dt
from typing import List, Dict, Any
import pandas as pd
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.decorators import check_api_key
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import (
alphavantage_view,
economy_helpers,
finviz_model,
finviz_view,
nasdaq_model,
nasdaq_view,
wsj_view,
econdb_view,
econdb_model,
fred_view,
fred_model,
yfinance_model,
yfinance_view,
investingcom_model,
plot_view,
commodity_view,
)
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
print_rich_table,
valid_date,
parse_and_split_input,
list_from_str,
)
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
from openbb_terminal.menu import session
logger = logging.getLogger(__name__)
class EconomyController(BaseController):
    """Economy Controller class"""

    # Command names exposed in this menu; each maps to a call_<name> handler
    CHOICES_COMMANDS = [
        "eval",
        "overview",
        "futures",
        "macro",
        "fred",
        "index",
        "treasury",
        "plot",
        "valuation",
        "performance",
        "spectrum",
        "map",
        "rtps",
        "bigmac",
        "ycrv",
        # "spread",
        "events",
        "edebt",
    ]
    # Sub-menus reachable from this controller
    CHOICES_MENUS = [
        "qa",
    ]
    # Sortable columns for the WSJ-sourced futures table
    wsj_sortby_cols_dict = {c: None for c in ["ticker", "last", "change", "prevClose"]}
    # Valid --period values for the Finviz performance map
    map_period_list = ["1d", "1w", "1m", "3m", "6m", "1y"]
    # Valid --type values for the Finviz performance map
    map_filter_list = ["sp500", "world", "full", "etf"]
    macro_us_interval = [
        "annual",
        "quarter",
        "semiannual",
        "monthly",
        "weekly",
        "daily",
    ]
    # Accepted macro data types, upper- and lower-case variants
    # NOTE(review): "gpdc" looks like a typo of "gdpc" (the uppercase list has
    # "GDPC") — confirm against the command that consumes this list.
    macro_us_types = [
        "GDP",
        "GDPC",
        "INF",
        "CPI",
        "TYLD",
        "UNEMP",
        "gdp",
        "gpdc",
        "inf",
        "cpi",
        "tyld",
        "unemp",
    ]
    overview_options = ["indices", "usbonds", "glbonds", "currencies"]
    # Treasury yield maturities accepted by the AlphaVantage view
    tyld_maturity = ["3m", "5y", "10y", "30y"]
    valuation_sort_cols = [
        "Name",
        "MarketCap",
        "P/E",
        "FwdP/E",
        "PEG",
        "P/S",
        "P/B",
        "P/C",
        "P/FCF",
        "EPSpast5Y",
        "EPSnext5Y",
        "Salespast5Y",
        "Change",
        "Volume",
    ]
    performance_sort_list = [
        "Name",
        "Week",
        "Month",
        "3Month",
        "6Month",
        "1Year",
        "YTD",
        "Recom",
        "AvgVolume",
        "RelVolume",
        "Change",
        "Volume",
    ]
    # Yahoo Finance interval strings accepted by the index command
    index_interval = [
        "1m",
        "2m",
        "5m",
        "15m",
        "30m",
        "60m",
        "90m",
        "1h",
        "1d",
        "5d",
        "1wk",
        "1mo",
        "3mo",
    ]
    futures_commodities = ["energy", "metals", "meats", "grains", "softs"]
    # Valid arguments for `macro --show`
    macro_show = ["parameters", "countries", "transform"]
    d_GROUPS = finviz_model.GROUPS
    PATH = "/economy/"
    # Human-readable summary of data loaded so far, shown in the menu
    stored_datasets = ""
    FILE_PATH = os.path.join(os.path.dirname(__file__), "README.md")
    CHOICES_GENERATION = True
    def __init__(self, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)
        # Series currently selected via `fred` (id -> {"title", "units"})
        self.current_series: Dict = dict()
        # Series IDs returned by the most recent `fred -q` search
        self.fred_query: pd.Series = pd.Series(dtype=float)
        # Loaded data frames, keyed by the command that produced them
        self.DATASETS: Dict[Any, pd.DataFrame] = dict()
        # Units per country/parameter as returned by EconDB
        self.UNITS: Dict[Any, Dict[Any, Any]] = dict()
        # FRED series id -> "title (units)" label used for plotting
        self.FRED_TITLES: Dict = dict()
        self.DATASETS["macro"] = pd.DataFrame()
        self.DATASETS["treasury"] = pd.DataFrame()
        self.DATASETS["fred"] = pd.DataFrame()
        self.DATASETS["index"] = pd.DataFrame()
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            # This is still needed because we can't use choices and nargs separated by comma
            choices["treasury"]["--type"] = {
                c: {} for c in econdb_model.TREASURIES["instruments"]
            }
            choices["treasury"]["-t"] = "--type"
            choices["macro"]["--parameters"] = {c: {} for c in econdb_model.PARAMETERS}
            choices["macro"]["-p"] = "--parameters"
            choices["macro"]["--countries"] = {
                c: {} for c in econdb_model.COUNTRY_CODES
            }
            choices["macro"]["-c"] = "--countries"
            choices["fred"]["--parameter"] = {c: {} for c in self.fred_query.tolist()}
            choices["fred"]["-p"] = "--parameter"
            choices["index"]["--indices"] = {c: {} for c in yfinance_model.INDICES}
            choices["index"]["-i"] = "--indices"
            choices["bigmac"]["--countries"] = {
                c: {} for c in nasdaq_model.get_country_codes()["Code"].values
            }
            choices["bigmac"]["-c"] = "--countries"
            self.choices = choices
            self.completer = NestedCompleter.from_nested_dict(choices)
def parse_input(self, an_input: str) -> List:
"""Parse controller input
Overrides the parent class function to handle github org/repo path convention.
See `BaseController.parse_input()` for details.
"""
# Filtering out sorting parameters with forward slashes like P/E
sort_filter = r"((\ -s |\ --sortby ).*?(P\/E|Fwd P\/E|P\/S|P\/B|P\/C|P\/FCF)*)"
custom_filters = [sort_filter]
commands = parse_and_split_input(
an_input=an_input, custom_filters=custom_filters
)
return commands
    def update_runtime_choices(self):
        """Refresh the prompt-toolkit completer with data loaded at runtime.

        Called after `fred`, `macro`, etc. load new series so that
        `fred --parameter` and `plot --y1/--y2` can autocomplete them.
        """
        if session and obbff.USE_PROMPT_TOOLKIT:
            if not self.fred_query.empty:
                self.choices["fred"]["--parameter"] = {c: None for c in self.fred_query}
            if self.DATASETS:
                # All column names across every loaded dataset
                options = [
                    option
                    for _, values in self.DATASETS.items()
                    for option in values.keys()
                ]
                # help users to select multiple timeseries for one axis
                economicdata = list()
                # Offer every single series plus every ordered pair "a,b"
                for L in [1, 2]:
                    for subset in itertools.combinations(options, L):
                        economicdata.append(",".join(subset))
                        if len(subset) > 1:
                            economicdata.append(",".join(subset[::-1]))
                for argument in [
                    "--y1",
                    "--y2",
                ]:
                    self.choices["plot"][argument] = {
                        option: None for option in economicdata
                    }
            self.completer = NestedCompleter.from_nested_dict(self.choices)
    def print_help(self):
        """Print help"""
        # Order of add_* calls defines the on-screen menu layout
        mt = MenuText("economy/")
        mt.add_cmd("overview")
        mt.add_cmd("futures")
        mt.add_cmd("map")
        mt.add_cmd("bigmac")
        mt.add_cmd("ycrv")
        # spread command disabled while investpy is down
        # mt.add_cmd("spread")
        mt.add_cmd("events")
        mt.add_cmd("edebt")
        mt.add_raw("\n")
        mt.add_cmd("rtps")
        mt.add_cmd("valuation")
        mt.add_cmd("performance")
        mt.add_cmd("spectrum")
        mt.add_raw("\n")
        mt.add_info("_database_")
        mt.add_cmd("macro")
        mt.add_cmd("treasury")
        mt.add_cmd("fred")
        mt.add_cmd("index")
        mt.add_raw("\n")
        # Show which datasets have been loaded so far in this session
        mt.add_param("_stored", self.stored_datasets)
        mt.add_raw("\n")
        mt.add_cmd("eval")
        mt.add_cmd("plot")
        mt.add_raw("\n")
        mt.add_menu("qa")
        console.print(text=mt.menu_text, menu="Economy")
@log_start_end(log=logger)
def call_overview(self, other_args: List[str]):
"""Process overview command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="overview",
description="""
Provide a market overview of a variety of options. This can be a general overview,
indices, bonds and currencies. [Source: Wall St. Journal]
""",
)
parser.add_argument(
"-t",
"--type",
dest="type",
help="Obtain either US indices, US Bonds, Global Bonds or Currencies",
type=str,
choices=self.overview_options,
default="",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-t")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
if not ns_parser.type:
wsj_view.display_overview(
export=ns_parser.export,
)
elif ns_parser.type == "indices":
wsj_view.display_indices(
export=ns_parser.export,
)
if ns_parser.type == "usbonds":
wsj_view.display_usbonds(
export=ns_parser.export,
)
if ns_parser.type == "glbonds":
wsj_view.display_glbonds(
export=ns_parser.export,
)
if ns_parser.type == "currencies":
wsj_view.display_currencies(
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_futures(self, other_args: List[str]):
        """Process futures command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="futures",
            description="Futures/Commodities from Wall St. Journal and FinViz.",
        )
        parser.add_argument(
            "-c",
            "--commodity",
            dest="commodity",
            help="Obtain commodity futures from FinViz",
            type=str,
            choices=self.futures_commodities,
            default="",
        )
        parser.add_argument(
            "-s",
            "--sortby",
            dest="sortby",
            type=str,
            choices=self.wsj_sortby_cols_dict.keys(),
            default="ticker",
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            dest="reverse",
            default=False,
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        # Treat a bare first argument as the --commodity value
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-c")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            # ns_parser.source is injected by parse_known_args_and_warn
            # (a --source option added for all commands) — not defined above
            if ns_parser.source == "Finviz":
                if ns_parser.commodity:
                    finviz_view.display_future(
                        future_type=ns_parser.commodity.capitalize(),
                        sortby=ns_parser.sortby,
                        ascend=ns_parser.reverse,
                        export=ns_parser.export,
                    )
                else:
                    console.print(
                        "[red]Commodity group must be specified on Finviz.[/red]"
                    )
            elif ns_parser.source == "WallStreetJournal":
                # WSJ shows all futures; a commodity filter is Finviz-only
                if ns_parser.commodity:
                    console.print("[red]Commodity flag valid with Finviz only.[/red]")
                wsj_view.display_futures(
                    export=ns_parser.export,
                )
    @log_start_end(log=logger)
    def call_map(self, other_args: List[str]):
        """Process map command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="map",
            description="""
                Performance index stocks map categorized by sectors and industries.
                Size represents market cap. Opens web-browser. [Source: Finviz]
            """,
        )
        parser.add_argument(
            "-p",
            "--period",
            action="store",
            dest="s_period",
            type=str,
            default="1d",
            choices=self.map_period_list,
            help="Performance period.",
        )
        parser.add_argument(
            "-t",
            "--type",
            action="store",
            dest="s_type",
            type=str,
            default="sp500",
            choices=self.map_filter_list,
            help="Map filter type.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Opens the interactive map in the default web browser
            finviz_view.display_performance_map(
                period=ns_parser.s_period,
                map_filter=ns_parser.s_type,
            )
    @log_start_end(log=logger)
    def call_bigmac(self, other_args: List[str]):
        """Process bigmac command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="bigmac",
            description="""
                Get historical Big Mac Index [Nasdaq Data Link]
            """,
        )
        parser.add_argument(
            "--codes",
            help="Flag to show all country codes",
            dest="codes",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "-c",
            "--countries",
            help="Country codes to get data for.",
            dest="countries",
            default="USA",
            type=str,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser,
            other_args,
            export_allowed=EXPORT_BOTH_RAW_DATA_AND_FIGURES,
            raw=True,
        )
        if ns_parser:
            # "USA,CAN" -> ["USA", "CAN"]
            ns_parser.countries = list_from_str(ns_parser.countries)
            if ns_parser.codes:
                # --codes only lists the valid country codes; no data fetched
                console.print(
                    nasdaq_model.get_country_codes().to_string(index=False), "\n"
                )
            else:
                nasdaq_view.display_big_mac_index(
                    country_codes=ns_parser.countries,
                    raw=ns_parser.raw,
                    export=ns_parser.export,
                )
@log_start_end(log=logger)
def call_macro(self, other_args: List[str]):
"""Process macro command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="macro",
description="Get a broad selection of macro data from one or multiple countries. This includes Gross "
"Domestic Product (RGDP & GDP) and the underlying components, Treasury Yields (Y10YD & M3YD), "
"Employment figures (URATE, EMP, AC0I0 and EMRATIO), Government components (e.g. GBAL & GREV), "
"Consumer and Producer Indices (CPI & PPI) and a variety of other indicators. [Source: EconDB]",
)
parser.add_argument(
"-p",
"--parameters",
type=str,
dest="parameters",
help="Abbreviation(s) of the Macro Economic data",
default="CPI",
)
parser.add_argument(
"-c",
"--countries",
type=str,
dest="countries",
help="The country or countries you wish to show data for",
default="united_states",
)
parser.add_argument(
"-t",
"--transform",
dest="transform",
help="The transformation to apply to the data",
default="",
choices=econdb_model.TRANSFORM,
)
parser.add_argument(
"--show",
dest="show",
help="Show parameters and what they represent using 'parameters'"
" or countries and their currencies using 'countries'",
choices=self.macro_show,
default=None,
)
parser.add_argument(
"-s",
"--start",
dest="start_date",
help="The start date of the data (format: YEAR-MONTH-DAY, i.e. 2010-12-31)",
default=None,
)
parser.add_argument(
"-e",
"--end",
dest="end_date",
help="The end date of the data (format: YEAR-MONTH-DAY, i.e. 2021-06-20)",
default=None,
)
parser.add_argument(
"--convert",
dest="currency",
help="Convert the currency of the chosen country to a specified currency. To find the "
"currency symbols use '--show countries'",
choices=econdb_model.COUNTRY_CURRENCIES,
default=False,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED, raw=True
)
if ns_parser:
parameters = list_from_str(ns_parser.parameters.upper())
countries = list_from_str(ns_parser.countries.lower())
if ns_parser.show:
if ns_parser.show == "parameters":
print_rich_table(
pd.DataFrame.from_dict(econdb_model.PARAMETERS, orient="index"),
show_index=True,
index_name="Parameter",
headers=["Name", "Period", "Description"],
)
elif ns_parser.show == "countries":
print_rich_table(
pd.DataFrame(econdb_model.COUNTRY_CURRENCIES.items()),
show_index=False,
headers=["Country", "Currency"],
)
elif ns_parser.show == "transform":
print_rich_table(
pd.DataFrame(econdb_model.TRANSFORM.items()),
show_index=False,
headers=["Code", "Transform"],
)
return self.queue
if ns_parser.parameters and ns_parser.countries:
# Store data
(df, units, _) = econdb_model.get_aggregated_macro_data(
parameters=parameters,
countries=countries,
transform=ns_parser.transform,
start_date=ns_parser.start_date,
end_date=ns_parser.end_date,
symbol=ns_parser.currency,
)
if not df.empty:
df.columns = ["_".join(column) for column in df.columns]
if ns_parser.transform:
df.columns = [df.columns[0] + f"_{ns_parser.transform}"]
self.DATASETS["macro"] = pd.concat([self.DATASETS["macro"], df])
# update units dict
for country, data in units.items():
if country not in self.UNITS:
self.UNITS[country] = {}
for key, value in data.items():
self.UNITS[country][key] = value
self.stored_datasets = (
economy_helpers.update_stored_datasets_string(self.DATASETS)
)
# Display data just loaded
econdb_view.show_macro_data(
parameters=parameters,
countries=countries,
transform=ns_parser.transform,
start_date=ns_parser.start_date,
end_date=ns_parser.end_date,
symbol=ns_parser.currency,
raw=ns_parser.raw,
export=ns_parser.export,
)
self.update_runtime_choices()
if obbff.ENABLE_EXIT_AUTO_HELP:
self.print_help()
@check_api_key(["API_FRED_KEY"])
def call_fred(self, other_args: List[str]):
"""Process fred command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="fred",
description="Query the FRED database and plot data based on the Series ID. [Source: FRED]",
)
parser.add_argument(
"-p",
"--parameter",
type=str,
dest="parameter",
default="",
help="Series ID of the Macro Economic data from FRED",
)
parser.add_argument(
"-s",
"--start",
dest="start_date",
type=valid_date,
help="Starting date (YYYY-MM-DD) of data",
default=None,
)
parser.add_argument(
"-e",
"--end",
dest="end_date",
type=valid_date,
help="Ending date (YYYY-MM-DD) of data",
default=None,
)
parser.add_argument(
"-q",
"--query",
type=str,
action="store",
nargs="+",
dest="query",
help="Query the FRED database to obtain Series IDs given the query search term.",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
export_allowed=EXPORT_BOTH_RAW_DATA_AND_FIGURES,
raw=True,
limit=100,
)
if ns_parser:
parameters = list_from_str(ns_parser.parameter.upper())
if ns_parser.query:
query = " ".join(ns_parser.query)
df_search = fred_model.get_series_notes(search_query=query)
if not df_search.empty:
fred_view.notes(search_query=query, limit=ns_parser.limit)
self.fred_query = df_search["id"].head(ns_parser.limit)
self.update_runtime_choices()
if parameters:
console.print(
"\nWarning: -p/--parameter is ignored when using -q/--query."
)
return self.queue
if parameters:
series_dict = {}
for series in parameters:
information = fred_model.check_series_id(series)
if "seriess" in information:
series_dict[series] = {
"title": information["seriess"][0]["title"],
"units": information["seriess"][0]["units_short"],
}
self.current_series = {series: series_dict[series]}
if not series_dict:
return self.queue
df, detail = fred_view.display_fred_series(
series_ids=parameters,
start_date=ns_parser.start_date,
end_date=ns_parser.end_date,
limit=ns_parser.limit,
raw=ns_parser.raw,
export=ns_parser.export,
get_data=True,
)
if not df.empty:
for series_id, data in detail.items():
self.FRED_TITLES[
series_id
] = f"{data['title']} ({data['units']})"
# Making data available at the class level
self.DATASETS["fred"][series_id] = df[series_id]
self.stored_datasets = (
economy_helpers.update_stored_datasets_string(self.DATASETS)
)
self.update_runtime_choices()
if obbff.ENABLE_EXIT_AUTO_HELP:
self.print_help()
else:
console.print("[red]No data found for the given Series ID[/red]")
elif not parameters and ns_parser.raw:
console.print(
"Warning: -r/--raw should be combined with -p/--parameter."
)
    @log_start_end(log=logger)
    def call_index(self, other_args: List[str]):
        """Process index command.

        Loads one or more indices via Yahoo Finance, stores each returned
        series in ``self.DATASETS["index"]`` and plots them together.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="index",
            description="Obtain any set of indices and plot them together. With the -si argument the major indices are "
            "shown. By using the arguments (for example 'nasdaq' and 'sp500') you can collect data and "
            "plot the graphs together. [Source: Yahoo finance / FinanceDatabase]",
        )
        parser.add_argument(
            "-i",
            "--indices",
            type=str,
            dest="indices",
            help="One or multiple indices",
        )
        parser.add_argument(
            "--show",
            dest="show_indices",
            help="Show the major indices, their arguments and ticker",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "--interval",
            type=str,
            dest="interval",
            help="The preferred interval data is shown at. This can be 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, "
            "1d, 5d, 1wk, 1mo or 3mo",
            choices=self.index_interval,
            default="1d",
        )
        parser.add_argument(
            "-s",
            "--start",
            dest="start_date",
            help="The start date of the data (format: YEAR-MONTH-DAY, i.e. 2010-12-31)",
            default="2000-01-01",
        )
        parser.add_argument(
            "-e",
            "--end",
            dest="end_date",
            help="The end date of the data (format: YEAR-MONTH-DAY, i.e. 2021-06-20)",
            default=None,
        )
        parser.add_argument(
            "-c",
            "--column",
            type=str,
            dest="column",
            help="The column you wish to load in, by default this is the Adjusted Close column",
            default="Adj Close",
        )
        parser.add_argument(
            "-q",
            "--query",
            type=str,
            dest="query",
            help="Search for indices with given keyword",
        )
        parser.add_argument(
            "-r",
            "--returns",
            help="Flag to show compounded returns over interval.",
            dest="returns",
            action="store_true",
            default=False,
        )
        # A bare first token (e.g. "index sp500") is treated as -i/--indices
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-i")
        ns_parser = self.parse_known_args_and_warn(
            parser,
            other_args,
            export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED,
            raw=True,
            limit=10,
        )
        if ns_parser:
            indices = list_from_str(ns_parser.indices)
            # A keyword search short-circuits loading/plotting entirely
            if ns_parser.query and ns_parser.limit:
                yfinance_view.search_indices(ns_parser.query, ns_parser.limit)
                return self.queue
            # Listing the major indices also short-circuits
            if ns_parser.show_indices:
                print_rich_table(
                    pd.DataFrame.from_dict(yfinance_model.INDICES, orient="index"),
                    show_index=True,
                    index_name="Argument",
                    headers=["Name", "Ticker"],
                    title="Major Indices",
                )
                return self.queue
            if indices:
                for i, index in enumerate(indices):
                    df = yfinance_model.get_index(
                        index,
                        interval=ns_parser.interval,
                        start_date=ns_parser.start_date,
                        end_date=ns_parser.end_date,
                        column=ns_parser.column,
                    )
                    if not df.empty:
                        self.DATASETS["index"][index] = df
                        self.stored_datasets = (
                            economy_helpers.update_stored_datasets_string(self.DATASETS)
                        )
                        # display only once in the last iteration
                        if i == len(indices) - 1:
                            yfinance_view.show_indices(
                                indices=indices,
                                interval=ns_parser.interval,
                                start_date=ns_parser.start_date,
                                end_date=ns_parser.end_date,
                                column=ns_parser.column,
                                raw=ns_parser.raw,
                                export=ns_parser.export,
                                returns=ns_parser.returns,
                            )
                self.update_runtime_choices()
                if obbff.ENABLE_EXIT_AUTO_HELP:
                    self.print_help()
@log_start_end(log=logger)
def call_treasury(self, other_args: List[str]):
"""Process treasury command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="treasury",
description="Obtain any set of U.S. treasuries and plot them together. These can be a range of maturities "
"for nominal, inflation-adjusted (on long term average of inflation adjusted) and secondary "
"markets over a lengthy period. Note: 3-month and 10-year treasury yields for other countries "
"are available via the command 'macro' and parameter 'M3YD' and 'Y10YD'. [Source: EconDB / FED]",
)
parser.add_argument(
"-m",
"--maturity",
type=str,
dest="maturity",
help="The preferred maturity which is dependent on the type of the treasury",
default="10y",
)
parser.add_argument(
"--show",
dest="show_maturities",
help="Show the maturities available for every instrument.",
action="store_true",
default=False,
)
parser.add_argument(
"--freq",
type=str,
dest="frequency",
choices=econdb_model.TREASURIES["frequencies"],
help="The frequency, this can be annually, monthly, weekly or daily",
default="monthly",
)
parser.add_argument(
"-t",
"--type",
type=str,
dest="type",
help="Choose from: nominal, inflation, average, secondary",
default="nominal",
)
parser.add_argument(
"-s",
"--start",
dest="start_date",
help="The start date of the data (format: YEAR-MONTH-DAY, i.e. 2010-12-31)",
default="1934-01-31",
)
parser.add_argument(
"-e",
"--end",
dest="end_date",
help="The end date of the data (format: YEAR-DAY-MONTH, i.e. 2021-06-02)",
default=date.today(),
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-m")
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED,
raw=True,
limit=10,
)
if ns_parser:
maturities = list_from_str(ns_parser.maturity)
types = list_from_str(ns_parser.type)
for item in types:
if item not in econdb_model.TREASURIES["instruments"]:
print(f"{item} is not a valid instrument type.\n")
return self.queue
if ns_parser.show_maturities:
econdb_view.show_treasury_maturities()
return self.queue
if ns_parser.maturity and ns_parser.type:
df = econdb_model.get_treasuries(
instruments=types,
maturities=maturities,
frequency=ns_parser.frequency,
start_date=ns_parser.start_date,
end_date=ns_parser.end_date,
)
if not df.empty:
self.DATASETS["treasury"] = pd.concat(
[
self.DATASETS["treasury"],
df,
]
)
cols = []
for column in self.DATASETS["treasury"].columns:
if isinstance(column, tuple):
cols.append("_".join(column))
else:
cols.append(column)
self.DATASETS["treasury"].columns = cols
self.stored_datasets = (
economy_helpers.update_stored_datasets_string(self.DATASETS)
)
econdb_view.show_treasuries(
instruments=types,
maturities=maturities,
frequency=ns_parser.frequency,
start_date=ns_parser.start_date,
end_date=ns_parser.end_date,
raw=ns_parser.raw,
export=ns_parser.export,
)
self.update_runtime_choices()
if obbff.ENABLE_EXIT_AUTO_HELP:
self.print_help()
@log_start_end(log=logger)
def call_ycrv(self, other_args: List[str]):
"""Process ycrv command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="ycrv",
description="Generate country yield curve. The yield curve shows the bond rates"
" at different maturities.",
)
parser.add_argument(
"-d",
"--date",
type=valid_date,
help="Date to get data from FRED. If not supplied, the most recent entry will be used.",
dest="date",
default=None,
)
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED,
raw=True,
)
if ns_parser:
fred_view.display_yield_curve(
date=ns_parser.date.strftime("%Y-%m-%d") if ns_parser.date else "",
raw=ns_parser.raw,
export=ns_parser.export,
)
# TODO: Add `Investing` to sources again when `investpy` is fixed
@log_start_end(log=logger)
def call_events(self, other_args: List[str]):
"""Process events command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="events",
description="Economic calendar. If no start or end dates, default is the current day high importance events.",
)
parser.add_argument(
"-c",
"--country",
action="store",
dest="country",
choices=[
x.replace(" ", "_") for x in investingcom_model.CALENDAR_COUNTRIES
],
type=str,
default="",
help="Display calendar for specific country.",
)
parser.add_argument(
"-s",
"--start",
dest="start_date",
type=valid_date,
help="The start date of the data (format: YEAR-MONTH-DAY, i.e. 2010-12-31)",
default=dt.now().strftime("%Y-%m-%d"),
)
parser.add_argument(
"-e",
"--end",
dest="end_date",
type=valid_date,
help="The start date of the data (format: YEAR-MONTH-DAY, i.e. 2010-12-31)",
default=dt.now().strftime("%Y-%m-%d"),
)
parser.add_argument(
"-d",
"--date",
dest="spec_date",
type=valid_date,
help="Get a specific date for events. Overrides start and end dates.",
default=None,
)
parser.add_argument(
"-i",
"--importance",
action="store",
dest="importance",
choices=investingcom_model.IMPORTANCES,
help="Event importance classified as high, medium, low or all.",
)
parser.add_argument(
"--categories",
action="store",
dest="category",
choices=investingcom_model.CATEGORIES,
default=None,
help="[INVESTING source only] Event category.",
)
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED,
raw=True,
limit=100,
)
if ns_parser:
if ns_parser.start_date:
start_date = ns_parser.start_date.strftime("%Y-%m-%d")
else:
start_date = None
if ns_parser.end_date:
end_date = ns_parser.end_date.strftime("%Y-%m-%d")
else:
end_date = None
# TODO: Add `Investing` to sources again when `investpy` is fixed
countries = (
ns_parser.country.replace("_", " ").title().split(",")
if ns_parser.country
else []
)
if ns_parser.spec_date:
start_date = ns_parser.spec_date.strftime("%Y-%m-%d")
end_date = ns_parser.spec_date.strftime("%Y-%m-%d")
else:
start_date, end_date = sorted([start_date, end_date])
nasdaq_view.display_economic_calendar(
country=countries,
start_date=start_date,
end_date=end_date,
limit=ns_parser.limit,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_plot(self, other_args: List[str]):
"""Process plot command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="plot",
description="This command can plot any data on two y-axes obtained from the macro, fred, index and "
"treasury commands. To be able to use this data, just load the available series from the previous "
"commands. For example 'macro -p GDP -c Germany Netherlands' will store the data for usage "
"in this command. Therefore, it allows you to plot different time series in one graph. "
"The example above could be plotted the following way: 'plot --y1 Germany_GDP --y2 Netherlands_GDP' "
"or 'plot --y1 Germany_GDP Netherlands_GDP'",
)
parser.add_argument(
"--y1",
type=str,
dest="yaxis1",
help="Select the data you wish to plot on the first y-axis. You can select multiple variables here.",
default="",
)
parser.add_argument(
"--y2",
type=str,
dest="yaxis2",
help="Select the data you wish to plot on the second y-axis. You can select multiple variables here.",
default="",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "--y1")
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED,
limit=10,
)
if ns_parser:
y1s = list_from_str(ns_parser.yaxis1)
y2s = list_from_str(ns_parser.yaxis2)
if not self.DATASETS:
console.print(
"There is no data stored yet. Please use either the 'macro', 'fred', 'index' and/or "
"'treasury' command."
)
else:
dataset_yaxis1 = pd.DataFrame()
dataset_yaxis2 = pd.DataFrame()
if y1s:
for variable in y1s:
for key, data in self.DATASETS.items():
if variable in data.columns:
if key == "macro":
split = variable.split("_")
transform = ""
if (
len(split) == 3
and split[2] in econdb_model.TRANSFORM
):
(
country,
parameter_abbreviation,
transform,
) = split
elif len(split) == 2:
country, parameter_abbreviation = split
else:
country = f"{split[0]} {split[1]}"
parameter_abbreviation = split[2]
parameter = econdb_model.PARAMETERS[
parameter_abbreviation
]["name"]
units = self.UNITS[country.replace(" ", "_")][
parameter_abbreviation
]
if transform:
transformtype = (
f" ({econdb_model.TRANSFORM[transform]}) "
)
else:
transformtype = " "
dataset_yaxis1[
f"{country}{transformtype}[{parameter}, Units: {units}]"
] = data[variable]
elif key == "fred":
compound_detail = self.FRED_TITLES[variable]
detail = {
"units": compound_detail.split("(")[-1].split(
")"
)[0],
"title": compound_detail.split("(")[0].strip(),
}
data_to_plot, title = fred_view.format_data_to_plot(
data[variable], detail
)
dataset_yaxis1[title] = data_to_plot
elif (
key == "index"
and variable in yfinance_model.INDICES
):
dataset_yaxis1[
yfinance_model.INDICES[variable]["name"]
] = data[variable]
elif key == "treasury":
parameter, maturity = variable.split("_")
dataset_yaxis1[f"{parameter} [{maturity}]"] = data[
variable
]
else:
dataset_yaxis1[variable] = data[variable]
break
if dataset_yaxis1.empty:
console.print(
f"[red]Not able to find any data for the --y1 argument. The currently available "
f"options are: {', '.join(self.choices['plot']['--y1'])}[/red]\n"
)
if y2s:
for variable in y2s:
for key, data in self.DATASETS.items():
if variable in data.columns:
if key == "macro":
split = variable.split("_")
transform = ""
if (
len(split) == 3
and split[2] in econdb_model.TRANSFORM
):
(
country,
parameter_abbreviation,
transform,
) = split
elif len(split) == 2:
country, parameter_abbreviation = split
else:
country = f"{split[0]} {split[1]}"
parameter_abbreviation = split[2]
parameter = econdb_model.PARAMETERS[
parameter_abbreviation
]["name"]
units = self.UNITS[country.replace(" ", "_")][
parameter_abbreviation
]
if transform:
transformtype = (
f" ({econdb_model.TRANSFORM[transform]}) "
)
else:
transformtype = " "
dataset_yaxis2[
f"{country}{transformtype}[{parameter}, Units: {units}]"
] = data[variable]
elif key == "fred":
compound_detail = self.FRED_TITLES[variable]
detail = {
"units": compound_detail.split("(")[-1].split(
")"
)[0],
"title": compound_detail.split("(")[0].strip(),
}
data_to_plot, title = fred_view.format_data_to_plot(
data[variable], detail
)
dataset_yaxis2[title] = data_to_plot
elif (
key == "index"
and variable in yfinance_model.INDICES
):
dataset_yaxis2[
yfinance_model.INDICES[variable]["name"]
] = data[variable]
elif key == "treasury":
parameter, maturity = variable.split("_")
dataset_yaxis2[f"{parameter} [{maturity}]"] = data[
variable
]
else:
dataset_yaxis2[variable] = data[variable]
break
if dataset_yaxis2.empty:
console.print(
f"[red]Not able to find any data for the --y2 argument. The currently available "
f"options are: {', '.join(self.choices['plot']['--y2'])}[/red]\n"
)
if y1s or y2s:
plot_view.show_plot(
dataset_yaxis_1=dataset_yaxis1,
dataset_yaxis_2=dataset_yaxis2,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_rtps(self, other_args: List[str]):
"""Process rtps command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="rtps",
description="""
Real-time and historical sector performances calculated from
S&P500 incumbents. Pops plot in terminal. [Source: Alpha Vantage]
""",
)
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
export_allowed=EXPORT_BOTH_RAW_DATA_AND_FIGURES,
raw=True,
)
if ns_parser:
alphavantage_view.realtime_performance_sector(
raw=ns_parser.raw,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_valuation(self, other_args: List[str]):
"""Process valuation command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="valuation",
description="""
View group (sectors, industry or country) valuation data. [Source: Finviz]
""",
)
parser.add_argument(
"-g",
"--group",
type=str,
choices=list(self.d_GROUPS.keys()),
default="sector",
dest="group",
help="Data group (sectors, industry or country)",
)
parser.add_argument(
"-s",
"--sortby",
dest="sortby",
type=str,
choices=self.valuation_sort_cols,
default="Name",
help="Column to sort by",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
default=False,
help=(
"Data is sorted in descending order by default. "
"Reverse flag will sort it in an ascending way. "
"Only works when raw data is displayed."
),
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-g")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
ns_group = (
" ".join(ns_parser.group)
if isinstance(ns_parser.group, list)
else ns_parser.group
)
finviz_view.display_valuation(
group=ns_group,
sortby=ns_parser.sortby,
ascend=ns_parser.reverse,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_performance(self, other_args: List[str]):
"""Process performance command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="performance",
description="""
View group (sectors, industry or country) performance data. [Source: Finviz]
""",
)
parser.add_argument(
"-g",
"--group",
type=str,
choices=list(self.d_GROUPS.keys()),
default="sector",
dest="group",
help="Data group (sector, industry or country)",
)
parser.add_argument(
"-s",
"--sortby",
dest="sortby",
choices=self.performance_sort_list,
default="Name",
help="Column to sort by",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
default=False,
help=(
"Data is sorted in descending order by default. "
"Reverse flag will sort it in an ascending way. "
"Only works when raw data is displayed."
),
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-g")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
ns_group = (
" ".join(ns_parser.group)
if isinstance(ns_parser.group, list)
else ns_parser.group
)
finviz_view.display_performance(
group=ns_group,
sortby=ns_parser.sortby,
ascend=ns_parser.reverse,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_edebt(self, other_args: List[str]):
"""Process edebt command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="edebt",
description="""
National debt statistics for various countries. [Source: Wikipedia]
""",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED, limit=20
)
if ns_parser:
commodity_view.display_debt(export=ns_parser.export, limit=ns_parser.limit)
@log_start_end(log=logger)
def call_spectrum(self, other_args: List[str]):
"""Process spectrum command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="spectrum",
description="""
View group (sectors, industry or country) spectrum data. [Source: Finviz]
""",
)
parser.add_argument(
"-g",
"--group",
type=str,
choices=list(self.d_GROUPS.keys()),
default="sector",
dest="group",
help="Data group (sector, industry or country)",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-g")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
)
if ns_parser:
ns_group = (
" ".join(ns_parser.group)
if isinstance(ns_parser.group, list)
else ns_parser.group
)
finviz_view.display_spectrum(group=ns_group)
# # Due to Finviz implementation of Spectrum, we delete the generated spectrum figure
# # after saving it and displaying it to the user
os.remove(self.d_GROUPS[ns_group] + ".jpg")
@log_start_end(log=logger)
def call_eval(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="eval",
description="""Create custom data column from loaded datasets. Can be mathematical expressions supported
by pandas.eval() function.
Example. If I have loaded `fred DGS2,DGS5` and I want to create a new column that is the difference
between these two, I can create a new column by doing `eval spread = DGS2 - DGS5`.
Notice that the command is case sensitive, i.e., `DGS2` is not the same as `dgs2`.
""",
)
parser.add_argument(
"-q",
"--query",
type=str,
nargs="+",
dest="query",
help="Query to evaluate on loaded datasets",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-q")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
self.DATASETS = economy_helpers.create_new_entry(
self.DATASETS, " ".join(ns_parser.query)
)
self.stored_datasets = economy_helpers.update_stored_datasets_string(
self.DATASETS
)
console.print()
@log_start_end(log=logger)
def call_qa(self, _):
"""Process qa command"""
if not any(True for x in self.DATASETS.values() if not x.empty):
console.print(
"There is no data stored. Please use either the 'macro', 'fred', 'index' and/or "
"'treasury' command in combination with the -st argument to plot data.\n"
)
return
from openbb_terminal.economy.quantitative_analysis.qa_controller import (
QaController,
)
data: Dict = {}
for source, _ in self.DATASETS.items():
if not self.DATASETS[source].empty:
if len(self.DATASETS[source].columns) == 1:
data[self.DATASETS[source].columns[0]] = self.DATASETS[source]
else:
for col in list(self.DATASETS[source].columns):
data[col] = self.DATASETS[source][col].to_frame()
if data:
self.queue = self.load_class(QaController, data, self.queue)
else:
console.print(
"[red]Please load a dataset before moving to the qa menu[/red]\n"
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/economy/economy_controller.py | 0.567697 | 0.185744 | economy_controller.py | pypi |
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List, Union
from matplotlib import ticker
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib import colors
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import seaborn as sns
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import investingcom_model
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
# Module-level logger following the project-wide convention.
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# Heatmap color schemes accepted by display_spread_matrix.
COLORS = ["rgb", "binary", "openbb"]
# pylint: disable=unnecessary-lambda-assignment
@log_start_end(log=logger)
def display_spread_matrix(
    countries: Union[str, List[str]] = "G7",
    maturity: str = "10Y",
    change: bool = False,
    color: str = "openbb",
    raw: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
    export: str = "",
):
    """Display spread matrix. [Source: Investing.com]

    Prints a rich table when ``raw`` is set, otherwise renders a seaborn
    heatmap with yields in the first column and pairwise spreads above the
    diagonal.

    Parameters
    ----------
    countries: Union[str, List[str]]
        Countries or group of countries. List of available countries is accessible through get_ycrv_countries().
    maturity: str
        Maturity to get data. By default 10Y.
    change: bool
        Flag to use 1 day change or not. By default False.
    color: str
        Color theme to use on heatmap, from rgb, binary or openbb By default, openbb.
    raw : bool
        Output only raw data.
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = investingcom_model.get_spread_matrix(countries, maturity, change)
    if not df.empty:
        if raw:
            pretty_df = df.copy()
            # Convert to string spreads
            pretty_df[list(pretty_df.columns)[1:]] = pretty_df[
                list(pretty_df.columns)[1:]
            ].applymap(lambda x: f"{x:+.1f}" if x != 0 else "")
            # Convert to string yields
            pretty_df[list(pretty_df.columns)[0]] = pd.DataFrame(
                df[list(pretty_df.columns)[0]]
            ).applymap(lambda x: f"{x/100:.3f}%" if not change else f"{x:+.1f}")
            # Add colors (rich markup: down_color for negatives, up_color otherwise)
            pretty_df = pretty_df.applymap(
                lambda x: f"[{theme.down_color}]{x}[/{theme.down_color}]"
                if "-" in x
                else f"[{theme.up_color}]{x}[/{theme.up_color}]"
            )
            if isinstance(countries, str):
                title = f"{countries} - Spread Matrix - {maturity}"
            else:
                title = f"Spread Matrix - {maturity}"
            print_rich_table(
                pretty_df,
                headers=list(pretty_df.columns),
                show_index=True,
                title=title,
            )
        else:
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                return
            # Hide the lower triangle plus each row's immediate right neighbour;
            # column 0 (the yields) stays visible for every row.
            mask = np.zeros((df.shape[0], df.shape[1]), dtype=bool)
            mask[np.tril_indices(len(mask))] = True
            mask[:, 0] = False
            for i in range(df.shape[0]):
                mask[i][i + 1] = True
            x_labels = list(df.columns)
            x_labels[1] = ""
            # https://stackoverflow.com/questions/53754012/create-a-gradient-colormap-matplotlib
            def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
                # Slice an existing colormap to the [minval, maxval] sub-range.
                new_cmap = colors.LinearSegmentedColormap.from_list(
                    f"trunc({cmap.name},{minval:.2f},{maxval:.2f})",
                    cmap(np.linspace(minval, maxval, n)),
                )
                return new_cmap
            # This is not a bool so that we can add different colors in future
            if color.lower() == "rgb":
                cmap = truncate_colormap(plt.get_cmap("brg"), 1, 0.4)
            elif color.lower() == "openbb":
                cmap = truncate_colormap(plt.get_cmap("magma"), 0.5, 0.1)
            else:  # binary
                cmap = colors.ListedColormap([theme.up_color, theme.down_color])
            heatmap = sns.heatmap(
                df,
                cmap=cmap,
                cbar=False,
                annot=True,
                annot_kws={
                    "fontsize": 12,
                },
                center=0,
                fmt="+.1f",
                linewidths=0.5,
                linecolor="black"
                if any(substring in theme.mpl_style for substring in ["dark", "boring"])
                else "white",
                xticklabels=x_labels,
                mask=mask,
                ax=ax,
            )
            ax.xaxis.tick_top()
            ax.xaxis.set_major_locator(
                ticker.FixedLocator([x + 0.25 for x in ax.get_xticks().tolist()])
            )
            ax.set_xticklabels(x_labels, rotation=45)
            ax.set_yticklabels(list(df.index.values), rotation=0)
            ax.yaxis.set_label_position("left")
            # Mirror the row labels on a twin axis on the right-hand side.
            y_labels = list(df.index.values)
            y_labels[-1] = ""
            ax1 = ax.twinx()
            ticks_loc = ax.get_yticks().tolist()
            ax1.yaxis.set_major_locator(ticker.FixedLocator(ticks_loc))
            ax1.set_yticklabels(y_labels, rotation=0)
            ax1.yaxis.set_label_position("right")
            ax1.set_ylim(ax.get_ylim())
            ax1.grid(False)
            ax1.set_frame_on(False)
            # Set 3 decimal places for yield and 1 spread
            if not change:
                # The first annotation of each row holds the yield column;
                # rewrite only those texts as percentages.
                spacing = df.shape[1] - 1
                k = 0
                for index, t in enumerate(heatmap.texts):
                    current_text = t.get_text()
                    if index == k:
                        k += spacing
                        spacing -= 1
                        text_transform = (
                            lambda x: f"{round(float(x)/100, 3)}%"
                        )  # flake8: noqa
                        t.set_text(text_transform(current_text))
                    else:
                        t.set_text(current_text)
            if isinstance(countries, str):
                ax.set_title(f"{countries} - Spread matrix - {maturity}", loc="center")
            else:
                ax.set_title(f"Spread matrix - {maturity}", loc="center")
            if not external_axes:
                theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "spread", df)
@log_start_end(log=logger)
def display_yieldcurve(
    country: str = "United States",
    external_axes: Optional[List[plt.Axes]] = None,
    raw: bool = False,
    export: str = "",
):
    """Display yield curve for specified country. [Source: Investing.com]

    Parameters
    ----------
    country: str
        Country to display yield curve. List of available countries is accessible through get_ycrv_countries().
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    raw : bool
        Output only raw data.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    country = country.title()
    df = investingcom_model.get_yieldcurve(country)
    if not df.empty:
        if external_axes is None:
            _, (ax1, ax2) = plt.subplots(
                nrows=2,
                ncols=1,
                figsize=plot_autoscale(),
                dpi=PLOT_DPI,
                gridspec_kw={"height_ratios": [2, 1]},
            )
        else:
            # Two axes are required: yields on top, daily change below.
            # (Message previously said "3 axis items" while the check is for 2.)
            if len(external_axes) != 2:
                logger.error("Expected list of 2 axis items")
                console.print("[red]Expected list of 2 axis items.\n[/red]")
                return
            (ax1, ax2) = external_axes
        ax1.plot(
            df["Tenor"],
            df["Previous"],
            linestyle="--",
            marker="o",
            label="Previous",
        )
        ax1.plot(df["Tenor"], df["Current"], "-o", label="Current")
        ax1.set_ylabel("Yield (%)")
        theme.style_primary_axis(ax1)
        ax1.yaxis.set_label_position("left")
        ax1.yaxis.set_ticks_position("left")
        ax1.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
        ax1.legend(
            loc="lower right",
            prop={"size": 9},
            ncol=3,
        )
        # Color each bar by the sign of the one-day change.
        colors_ = [
            theme.up_color if x > 0 else theme.down_color for x in df["Change"].values
        ]
        ax2.bar(df["Tenor"], df["Change"], width=1, color=colors_)
        ax2.set_ylabel("Change (bps)")
        ax2.set_xlabel("Maturity (years)")
        theme.style_primary_axis(ax2)
        ax2.yaxis.set_label_position("left")
        ax2.yaxis.set_ticks_position("left")
        if external_axes is None:
            ax1.set_title(f"Yield Curve - {country.title()} ")
            theme.visualize_output()
        if raw:
            print_rich_table(
                df,
                headers=list(df.columns),
                show_index=False,
                title=f"{country.title()} Yield Curve",
                floatfmt=".3f",
            )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ycrv",
        df,
    )
@log_start_end(log=logger)
def display_economic_calendar(
    country: str = "all",
    importance: str = "",
    category: str = "",
    start_date: str = "",
    end_date: str = "",
    limit: int = 100,
    export: str = "",
):
    """Display economic calendar. [Source: Investing.com]

    Parameters
    ----------
    country: str
        Country selected. List of available countries is accessible through get_events_countries().
    importance: str
        Importance selected from high, medium, low or all.
    category: str
        Event category. List of available categories is accessible through get_events_categories().
    start_date: str
        First date to get events, empty for the model's default range.
    end_date: str
        Last date to get events, empty for the model's default range.
    limit: int
        The maximum number of events to show, default is 100.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # The model returns the events plus a human-readable title for the table.
    df, detail = investingcom_model.get_economic_calendar(
        country, importance, category, start_date, end_date, limit
    )
    if not df.empty:
        print_rich_table(
            df,
            headers=list(df.columns),
            show_index=False,
            title=detail,
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "events",
            df,
        )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
from openbb_terminal.decorators import check_api_key
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import nasdaq_model
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
# Module-level logger following the project-wide convention.
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_economic_calendar(
    country: str, start_date: str, end_date: str, limit: int = 10, export: str = ""
) -> None:
    """Show the economic calendar for a country between two dates.

    Parameters
    ----------
    country : str
        Country to display calendar for
    start_date : str
        Start date for calendar
    end_date : str
        End date for calendar
    limit : int
        Limit number of rows to display
    export : str
        Export data to csv or excel file
    """
    calendar = nasdaq_model.get_economic_calendar(country, start_date, end_date)
    if calendar.empty:
        return
    print_rich_table(
        calendar.head(limit),
        title="Economic Calendar",
        show_index=False,
        headers=calendar.columns,
    )
    console.print()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "events", calendar)
@log_start_end(log=logger)
@check_api_key(["API_KEY_QUANDL"])
def display_big_mac_index(
    country_codes: Optional[List[str]] = None,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display Big Mac Index for given countries

    Parameters
    ----------
    country_codes : Optional[List[str]]
        List of country codes (ISO-3 letter country code). Codes available through economy.country_codes().
    raw : bool, optional
        Flag to display raw data, by default False
    export : str, optional
        Format data, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    big_mac = nasdaq_model.get_big_mac_indices(country_codes)
    if not big_mac.empty:
        # Plot on a fresh figure unless the caller supplied exactly one axis.
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        big_mac.plot(ax=ax, marker="o")
        ax.legend()
        ax.set_title("Big Mac Index (USD)")
        ax.set_ylabel("Price of Big Mac in USD")
        theme.style_primary_axis(ax)
        if external_axes is None:
            theme.visualize_output()
        if raw:
            print_rich_table(
                big_mac,
                headers=list(big_mac.columns),
                title="Big Mac Index",
                show_index=True,
            )
        export_data(
            export, os.path.dirname(os.path.abspath(__file__)), "bigmac", big_mac
        )
    else:
        logger.error("Unable to get big mac data")
        console.print("[red]Unable to get big mac data[/red]\n")
__docformat__ = "numpy"
import logging
import argparse
import datetime
import math
from typing import Dict, List, Tuple, Union
import pandas as pd
import pytz
import investpy
from tqdm import tqdm
from openbb_terminal.decorators import log_start_end
from openbb_terminal import helper_funcs
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Countries for which investpy exposes government bond data (used by ycrv/spread).
BOND_COUNTRIES = investpy.bonds.get_bond_countries()
# Countries accepted by the economic calendar, plus an "all" catch-all sentinel
# that downstream code expands (see get_economic_calendar).
CALENDAR_COUNTRIES = list(investpy.utils.constant.COUNTRY_ID_FILTERS.keys()) + ["all"]
# Economic-calendar event categories understood by investpy.news.economic_calendar.
CATEGORIES = [
    "employment",
    "credit",
    "balance",
    "economic_activity",
    "central_banks",
    "bonds",
    "inflation",
    "confidence_index",
]
# Importance filters for calendar events; "all" expands to the first three.
IMPORTANCES = ["high", "medium", "low", "all"]
# Commented countries either have no data or are not correctly formatted in investpy itself
# Country-group aliases selectable in the spread-matrix commands; values are the
# member country names passed to investpy (title-cased by the caller).
MATRIX_COUNTRIES = {
    "G7": [
        "United states",
        "Canada",
        "Japan",
        "Germany",
        "France",
        "Italy",
        "United Kingdom",
    ],
    "PIIGS": [
        "Portugal",
        "Italy",
        "Ireland",
        "Greece",
        "Spain",
    ],
    "EZ": [
        "Austria",
        "Belgium",
        "Cyprus",
        # "Estonia",
        "Finland",
        "France",
        "Germany",
        "Greece",
        "Ireland",
        "Italy",
        # "Latvia",
        # "Lithuania",
        # "Luxembourg",
        "Malta",
        "Netherlands",
        "Portugal",
        "Slovakia",
        "Slovenia",
        "Spain",
    ],
    "AMERICAS": [
        "Brazil",
        "Canada",
        "Chile",
        "Colombia",
        "Mexico",
        "Peru",
        "United states",
    ],
    "EUROPE": [
        "Austria",
        "Belgium",
        "Bulgaria",
        "Croatia",
        "Cyprus",
        # "Czech Republic",
        "Finland",
        "France",
        "Germany",
        "Greece",
        "Hungary",
        "Iceland",
        "Ireland",
        "Italy",
        "Malta",
        "Netherlands",
        "Norway",
        "Poland",
        "Portugal",
        "Romania",
        "Russia",
        "Serbia",
        "Slovakia",
        "Slovenia",
        "Spain",
        "Switzerland",
        "Turkey",
        # "Ukraine",
        "United Kingdom",
    ],
    "ME": [
        # "Bahrain",
        "Egypt",
        "Israel",
        "Jordan",
        "Qatar",
    ],
    "APAC": [
        "Australia",
        "Bangladesh",
        "China",
        # "Hong Kong",
        "India",
        "Indonesia",
        "Japan",
        # "Kazakhstan",
        "Malaysia",
        # "New Zealand",
        "Pakistan",
        "Philippines",
        "Singapore",
        # "South Korea",
        # "Sri Lanka",
        "Taiwan",
        "Vietnam",
    ],
    "AFRICA": [
        # "Botswana",
        "Kenya",
        "Mauritius",
        "Morocco",
        "Namibia",
        "Nigeria",
        # "South Africa",
        "Uganda",
    ],
}
# Valid group aliases (G7, PIIGS, ...) accepted by get_spread_matrix.
MATRIX_CHOICES = list(MATRIX_COUNTRIES.keys())
@log_start_end(log=logger)
def check_correct_country(country: str, countries: list) -> bool:
    """Return True when *country* is a valid choice; warn and return False otherwise."""
    if country.lower() in countries:
        return True
    # Show the valid choices in the same snake_case form the CLI accepts.
    choices = ", ".join(x.replace(" ", "_").lower() for x in countries)
    console.print(
        f"[red]'{country}' is an invalid country. Choose from {choices}[/red]\n"
    )
    return False
@log_start_end(log=logger)
def countries_string_to_list(countries_list: str) -> List[str]:
    """Split a comma-separated country string into a validated, lower-cased list.

    Parameters
    ----------
    countries_list : str
        String of countries separated by commas

    Returns
    -------
    List[str]
        List of countries

    Raises
    ------
    argparse.ArgumentTypeError
        If none of the provided countries is a valid bond country.
    """
    valid_countries = []
    for raw_country in countries_list.split(","):
        candidate = raw_country.strip()
        # Invalid entries are warned about (inside the check) and skipped.
        if check_correct_country(candidate, BOND_COUNTRIES):
            valid_countries.append(candidate.lower())
    if not valid_countries:
        raise argparse.ArgumentTypeError("No valid countries provided.")
    return valid_countries
@log_start_end(log=logger)
def create_matrix(dictionary: Dict[str, Dict[str, float]]) -> pd.DataFrame:
    """Create matrix of yield and spreads.

    Parameters
    ----------
    dictionary: Dict[str, Dict[str, float]]
        Dictionary of yield data by country. E.g. {'10Y': {'United States': 4.009, 'Canada': 3.48}}

    Returns
    -------
    pd.DataFrame
        Spread matrix.
    """
    maturity = next(iter(dictionary))
    yields = dictionary[maturity]
    countries = list(yields)

    # Pairwise spreads, converted from percentage points to basis points.
    spreads = [
        [round((yields[row] - yields[col]) * 100, 1) for col in countries]
        for row in countries
    ]

    matrixdf = pd.DataFrame(spreads, columns=countries)
    # Countries label both axes of the matrix.
    matrixdf = matrixdf.set_index(matrixdf.columns)
    # Prepend the absolute yield (also scaled by 100) as the first column.
    matrixdf.insert(
        0, "Yield " + maturity, pd.DataFrame.from_dict(yields, orient="index") * 100
    )
    return matrixdf
@log_start_end(log=logger)
def get_spread_matrix(
    countries: Union[str, List[str]] = "G7",
    maturity: str = "10Y",
    change: bool = False,
) -> pd.DataFrame:
    """Get spread matrix. [Source: Investing.com]

    Parameters
    ----------
    countries: Union[str, List[str]]
        Countries or group of countries. List of available countries is accessible through get_ycrv_countries().
    maturity: str
        Maturity to get data. By default 10Y.
    change: bool
        Flag to use 1 day change or not. By default False.

    Returns
    -------
    pd.DataFrame
        Spread matrix.
    """
    # Expand a group alias (e.g. "G7") into its member countries.
    if isinstance(countries, str) and countries.upper() in MATRIX_CHOICES:
        countries = MATRIX_COUNTRIES[countries.upper()]

    current: Dict[str, Dict[str, float]] = {maturity: {}}
    previous: Dict[str, Dict[str, float]] = {maturity: {}}
    no_data_countries = []

    for country in tqdm(countries, desc="Downloading"):
        country = country.title()
        try:
            overview = investpy.bonds.get_bonds_overview(country)
            matching = overview[overview["name"].str.contains(maturity)]
            current[maturity][country] = matching["last"].iloc[0]
            previous[maturity][country] = matching["last_close"].iloc[0]
        except Exception:
            # Any download/lookup failure just marks the country as missing.
            no_data_countries.append(country)

    if no_data_countries:
        s = ", ".join(no_data_countries)
        console.print(f"[red]No data for {s}.[/red]")

    if change:
        # 1-day change: today's matrix minus yesterday's.
        return create_matrix(current) - create_matrix(previous)
    return create_matrix(current)
@log_start_end(log=logger)
def get_ycrv_countries() -> List[str]:
    """Return the countries supported by the ycrv command.

    Returns
    -------
    List[str]
        Names of countries with bond data available.
    """
    return BOND_COUNTRIES
@log_start_end(log=logger)
def get_events_countries() -> List[str]:
    """Return the countries supported by the events command.

    Returns
    -------
    List[str]
        Names of countries with calendar data (includes the "all" sentinel).
    """
    return CALENDAR_COUNTRIES
@log_start_end(log=logger)
def get_events_categories() -> List[str]:
    """Return the event categories supported by the events command.

    Returns
    -------
    List[str]
        Names of the available economic-calendar event categories.
    """
    return CATEGORIES
@log_start_end(log=logger)
def get_yieldcurve(country: str = "United States") -> pd.DataFrame:
    """Get yield curve for specified country. [Source: Investing.com]

    Parameters
    ----------
    country: str
        Country to display yield curve. List of available countries is
        accessible through get_ycrv_countries().

    Returns
    -------
    pd.DataFrame
        Country yield curve with columns Tenor (in years), Current, Previous,
        High, Low, Change and % Change. Empty on invalid country or
        download failure.
    """
    if not check_correct_country(country, BOND_COUNTRIES):
        return pd.DataFrame()
    try:
        data = investpy.bonds.get_bonds_overview(country)
    except Exception:
        console.print(f"[red]Yield curve data not found for {country}.[/red]\n")
        return pd.DataFrame()
    # Drop the first column returned by investpy (country identifier).
    data.drop(columns=data.columns[0], axis=1, inplace=True)
    data.rename(
        columns={
            "name": "Tenor",
            "last": "Current",
            "last_close": "Previous",
            "high": "High",
            "low": "Low",
            "change": "Change",
            "change_percentage": "% Change",
        },
        inplace=True,
    )
    # NOTE(review): NaNs are blanked to "" *before* the subtraction below; if
    # Current/Previous ever contained NaN this would subtract strings and
    # raise. Presumably the feed never returns NaN in those columns — confirm.
    data = data.replace(float("NaN"), "")
    # Change expressed in basis points (difference of percentage yields * 100).
    data["Change"] = (data["Current"] - data["Previous"]) * 100
    # Normalize the tenor label (e.g. "U.S. 10Y" -> "10Y") and convert it to a
    # number of years ("6M" -> 0.5, "10Y" -> 10) for plotting on a numeric axis.
    for i, row in data.iterrows():
        t = row["Tenor"][-3:].strip()
        data.at[i, "Tenor"] = t
        if t[-1] == "M":
            data.at[i, "Tenor"] = int(t[:-1]) / 12
        elif t[-1] == "Y":
            data.at[i, "Tenor"] = int(t[:-1])
    return data
def format_date(date: datetime.date) -> str:
    """Format a date as a zero-padded "DD/MM/YYYY" string.

    Parameters
    ----------
    date : datetime.date
        Date to format.

    Returns
    -------
    str
        Date rendered as "DD/MM/YYYY" with day and month zero-padded.
    """
    # f-string zero-padding replaces the manual '0' + str(...) branches; the
    # year is deliberately left unpadded, matching the original str(date.year).
    return f"{date.day:02d}/{date.month:02d}/{date.year}"
@log_start_end(log=logger)
def get_economic_calendar(
    country: str = "all",
    importance: str = "",
    category: str = "",
    start_date: str = "",
    end_date: str = "",
    limit=100,
) -> Tuple[pd.DataFrame, str]:
    """Get economic calendar [Source: Investing.com]

    Parameters
    ----------
    country: str
        Country selected. List of available countries is accessible through get_events_countries().
    importance: str
        Importance selected from high, medium, low or all
    category: str
        Event category. List of available categories is accessible through get_events_categories().
    start_date: str
        First date to get events, formatted YYYY-MM-DD.
    end_date: str
        Last date to get events, formatted YYYY-MM-DD.
    limit: int
        Maximum number of rows returned, by default 100.

    Returns
    -------
    Tuple[pd.DataFrame, str]
        Economic calendar Dataframe and detail string about country/time zone.
    """
    if not check_correct_country(country, CALENDAR_COUNTRIES):
        return pd.DataFrame(), ""
    time_filter = "time_only"
    countries_list = []
    importances_list = []
    categories_list = []
    if country:
        countries_list = [country.lower()]
    if importance:
        importances_list = [importance.lower()]
    if category:
        categories_list = [category.title()]
    # Joint default for countries and importances
    if countries_list == ["all"]:
        # "all" expands to every concrete country (drop the trailing "all"
        # sentinel) and importance defaults to high when not given.
        countries_list = CALENDAR_COUNTRIES[:-1]
        if not importances_list:
            importances_list = ["high"]
    elif importances_list is None:
        # NOTE(review): importances_list is initialized to [] above, so this
        # branch looks unreachable — kept as-is pending confirmation.
        importances_list = ["all"]
    # Derive a dd/mm/YYYY window for investpy: a single bound implies a 7-day
    # span on the missing side; neither bound means "no date filter".
    if start_date and not end_date:
        end_date_string = (
            datetime.datetime.strptime(start_date, "%Y-%m-%d")
            + datetime.timedelta(days=7)
        ).strftime("%d/%m/%Y")
        start_date_string = (
            datetime.datetime.strptime(start_date, "%Y-%m-%d")
        ).strftime("%d/%m/%Y")
    elif end_date and not start_date:
        start_date_string = (
            datetime.datetime.strptime(end_date, "%Y-%m-%d")
            + datetime.timedelta(days=-7)
        ).strftime("%d/%m/%Y")
        end_date_string = (datetime.datetime.strptime(end_date, "%Y-%m-%d")).strftime(
            "%d/%m/%Y"
        )
    elif end_date and start_date:
        start_date_string = (
            datetime.datetime.strptime(start_date, "%Y-%m-%d")
        ).strftime("%d/%m/%Y")
        end_date_string = (datetime.datetime.strptime(end_date, "%Y-%m-%d")).strftime(
            "%d/%m/%Y"
        )
    else:
        start_date_string = None
        end_date_string = None
    # Get user time zone in GMT offset format
    user_time_zone = pytz.timezone(helper_funcs.get_user_timezone())
    diff = pd.Timestamp.now(tz=user_time_zone).tz_localize(
        None
    ) - pd.Timestamp.utcnow().tz_localize(None)
    # Ceil time difference, might have actual decimal difference
    # between .now() and .utcnow()
    offset = divmod(math.ceil(diff.total_seconds()), 3600)[0]
    sign = "+" if offset > 0 else ""
    time_zone = "GMT " + sign + str(int(offset)) + ":00"
    args = [
        time_filter,
        countries_list,
        importances_list,
        categories_list,
        start_date_string,
        end_date_string,
    ]
    try:
        data = investpy.news.economic_calendar(time_zone, *args)
    except Exception:
        # Retry without an explicit time zone; investpy falls back to GMT.
        try:
            data = investpy.news.economic_calendar(None, *args)
        except Exception:
            console.print(
                f"[red]Economic calendar data not found for {country}.[/red]\n"
            )
            return pd.DataFrame(), ""
    if data.empty:
        logger.error("No data")
        console.print("[red]No data.[/red]\n")
        return pd.DataFrame(), ""
    # Drop investpy's id column and any duplicate events.
    data.drop(columns=data.columns[0], axis=1, inplace=True)
    data.drop_duplicates(keep="first", inplace=True)
    # Rewrite dd/mm/yyyy dates as ISO yyyy-mm-dd so sorting is chronological.
    data["date"] = data["date"].apply(
        lambda date: date[-4:] + "-" + date[3:5] + "-" + date[:2]
    )
    data.sort_values(by=data.columns[0], inplace=True)
    if importances_list:
        if importances_list == ["all"]:
            importances_list = IMPORTANCES
        data = data[data["importance"].isin(importances_list)]
    if time_zone is None:
        time_zone = "GMT"
        console.print("[red]Error on timezone, default was used.[/red]\n")
    data.fillna(value="", inplace=True)
    data.columns = data.columns.str.title()
    # Single-country requests drop the redundant Zone column.
    if len(countries_list) == 1:
        del data["Zone"]
        detail = f"{country.title()} economic calendar ({time_zone})"
    else:
        detail = f"Economic Calendar ({time_zone})"
        data["Zone"] = data["Zone"].str.title()
    data["Importance"] = data["Importance"].str.title()
    data = data[:limit]
    return data, detail
__docformat__ = "numpy"
import logging
from datetime import datetime
from typing import Dict, Optional
import pandas as pd
import yfinance as yf
import financedatabase as fd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Mapping of short index aliases to their display name and Yahoo Finance ticker.
INDICES = {
    "sp500": {"name": "S&P 500 Index", "ticker": "^GSPC"},
    "sp400": {"name": "S&P 400 Mid Cap Index", "ticker": "^SP400"},
    "sp600": {"name": "S&P 600 Small Cap Index", "ticker": "^SP600"},
    "sp500tr": {"name": "S&P 500 TR Index", "ticker": "^SP500TR"},
    "sp_xsp": {"name": "S&P 500 Mini SPX Options Index", "ticker": "^XSP"},
    "nyse_ny": {"name": "NYSE US 100 Index", "ticker": "^NY"},
    "dow_djus": {"name": "Dow Jones US Index", "ticker": "^DJUS"},
    "nyse": {"name": "NYSE Composite Index", "ticker": "^NYA"},
    "amex": {"name": "NYSE-AMEX Composite Index", "ticker": "^XAX"},
    "nasdaq": {"name": "Nasdaq Composite Index", "ticker": "^IXIC"},
    "nasdaq100": {"name": "NASDAQ 100", "ticker": "^NDX"},
    "nasdaq100_ew": {"name": "NASDAQ 100 Equal Weighted Index", "ticker": "^NDXE"},
    "nasdaq50": {"name": "NASDAQ Q50 Index", "ticker": "^NXTQ"},
    "russell1000": {"name": "Russell 1000 Index", "ticker": "^RUI"},
    "russell2000": {"name": "Russell 2000 Index", "ticker": "^RUT"},
    "cboe_bxr": {"name": "CBOE Russell 2000 Buy-Write Index", "ticker": "^BXR"},
    "cboe_bxrt": {
        "name": "CBOE Russell 2000 30-Delta Buy-Write Index",
        "ticker": "^BXRT",
    },
    "russell3000": {"name": "Russell 3000 Index", "ticker": "^RUA"},
    "russellvalue": {"name": "Russell 2000 Value Index", "ticker": "^RUJ"},
    "russellgrowth": {"name": "Russell 2000 Growth Index", "ticker": "^RUO"},
    "w5000": {"name": "Wilshire 5000", "ticker": "^W5000"},
    "w5000flt": {"name": "Wilshire 5000 Float Adjusted Index", "ticker": "^W5000FLT"},
    "dow_dja": {"name": "Dow Jones Composite Average Index", "ticker": "^DJA"},
    "dow_dji": {"name": "Dow Jones Industrial Average Index", "ticker": "^DJI"},
    "ca_tsx": {"name": "TSX Composite Index (CAD)", "ticker": "^GSPTSE"},
    "ca_banks": {"name": "S&P/TSX Composite Banks Index (CAD)", "ticker": "TXBA.TS"},
    "mx_ipc": {"name": "IPC Mexico Index (MXN)", "ticker": "^MXX"},
    "arca_mxy": {"name": "NYSE ARCA Mexico Index (USD)", "ticker": "^MXY"},
    "br_bvsp": {"name": "IBOVESPA Sao Paulo Brazil Index (BRL)", "ticker": "^BVSP"},
    "br_ivbx": {"name": "IVBX2 Indice Valour (BRL)", "ticker": "^IVBX"},
    "ar_mervel": {"name": "S&P MERVAL TR Index (USD)", "ticker": "M.BA"},
    "eu_fteu1": {"name": "FTSE Eurotop 100 Index (EUR)", "ticker": "^FTEU1"},
    "eu_speup": {"name": "S&P Europe 350 Index (EUR)", "ticker": "^SPEUP"},
    "eu_n100": {"name": "Euronext 100 Index (EUR)", "ticker": "^N100"},
    "ftse100": {"name": "FTSE Global 100 Index (GBP)", "ticker": "^FTSE"},
    "ftse250": {"name": "FTSE Global 250 Index (GBP)", "ticker": "^FTMC"},
    "ftse350": {"name": "FTSE Global 350 Index (GBP)", "ticker": "^FTLC"},
    "ftai": {"name": "FTSE AIM All-Share Global Index (GBP)", "ticker": "^FTAI"},
    "uk_ftas": {"name": "UK FTSE All-Share Index (GBP)", "ticker": "^FTAS"},
    "uk_spuk": {"name": "S&P United Kingdom Index (PDS)", "ticker": "^SPUK"},
    "uk_100": {"name": "CBOE UK 100 Index (GBP)", "ticker": "^BUK100P"},
    "ie_iseq": {"name": "ISEQ Irish All Shares Index (EUR)", "ticker": "^ISEQ"},
    "nl_aex": {"name": "Euronext Dutch 25 Index (EUR)", "ticker": "^AEX"},
    "nl_amx": {"name": "Euronext Dutch Mid Cap Index (EUR)", "ticker": "^AMX"},
    "at_atx": {"name": "Wiener Börse Austrian 20 Index (EUR)", "ticker": "^ATX"},
    "at_atx5": {"name": "Vienna ATX Five Index (EUR)", "ticker": "^ATX5"},
    "at_prime": {"name": "Vienna ATX Prime Index (EUR)", "ticker": "^ATXPRIME"},
    "ch_stoxx": {"name": "Zurich STXE 600 PR Index (EUR)", "ticker": "^STOXX"},
    "ch_stoxx50e": {"name": "Zurich ESTX 50 PR Index (EUR)", "ticker": "^STOXX50E"},
    "se_omx30": {"name": "OMX Stockholm 30 Index (SEK)", "ticker": "^OMX"},
    "se_omxspi": {"name": "OMX Stockholm All Share PI (SEK)", "ticker": "^OMXSPI"},
    "se_benchmark": {"name": "OMX Stockholm Benchmark GI (SEK)", "ticker": "^OMXSBGI"},
    "dk_benchmark": {"name": "OMX Copenhagen Benchmark GI (DKK)", "ticker": "^OMXCBGI"},
    "dk_omxc25": {"name": "OMX Copenhagen 25 Index (DKK)", "ticker": "^OMXC25"},
    "fi_omxh25": {"name": "OMX Helsinki 25 (EUR)", "ticker": "^OMXH25"},
    "de_dax40": {"name": "DAX Performance Index (EUR)", "ticker": "^GDAXI"},
    "de_mdax60": {"name": "DAX Mid Cap Performance Index (EUR)", "ticker": "^MDAXI"},
    "de_sdax70": {"name": "DAX Small Cap Performance Index (EUR)", "ticker": "^SDAXI"},
    "de_tecdax30": {"name": "DAX Tech Sector TR Index (EUR)", "ticker": "^TECDAX"},
    "fr_cac40": {"name": "CAC 40 PR Index (EUR)", "ticker": "^FCHI"},
    "fr_next20": {"name": "CAC Next 20 Index (EUR)", "ticker": "^CN20"},
    "it_mib40": {"name": "FTSE MIB 40 Index (EUR)", "ticker": "FTSEMIB.MI"},
    "be_bel20": {"name": "BEL 20 Brussels Index (EUR)", "ticker": "^BFX"},
    "pt_bvlg": {
        "name": "Lisbon PSI All-Share Index GR (EUR)",
        "ticker": "^BVLG",
    },
    "es_ibex35": {"name": "IBEX 35 - Madrid CATS (EUR)", "ticker": "^IBEX"},
    "in_bse": {"name": "S&P Bombay SENSEX (INR)", "ticker": "^BSESN"},
    "in_bse500": {
        "name": "S&P BSE 500 Index (INR)",
        "ticker": "BSE-500.BO",
    },
    "in_bse200": {
        "name": "S&P BSE 200 Index (INR)",
        "ticker": "BSE-200.BO",
    },
    "in_bse100": {
        "name": "S&P BSE 100 Index (INR)",
        "ticker": "BSE-100.BO",
    },
    "in_bse_mcap": {
        "name": "S&P Bombay Mid Cap Index (INR)",
        "ticker": "BSE-MIDCAP.BO",
    },
    "in_bse_scap": {
        "name": "S&P Bombay Small Cap Index (INR)",
        "ticker": "BSE-SMLCAP.BO",
    },
    "in_nse50": {"name": "NSE Nifty 50 Index (INR)", "ticker": "^NSEI"},
    "in_nse_mcap": {"name": "NSE Nifty 50 Mid Cap Index (INR)", "ticker": "^NSEMDCP50"},
    "in_nse_bank": {
        "name": "NSE Nifty Bank Industry Index (INR)",
        "ticker": "^NSEBANK",
    },
    "in_nse500": {"name": "NSE Nifty 500 Index (INR)", "ticker": "^CRSLDX"},
    "il_ta125": {"name": "Tel-Aviv 125 Index (ILS)", "ticker": "^TA125.TA"},
    "za_shariah": {
        "name": "Johannesburg Shariah All Share Index (ZAR)",
        "ticker": "^J143.JO",
    },
    "za_jo": {"name": "Johannesburg All Share Index (ZAR)", "ticker": "^J203.JO"},
    "za_jo_mcap": {
        "name": "Johannesburg Large and Mid Cap Index (ZAR)",
        "ticker": "^J206.JO",
    },
    "za_jo_altex": {
        "name": "Johannesburg Alt Exchange Index (ZAR)",
        "ticker": "^J232.JO",
    },
    "ru_moex": {"name": "MOEX Russia Index (RUB)", "ticker": "IMOEX.ME"},
    "au_aord": {"name": "Australia All Ordinary Share Index (AUD)", "ticker": "^AORD"},
    "au_small": {"name": "S&P/ASX Small Ordinaries Index (AUD)", "ticker": "^AXSO"},
    "au_asx20": {
        "name": "S&P/ASX 20 Index (AUD)",
        "ticker": "^ATLI",
    },
    "au_asx50": {
        "name": "S&P/ASX 50 Index (AUD)",
        "ticker": "^AFLI",
    },
    "au_asx50_mid": {
        "name": "S&P/ASX Mid Cap 50 Index (AUD)",
        "ticker": "^AXMD",
    },
    "au_asx100": {
        "name": "S&P/ASX 100 Index (AUD)",
        "ticker": "^ATOI",
    },
    "au_asx200": {"name": "S&P/ASX 200 Index (AUD)", "ticker": "^AXJO"},
    "au_asx300": {
        "name": "S&P/ASX 300 Index (AUD)",
        "ticker": "^AXKO",
    },
    "au_energy": {
        "name": "S&P/ASX 200 Energy Sector Index (AUD)",
        "ticker": "^AXEJ",
    },
    "au_resources": {
        "name": "S&P/ASX 200 Resources Sector Index (AUD)",
        "ticker": "^AXJR",
    },
    "au_materials": {
        "name": "S&P/ASX 200 Materials Sector Index (AUD)",
        "ticker": "^AXMJ",
    },
    "au_mining": {
        "name": "S&P/ASX 300 Metals and Mining Sector Index (AUD)",
        "ticker": "^AXMM",
    },
    "au_industrials": {
        "name": "S&P/ASX 200 Industrials Sector Index (AUD)",
        "ticker": "^AXNJ",
    },
    "au_discretionary": {
        "name": "S&P/ASX 200 Consumer Discretionary Sector Index (AUD)",
        "ticker": "^AXDJ",
    },
    "au_staples": {
        "name": "S&P/ASX 200 Consumer Staples Sector Index (AUD)",
        "ticker": "^AXSJ",
    },
    "au_health": {
        "name": "S&P/ASX 200 Health Care Sector Index (AUD)",
        "ticker": "^AXHJ",
    },
    "au_financials": {
        "name": "S&P/ASX 200 Financials Sector Index (AUD)",
        "ticker": "^AXFJ",
    },
    "au_reit": {"name": "S&P/ASX 200 A-REIT Industry Index (AUD)", "ticker": "^AXPJ"},
    "au_tech": {"name": "S&P/ASX 200 Info Tech Sector Index (AUD)", "ticker": "^AXIJ"},
    "au_communications": {
        "name": "S&P/ASX 200 Communications Sector Index (AUD)",
        "ticker": "^AXTJ",
    },
    "au_utilities": {
        "name": "S&P/ASX 200 Utilities Sector Index (AUD)",
        "ticker": "^AXUJ",
    },
    # NOTE(review): lowercase "^nz50" — Yahoo lists this symbol as ^NZ50; confirm it resolves.
    "nz50": {"name": "S&P New Zealand 50 Index (NZD)", "ticker": "^nz50"},
    "nz_small": {"name": "S&P/NZX Small Cap Index (NZD)", "ticker": "^NZSC"},
    "kr_kospi": {"name": "KOSPI Composite Index (KRW)", "ticker": "^KS11"},
    "jp_arca": {"name": "NYSE ARCA Japan Index (JPY)", "ticker": "^JPN"},
    # NOTE(review): "Nikkei 255" in the display name is presumably a typo for "Nikkei 225".
    "jp_n225": {"name": "Nikkei 255 Index (JPY)", "ticker": "^N225"},
    "jp_n300": {"name": "Nikkei 300 Index (JPY)", "ticker": "^N300"},
    "jp_nknr": {"name": "Nikkei Avg Net TR Index (JPY)", "ticker": "^NKVI.OS"},
    "jp_nkrc": {"name": "Nikkei Avg Risk Control Index (JPY)", "ticker": "^NKRC.OS"},
    "jp_nklv": {"name": "Nikkei Avg Leverage Index (JPY)", "ticker": "^NKLV.OS"},
    "jp_nkcc": {"name": "Nikkei Avg Covered Call Index (JPY)", "ticker": "^NKCC.OS"},
    "jp_nkhd": {
        "name": "Nikkei Avg High Dividend Yield Index (JPY)",
        "ticker": "^NKHD.OS",
    },
    "jp_auto": {
        "name": "Nikkei 500 Auto & Auto Parts Index (JPY)",
        "ticker": "^NG17.OS",
    },
    "jp_fintech": {
        "name": "Global Fintech Japan Hedged Index (JPY)",
        "ticker": "^FDSFTPRJPY",
    },
    "jp_nkdh": {"name": "Nikkei Average USD Hedge Index (JPY)", "ticker": "^NKDH.OS"},
    "jp_nkeh": {"name": "Nikkei Average EUR Hedge Index (JPY)", "ticker": "^NKEH.OS"},
    "jp_ndiv": {
        "name": "Nikkei Average Double Inverse Index (JPY)",
        "ticker": "^NDIV.OS",
    },
    "cn_csi300": {"name": "China CSI 300 Index (CNY)", "ticker": "000300.SS"},
    "cn_sse_comp": {"name": "SSE Composite Index (CNY)", "ticker": "000001.SS"},
    "cn_sse_a": {"name": "SSE A Share Index (CNY)", "ticker": "000002.SS"},
    "cn_szse_comp": {"name": "SZSE Component Index (CNY)", "ticker": "399001.SZ"},
    "cn_szse_a": {"name": "SZSE A-Shares Index (CNY)", "ticker": "399107.SZ"},
    "tw_twii": {"name": "TSEC Weighted Index (TWD)", "ticker": "^TWII"},
    "tw_tpai": {"name": "TSEC Paper and Pulp Subindex (TWD)", "ticker": "^TPAI"},
    "hk_hsi": {"name": "Hang Seng Index (HKD)", "ticker": "^HSI"},
    "hk_utilities": {
        "name": "Hang Seng Utilities Sector Index (HKD)",
        "ticker": "^HSNU",
    },
    "hk_china": {
        "name": "Hang Seng China-Affiliated Corporations Index (HKD)",
        "ticker": "^HSCC",
    },
    "hk_finance": {"name": "Hang Seng Finance Sector Index (HKD)", "ticker": "^HSNF"},
    "hk_properties": {
        "name": "Hang Seng Properties Sector Index (HKD)",
        "ticker": "^HSNP",
    },
    "hk_hko": {"name": "NYSE ARCA Hong Kong Options Index (USD)", "ticker": "^HKO"},
    "id_jkse": {"name": "Jakarta Composite Index (IDR)", "ticker": "^JKSE"},
    "id_lq45": {
        "name": "Indonesia Stock Exchange LQ45 Index (IDR)",
        "ticker": "^JKLQ45",
    },
    "my_klci": {"name": "FTSE Kuala Lumpur Composite Index (MYR)", "ticker": "^KLSE"},
    "ph_psei": {"name": "Philippine Stock Exchange Index (PHP)", "ticker": "PSEI.PS"},
    "sg_sti": {"name": "STI Singapore Index (SGD)", "ticker": "^STI"},
    "th_set": {"name": "Thailand SET Index (THB)", "ticker": "^SET.BK"},
    "sp_energy_ig": {
        "name": "S&P 500 Energy (Industry Group) Index",
        "ticker": "^SP500-1010",
    },
    "sp_energy_equipment": {
        "name": "S&P 500 Energy Equipment & Services Industry Index",
        "ticker": "^SP500-101010",
    },
    "sp_energy_oil": {
        "name": "S&P 500 Oil, Gas & Consumable Fuels Industry Index",
        "ticker": "^SP500-101020",
    },
    "sp_materials_sector": {
        "name": "S&P 500 Materials Sector Index",
        "ticker": "^SP500-15",
    },
    "sp_materials_ig": {
        "name": "S&P 500 Materials (Industry Group) Index",
        "ticker": "^SP500-1510",
    },
    "sp_materials_construction": {
        "name": "S&P 500 Construction Materials Industry Index",
        "ticker": "^SP500-151020",
    },
    "sp_materials_metals": {
        "name": "S&P 500 Mining & Metals Industry Index",
        "ticker": "^SP500-151040",
    },
    "sp_industrials_sector": {
        "name": "S&P 500 Industrials Sector Index",
        "ticker": "^SP500-20",
    },
    "sp_industrials_goods_ig": {
        "name": "S&P 500 Capital Goods (Industry Group) Index",
        "ticker": "^SP500-2010",
    },
    "sp_industrials_aerospace": {
        "name": "S&P 500 Aerospace & Defense Industry Index",
        "ticker": "^SP500-201010",
    },
    "sp_industrials_building": {
        "name": "S&P 500 Building Products Industry Index",
        "ticker": "^SP500-201020",
    },
    "sp_industrials_construction": {
        "name": "S&P 500 Construction & Engineering Industry Index",
        "ticker": "^SP500-201030",
    },
    "sp_industrials_electrical": {
        "name": "S&P 500 Electrical Equipment Industry Index",
        "ticker": "^SP500-201040",
    },
    "sp_industrials_conglomerates": {
        "name": "S&P 500 Industrial Conglomerates Industry Index",
        "ticker": "^SP500-201050",
    },
    "sp_industrials_machinery": {
        "name": "S&P 500 Machinery Industry Index",
        "ticker": "^SP500-201060",
    },
    "sp_industrials_distributors": {
        "name": "S&P 500 Trading Companies & Distributors Industry Index",
        "ticker": "^SP500-201070",
    },
    "sp_industrials_services_ig": {
        "name": "S&P 500 Commercial & Professional Services (Industry Group) Index",
        "ticker": "^SP500-2020",
    },
    "sp_industrials_services_supplies": {
        "name": "S&P 500 Commercial Services & Supplies Industry Index",
        "ticker": "^SP500-202010",
    },
    "sp_industrials_transport_ig": {
        "name": "S&P 500 Transportation (Industry Group) Index",
        "ticker": "^SP500-2030",
    },
    "sp_industrials_transport_air": {
        "name": "S&P 500 Air Freight & Logistics Industry",
        "ticker": "^SP500-203010",
    },
    "sp_industrials_transport_airlines": {
        "name": "S&P 500 Airlines Industry Index",
        "ticker": "^SP500-203020",
    },
    "sp_industrials_transport_ground": {
        "name": "S&P 500 Road & Rail Industry Index",
        "ticker": "^SP500-203040",
    },
    "sp_discretionary_sector": {
        "name": "S&P 500 Consumer Discretionary Index",
        "ticker": "^SP500-25",
    },
    "sp_discretionary_autos-ig": {
        "name": "S&P 500 Automobiles and Components (Industry Group) Index",
        "ticker": "^SP500-2510",
    },
    "sp_discretionary_auto_components": {
        "name": "S&P 500 Auto Components Industry Index",
        "ticker": "^SP500-251010",
    },
    "sp_discretionary_autos": {
        "name": "S&P 500 Automobiles Industry Index",
        "ticker": "^SP500-251020",
    },
    "sp_discretionary_durables_ig": {
        "name": "S&P 500 Consumer Durables & Apparel (Industry Group) Index",
        "ticker": "^SP500-2520",
    },
    "sp_discretionary_durables_household": {
        "name": "S&P 500 Household Durables Industry Index",
        "ticker": "^SP500-252010",
    },
    "sp_discretionary_leisure": {
        "name": "S&P 500 Leisure Products Industry Index",
        "ticker": "^SP500-252020",
    },
    "sp_discretionary_textiles": {
        "name": "S&P 500 Textiles, Apparel & Luxury Goods Industry Index",
        "ticker": "^SP500-252030",
    },
    "sp_discretionary_services_consumer": {
        "name": "S&P 500 Consumer Services (Industry Group) Index",
        "ticker": "^SP500-2530",
    },
    "sp_staples_sector": {
        "name": "S&P 500 Consumer Staples Sector Index",
        "ticker": "^SP500-30",
    },
    "sp_staples_retail_ig": {
        "name": "S&P 500 Food & Staples Retailing (Industry Group) Index",
        "ticker": "^SP500-3010",
    },
    "sp_staples_food_ig": {
        "name": "S&P 500 Food Beverage & Tobacco (Industry Group) Index",
        "ticker": "^SP500-3020",
    },
    "sp_staples_beverages": {
        "name": "S&P 500 Beverages Industry Index",
        "ticker": "^SP500-302010",
    },
    "sp_staples_products_food": {
        "name": "S&P 500 Food Products Industry Index",
        "ticker": "^SP500-302020",
    },
    "sp_staples_tobacco": {
        "name": "S&P 500 Tobacco Industry Index",
        "ticker": "^SP500-302030",
    },
    "sp_staples_household_ig": {
        "name": "S&P 500 Household & Personal Products (Industry Group) Index",
        "ticker": "^SP500-3030",
    },
    "sp_staples_products_household": {
        "name": "S&P 500 Household Products Industry Index",
        "ticker": "^SP500-303010",
    },
    "sp_staples_products_personal": {
        "name": "S&P 500 Personal Products Industry Index",
        "ticker": "^SP500-303020",
    },
    "sp_health_sector": {
        "name": "S&P 500 Health Care Sector Index",
        "ticker": "^SP500-35",
    },
    "sp_health_equipment": {
        "name": "S&P 500 Health Care Equipment & Services (Industry Group) Index",
        "ticker": "^SP500-3510",
    },
    "sp_health_supplies": {
        "name": "S&P 500 Health Care Equipment & Supplies Industry Index",
        "ticker": "^SP500-351010",
    },
    "sp_health_providers": {
        "name": "S&P 500 Health Care Providers & Services Industry Index",
        "ticker": "^SP500-351020",
    },
    "sp_health_sciences": {
        "name": "S&P 500 Pharmaceuticals, Biotechnology & Life Sciences (Industry Group) Index",
        "ticker": "^SP500-3520",
    },
    "sp_health_biotech": {
        "name": "S&P 500 Biotechnology Industry Index",
        "ticker": "^SP500-352010",
    },
    "sp_health_pharma": {
        "name": "S&P 500 Pharmaceuticals Industry Index",
        "ticker": "^SP500-352020",
    },
    "sp_financials_sector": {
        "name": "S&P 500 Financials Sector Index",
        "ticker": "^SP500-40",
    },
    "sp_financials_diversified_ig": {
        "name": "S&P 500 Diversified Financials (Industry Group) Index",
        "ticker": "^SP500-4020",
    },
    "sp_financials_services": {
        "name": "S&P 500 Diversified Financial Services Industry Index",
        "ticker": "^SP500-402010",
    },
    "sp_financials_consumer": {
        "name": "S&P 500 Consumer Finance Industry Index",
        "ticker": "^SP500-402020",
    },
    "sp_financials_capital": {
        "name": "S&P 500 Capital Markets Industry Index",
        "ticker": "^SP500-402030",
    },
    "sp_it_sector": {
        "name": "S&P 500 IT Sector Index",
        "ticker": "^SP500-45",
    },
    "sp_it_saas_ig": {
        "name": "S&P 500 Software and Services (Industry Group) Index",
        "ticker": "^SP500-4510",
    },
    "sp_it_software": {
        "name": "S&P 500 Software Industry Index",
        "ticker": "^SP500-451030",
    },
    "sp_it_hardware": {
        "name": "S&P 500 Technology Hardware Equipment (Industry Group) Index",
        "ticker": "^SP500-4520",
    },
    "sp_it_semi": {
        "name": "S&P 500 Semiconductor & Semiconductor Equipment Industry",
        "ticker": "^SP500-453010",
    },
    "sp_communications_sector": {
        "name": "S&P 500 Communications Sector Index",
        "ticker": "^SP500-50",
    },
    "sp_communications_telecom": {
        "name": "S&P 500 Diversified Telecommunications Services Industry Index",
        "ticker": "^SP500-501010",
    },
    "sp_utilities_sector": {
        "name": "S&P 500 Utilities Sector Index",
        "ticker": "^SP500-55",
    },
    "sp_utilities_electricity": {
        "name": "S&P 500 Electric Utilities Index",
        "ticker": "^SP500-551010",
    },
    "sp_utilities_multis": {
        "name": "S&P 500 Multi-Utilities Industry Index",
        "ticker": "^SP500-551030",
    },
    "sp_re_sector": {
        "name": "S&P 500 Real Estate Sector Index",
        "ticker": "^SP500-60",
    },
    "sp_re_ig": {
        "name": "S&P 500 Real Estate (Industry Group) Index",
        "ticker": "^SP500-6010",
    },
    "sphyda": {"name": "S&P High Yield Aristocrats Index", "ticker": "^SPHYDA"},
    "dow_djt": {"name": "Dow Jones Transportation Average Index", "ticker": "^DJT"},
    "dow_dju": {"name": "Dow Jones Utility Average Index", "ticker": "^DJU"},
    "dow_rci": {"name": "Dow Jones Composite All REIT Index", "ticker": "^RCI"},
    "reit_fnar": {"name": "FTSE Nareit All Equity REITs Index", "ticker": "^FNAR"},
    "nq_ixch": {"name": "NASDAQ Health Care Index", "ticker": "^IXCH"},
    "nq_nbi": {"name": "NASDAQ Biotech Index", "ticker": "^NBI"},
    "nq_tech": {"name": "NASDAQ 100 Technology Sector Index", "ticker": "^NDXT"},
    "nq_ex_tech": {"name": "NASDAQ 100 Ex-Tech Sector Index", "ticker": "^NDXX"},
    "nq_ixtc": {"name": "NASDAQ Telecommunications Index", "ticker": "^IXTC"},
    "nq_inds": {"name": "NASDAQ Industrial Index", "ticker": "^INDS"},
    # NOTE(review): ticker ^INCO for the NASDAQ Computer Index — ^IXCO expected; confirm.
    "nq_ixco": {"name": "NASDAQ Computer Index", "ticker": "^INCO"},
    "nq_bank": {"name": "NASDAQ Bank Index", "ticker": "^BANK"},
    "nq_bkx": {"name": "KBW NASDAQ Bank Index", "ticker": "^BKX"},
    "nq_krx": {"name": "KBW NASDAQ Regional Bank Index", "ticker": "^KRX"},
    "nq_kix": {"name": "KBW NASDAQ Insurance Index", "ticker": "^KIX"},
    "nq_ksx": {"name": "KBW NASDAQ Capital Markets Index", "ticker": "^KSX"},
    "nq_tran": {"name": "NASDAQ Transportation Index", "ticker": "^TRAN"},
    "ice_auto": {"name": "ICE FactSet Global NextGen Auto Index", "ticker": "^ICEFSNA"},
    "ice_comm": {
        "name": "ICE FactSet Global NextGen Communications Index",
        "ticker": "^ICEFSNC",
    },
    "nyse_nyl": {"name": "NYSE World Leaders Index", "ticker": "^NYL"},
    "nyse_nyi": {"name": "NYSE International 100 Index", "ticker": "^NYI"},
    "nyse_nyy": {"name": "NYSE TMT Index", "ticker": "^NYY"},
    "arca_xmi": {"name": "NYSE ARCA Major Market Index", "ticker": "^XMI"},
    "arca_xbd": {"name": "NYSE ARCA Securities Broker/Dealer Index", "ticker": "^XBD"},
    "arca_xii": {"name": "NYSE ARCA Institutional Index", "ticker": "^XII"},
    "arca_xoi": {"name": "NYSE ARCA Oil and Gas Index", "ticker": "^XOI"},
    "arca_xng": {"name": "NYSE ARCA Natural Gas Index", "ticker": "^XNG"},
    "arca_hui": {"name": "NYSE ARCA Gold Bugs Index", "ticker": "^HUI"},
    "arca_ixb": {"name": "NYSE Materials Select Sector Index", "ticker": "^IXB"},
    "arca_drg": {"name": "NYSE ARCA Phramaceutical Index", "ticker": "^DRG"},
    # NOTE(review): ^BKT looks like a typo for ^BTK (NYSE ARCA Biotech Index) — confirm.
    "arca_btk": {"name": "NYSE ARCA Biotech Index", "ticker": "^BKT"},
    "arca_pse": {"name": "NYSE ARCA Tech 100 Index", "ticker": "^PSE"},
    "arca_nwx": {"name": "NYSE ARCA Networking Index", "ticker": "^NWX"},
    "arca_xci": {"name": "NYSE ARCA Computer Tech Index", "ticker": "^XCI"},
    "arca_xal": {"name": "NYSE ARCA Airline Index", "ticker": "^XAL"},
    "arca_xtc": {"name": "NYSE ARCA N.A. Telecom Industry Index", "ticker": "^XTC"},
    "phlx_sox": {"name": "PHLX Semiconductor Index", "ticker": "^SOX"},
    "phlx_xau": {"name": "PHLX Gold/Silver Index", "ticker": "^XAU"},
    "phlx_hgx": {"name": "PHLX Housing Sector Index", "ticker": "^HGX"},
    "phlx_osx": {"name": "PHLX Oil Services Sector Index", "ticker": "^OSX"},
    "phlx_uty": {"name": "PHLX Utility Sector Index", "ticker": "^UTY"},
    "w5klcg": {"name": "Wilshire US Large Cap Growth Index", "ticker": "^W5KLCG"},
    "w5klcv": {"name": "Wilshire US Large Cap Value Index", "ticker": "^W5KLCV"},
    "reit_wgreit": {"name": "Wilshire Global REIT Index", "ticker": "^WGREIT"},
    "reit_wgresi": {
        "name": "Wilshire Global Real Estate Sector Index",
        "ticker": "^WGRESI",
    },
    "reit_wilreit": {"name": "Wilshire US REIT Index", "ticker": "^WILREIT"},
    "reit_wilresi": {
        "name": "Wilshire US Real Estate Security Index",
        "ticker": "^WILRESI",
    },
    "cboe_bxm": {"name": "CBOE Buy-Write Monthly Index", "ticker": "^BXM"},
    "cboe_vix": {"name": "CBOE Volatility Index", "ticker": "^VIX"},
    "cboe_vin": {"name": "CBOE Near-Term VIX Index", "ticker": "^VIN"},
    "cboe_vvix": {"name": "CBOE VIX Volatility Index", "ticker": "^VVIX"},
    "cboe_shortvol": {"name": "CBOE Short VIX Futures Index", "ticker": "^SHORTVOL"},
    "cboe_skew": {"name": "CBOE Skew Index", "ticker": "^SKEW"},
    "cboe_vxn": {"name": "CBOE NASDAQ 100 Volatility Index", "ticker": "^VXN"},
    "cboe_gvz": {"name": "CBOE Gold Volatility Index", "ticker": "^GVZ"},
    "cboe_ovx": {"name": "CBOE Crude Oil Volatility Index", "ticker": "^OVX"},
    "cboe_tnx": {"name": "CBOE Interest Rate 10 Year T-Note", "ticker": "^TNX"},
    "cboe_tyx": {"name": "CBOE 30 year Treasury Yields", "ticker": "^TYX"},
    "cboe_irx": {"name": "CBOE 13 Week Treasury Bill", "ticker": "^IRX"},
    "move": {"name": "ICE BofAML Move Index", "ticker": "^MOVE"},
    "dxy": {"name": "US Dollar Index", "ticker": "DX-Y.NYB"},
    "crypto200": {"name": "CMC Crypto 200 Index by Solacti", "ticker": "^CMC200"},
}
@log_start_end(log=logger)
def get_index(
    index: str,
    interval: str = "1d",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    column: str = "Adj Close",
) -> pd.Series:
    """Obtain data on any index [Source: Yahoo Finance]

    Parameters
    ----------
    index: str
        The index you wish to collect data for.
    interval : str
        Valid intervals: 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo or 3mo
        Intraday data cannot extend last 60 days
    start_date : Optional[str]
        The starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : Optional[str]
        The ending date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.
    column : str
        The column you wish to select, by default this is Adjusted Close.

    Returns
    -------
    pd.Series
        A series with the requested index. Empty on invalid dates, unknown
        ticker/column, or when no data is returned.
    """
    # Resolve a friendly index key (e.g. "cboe_vix") to its Yahoo ticker;
    # otherwise treat the input as a raw ticker symbol.
    if index.lower() in INDICES:
        ticker = INDICES[index.lower()]["ticker"]
    else:
        ticker = index

    # Validate date formats before hitting the API.
    try:
        if start_date:
            datetime.strptime(str(start_date), "%Y-%m-%d")
        if end_date:
            datetime.strptime(str(end_date), "%Y-%m-%d")
    except ValueError:
        console.print("[red]Please format date as YYYY-MM-DD[/red]\n")
        return pd.Series(dtype="object")

    index_data = yf.download(
        ticker,
        start=start_date,
        end=end_date,
        interval=interval,
        progress=False,
        show_errors=False,
    )

    # Check emptiness before the column check: a failed download has no
    # columns, which would otherwise print a confusing empty
    # "Please choose between:" message.
    if index_data.empty or len(index_data) < 2:
        console.print(
            f"The chosen index {ticker}, returns no data. Please check if "
            f"there is any data available.\n"
        )
        return pd.Series(dtype="float64")

    if column not in index_data.columns:
        console.print(
            f"The chosen column is not available for {ticker}. Please choose "
            f"between: {', '.join(index_data.columns)}\n"
        )
        return pd.Series(dtype="float64")

    return index_data[column]
@log_start_end(log=logger)
def get_available_indices() -> Dict[str, Dict[str, str]]:
    """Get available indices

    Returns
    -------
    Dict[str, Dict[str, str]]
        Dictionary mapping each index key to its detail dict, which holds the
        human-readable ``name`` and the Yahoo Finance ``ticker``.
    """
    # Expose the module-level INDICES mapping as-is.
    return INDICES
@log_start_end(log=logger)
def get_indices(
    indices: list,
    interval: str = "1d",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    column: str = "Adj Close",
    returns: bool = False,
) -> pd.DataFrame:
    """Get data on selected indices over time [Source: Yahoo Finance]

    Parameters
    ----------
    indices: list
        A list of indices to get data. Available indices can be accessed through economy.available_indices().
    interval: str
        Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
        Intraday data cannot extend last 60 days
    start_date : Optional[str]
        The starting date, format "YEAR-MONTH-DAY", i.e. 2010-12-31.
    end_date : Optional[str]
        The end date, format "YEAR-MONTH-DAY", i.e. 2020-06-05.
    column : str
        Which column to load in, by default "Adjusted Close".
    returns: bool
        Flag to show cumulative returns on index

    Returns
    -------
    pd.Dataframe
        Dataframe with historical data on selected indices.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.economy.available_indices()
    >>> openbb.economy.index(["^GSPC", "sp400"])
    """
    indices_data: pd.DataFrame = pd.DataFrame()

    # One column per requested index; unknown indices come back as empty
    # series from get_index.
    for index in indices:
        indices_data[index] = get_index(index, interval, start_date, end_date, column)

    if returns:
        # Convert price levels to cumulative growth: compound (1 + pct change).
        indices_data = indices_data.pct_change().dropna()
        indices_data = indices_data + 1
        indices_data = indices_data.cumprod()

    return indices_data
@log_start_end(log=logger)
def get_search_indices(keyword: list, limit: int = 10) -> tuple:
    """Search indices by keyword. [Source: FinanceDatabase]

    Parameters
    ----------
    keyword: list
        The keyword you wish to search for. This can include spaces.
    limit: int
        The amount of views you want to show, by default this is set to 10.

    Returns
    -------
    tuple
        The adjusted keyword string that was searched for, and a
        pd.DataFrame with the available options.
    """
    # Accept either a comma-joined string or a list of words.
    if isinstance(keyword, str):
        keyword_adjusted = keyword.replace(",", " ")
    else:
        keyword_adjusted = " ".join(keyword)

    indices = fd.select_indices()

    queried_indices = pd.DataFrame.from_dict(
        fd.search_products(indices, keyword_adjusted, "short_name"), orient="index"
    )
    queried_indices = queried_indices.iloc[:limit]

    return keyword_adjusted, queried_indices
__docformat__ = "numpy"
import os
import logging
import textwrap
from typing import List, Optional, Tuple
from datetime import datetime, timedelta
from requests import HTTPError
import fred
import pandas as pd
import requests
from fredapi import Fred
import certifi
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def check_series_id(series_id: str) -> dict:
    """Checks if series ID exists in fred

    Parameters
    ----------
    series_id: str
        Series ID to check

    Returns
    -------
    dict
        Dictionary of series information; empty when the series does not
        exist, the API key is invalid, or the server errors out.
    """
    url = f"https://api.stlouisfed.org/fred/series?series_id={series_id}&api_key={cfg.API_FRED_KEY}&file_type=json"
    r = requests.get(url, headers={"User-Agent": get_user_agent()})
    # Initialize up front so unexpected status codes (e.g. 401/404) cannot
    # raise UnboundLocalError on the return below.
    payload: dict = {}

    # The above returns 200 if series is found
    # There seems to be an occasional bug giving a 503 response where the json decoding fails
    if r.status_code == 200:
        payload = r.json()
    # cover invalid api keys & series does not exist
    elif r.status_code == 400:
        if "api_key" in r.json()["error_message"]:
            console.print("[red]Invalid API Key[/red]\n")
            logger.error("[red]Invalid API Key[/red]\n")
        elif "The series does not exist" in r.json()["error_message"]:
            console.print(f"[red]{series_id} not found.[/red]\n")
            logger.error("%s not found", str(series_id))
        else:
            console.print(r.json()["error_message"])
            logger.error(r.json()["error_message"])
    # 5xx (server errors) and any other status fall through with the empty payload
    return payload
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def get_series_notes(search_query: str, limit: int = -1) -> pd.DataFrame:
    """Get series notes. [Source: FRED]

    Parameters
    ----------
    search_query : str
        Text query to search on fred series notes database
    limit : int
        Maximum number of series notes to display

    Returns
    -------
    pd.DataFrame
        DataFrame of matched series; empty on errors or when nothing matched.
    """
    fred.key(cfg.API_FRED_KEY)
    d_series = fred.search(search_query)

    df_fred = pd.DataFrame()

    if "error_message" in d_series:
        # Distinguish a bad key from other API errors.
        if "api_key" in d_series["error_message"]:
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(d_series["error_message"])
    elif d_series.get("seriess"):
        df_fred = pd.DataFrame(d_series["seriess"])
        df_fred["notes"] = df_fred["notes"].fillna("No description provided.")
    else:
        # "seriess" missing or empty -> nothing matched the query.
        console.print("No matches found. \n")

    # Wrap long text fields so they render nicely in the terminal table.
    def _wrap_text(value, width):
        if isinstance(value, str):
            return "\n".join(textwrap.wrap(value, width=width))
        return value

    if "notes" in df_fred.columns:
        df_fred["notes"] = df_fred["notes"].apply(lambda x: _wrap_text(x, 100))
    if "title" in df_fred.columns:
        df_fred["title"] = df_fred["title"].apply(lambda x: _wrap_text(x, 50))

    if limit != -1:
        df_fred = df_fred[:limit]

    return df_fred
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def get_series_ids(search_query: str, limit: int = -1) -> pd.DataFrame:
    """Get Series IDs. [Source: FRED]

    Parameters
    ----------
    search_query : str
        Text query to search on fred series notes database
    limit : int
        Maximum number of series IDs to output; -1 returns all matches

    Returns
    -------
    pd.Dataframe
        Dataframe with series IDs and titles, sorted by popularity.
        Empty when nothing matched or the request failed.
    """
    fred.key(cfg.API_FRED_KEY)
    d_series = fred.search(search_query)

    # Cover invalid api and empty search terms. Return an empty DataFrame
    # (the original returned a `[], []` tuple, contradicting the annotation
    # and the success path).
    if "error_message" in d_series:
        if "api_key" in d_series["error_message"]:
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(d_series["error_message"])
        return pd.DataFrame()

    if not d_series.get("seriess"):
        return pd.DataFrame()

    df_series = pd.DataFrame(d_series["seriess"])
    # Most popular series first
    df_series = df_series.sort_values(by=["popularity"], ascending=False)
    if limit != -1:
        df_series = df_series.head(limit)
    df_series = df_series[["id", "title"]]
    df_series.set_index("id", inplace=True)
    return df_series
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def get_series_data(
    series_id: str, start_date: Optional[str] = None, end_date: Optional[str] = None
) -> pd.DataFrame:
    """Get Series data. [Source: FRED]

    Parameters
    ----------
    series_id : str
        Series ID to get data from
    start_date : Optional[str]
        Start date to get data from, format yyyy-mm-dd
    end_date : Optional[str]
        End data to get from, format yyyy-mm-dd

    Returns
    -------
    pd.DataFrame
        Series data; empty when the request fails.
    """
    result = pd.DataFrame()
    try:
        # Necessary for installer so that it can locate the correct certificates for
        # API calls and https
        # https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error/73270162#73270162
        ca_bundle = certifi.where()
        os.environ["REQUESTS_CA_BUNDLE"] = ca_bundle
        os.environ["SSL_CERT_FILE"] = ca_bundle
        result = Fred(cfg.API_FRED_KEY).get_series(series_id, start_date, end_date)
    except HTTPError as error:
        # Raised for unknown series ids and invalid api keys
        console.print(error)
    return result
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def get_aggregated_series_data(
    series_ids: List[str],
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> Tuple[pd.DataFrame, dict]:
    """Get Series data. [Source: FRED]

    Parameters
    ----------
    series_ids : List[str]
        Series ID to get data from
    start_date : str
        Start date to get data from, format yyyy-mm-dd
    end_date : str
        End data to get from, format yyyy-mm-dd

    Returns
    -------
    pd.DataFrame
        Series data, one column per requested id
    dict
        Dictionary of series ids and titles
    """
    data = pd.DataFrame()
    detail = {}

    # First pass: collect title/units metadata for every id that exists.
    for requested_id in series_ids:
        meta = check_series_id(requested_id)
        if "seriess" in meta:
            detail[requested_id] = {
                "title": meta["seriess"][0]["title"],
                "units": meta["seriess"][0]["units_short"],
            }

    # Second pass: fetch the actual observations, one column per series.
    for requested_id in series_ids:
        observations = pd.DataFrame(
            get_series_data(requested_id, start_date, end_date),
            columns=[requested_id],
        ).dropna()
        data[requested_id] = observations[requested_id]

    return data, detail
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def get_yield_curve(
    date: str = "", return_date: bool = False
) -> Tuple[pd.DataFrame, str]:
    """Gets yield curve data from FRED

    Parameters
    ----------
    date: str
        Date to get curve for. If empty, gets most recent date (format yyyy-mm-dd)
    return_date: bool
        If True, returns date of yield curve

    Returns
    -------
    Tuple[pd.DataFrame, str]
        Dataframe of yields and maturities,
        Date for which the yield curve is obtained

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> ycrv_df = openbb.economy.ycrv()

    Since there is a delay with the data, the most recent date is returned and can be accessed with return_date=True

    >>> ycrv_df, ycrv_date = openbb.economy.ycrv(return_date=True)
    """
    # Necessary for installer so that it can locate the correct certificates for
    # API calls and https
    # https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error/73270162#73270162
    # os.environ["REQUESTS_CA_BUNDLE"] = certifi.where()
    # os.environ["SSL_CERT_FILE"] = certifi.where()
    fredapi_client = Fred(cfg.API_FRED_KEY)
    # Constant-maturity Treasury series, one per tenor on the curve.
    # The order here must match the Maturity list inserted at the bottom.
    fred_series = {
        "1Month": "DGS1MO",
        "3Month": "DGS3MO",
        "6Month": "DGS6MO",
        "1Year": "DGS1",
        "2Year": "DGS2",
        "3Year": "DGS3",
        "5Year": "DGS5",
        "7Year": "DGS7",
        "10Year": "DGS10",
        "20Year": "DGS20",
        "30Year": "DGS30",
    }
    df = pd.DataFrame()

    # Check that the date is in the past
    today = datetime.now().strftime("%Y-%m-%d")
    if date and date >= today:
        console.print("[red]Date cannot be today or in the future[/red]")
        if return_date:
            return pd.DataFrame(), date
        return pd.DataFrame()

    # Add in logic that will get the most recent date.
    # When no date is given, pull the last week of data and take the most
    # recent available trading day from it.
    get_last = False
    if not date:
        date = (datetime.now() - timedelta(days=7)).strftime("%Y-%m-%d")
        get_last = True

    # Fetch each tenor from `date` onward and align the columns on the index.
    for key, s_id in fred_series.items():
        df = pd.concat(
            [
                df,
                pd.DataFrame(fredapi_client.get_series(s_id, date), columns=[key]),
            ],
            axis=1,
        )
    if df.empty:
        if return_date:
            return pd.DataFrame(), date
        return pd.DataFrame()
    # Drop rows with NaN -- corresponding to weekends typically
    df = df.dropna()
    if date not in df.index or get_last:
        # If get_last is true, take the most recent (last) row; otherwise fall
        # back to the first available row at/after the requested date.
        idx = -1 if get_last else 0
        date_of_yield = df.index[idx].strftime("%Y-%m-%d")
        rates = pd.DataFrame(df.iloc[idx, :].values, columns=["Rate"])
    else:
        # Exact requested date is available.
        date_of_yield = date
        series = df[df.index == date]
        if series.empty:
            return pd.DataFrame(), date_of_yield
        rates = pd.DataFrame(series.values.T, columns=["Rate"])
    # Maturities in years, matching fred_series order (1/12 = 1 month).
    rates.insert(
        0,
        "Maturity",
        [1 / 12, 0.25, 0.5, 1, 2, 3, 5, 7, 10, 20, 30],
    )
    if return_date:
        return rates, date_of_yield
    return rates
__docformat__ = "numpy"
import argparse
import logging
import os
from typing import List, Optional, Union
from datetime import datetime as dt
import pandas as pd
import requests
from openbb_terminal.config_terminal import API_KEY_QUANDL
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_economic_calendar(
    countries: Union[List[str], str] = "",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Get economic calendar for countries between specified dates

    Parameters
    ----------
    countries : [List[str],str]
        List of countries to include in calendar. Empty returns all
    start_date : Optional[str]
        Start date for calendar, format yyyy-mm-dd. Defaults to today.
    end_date : Optional[str]
        End date for calendar, format yyyy-mm-dd. Defaults to today.

    Returns
    -------
    pd.DataFrame
        Economic calendar

    Examples
    --------
    Get todays economic calendar for the United States

    >>> from openbb_terminal.sdk import openbb
    >>> calendar = openbb.economy.events("United States")

    To get multiple countries for a given date, pass the same start and end date as well as
    a list of countries

    >>> calendars = openbb.economy.events(["United States","Canada"], start_date="2022-11-18", end_date="2022-11-18")
    """
    # Default to today's calendar when no range is given.
    if start_date is None:
        start_date = dt.now().strftime("%Y-%m-%d")
    if end_date is None:
        end_date = dt.now().strftime("%Y-%m-%d")

    # Normalize the countries argument to a list.
    if countries == "":
        countries = []
    if isinstance(countries, str):
        countries = [countries]

    if start_date == end_date:
        dates = [start_date]
    else:
        dates = (
            pd.date_range(start=start_date, end=end_date).strftime("%Y-%m-%d").tolist()
        )

    # The API rejects requests without a browser-like user agent.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36"
    }
    calendar = pd.DataFrame()
    for date in dates:
        try:
            response = requests.get(
                f"https://api.nasdaq.com/api/calendar/economicevents?date={date}",
                headers=headers,
                timeout=10,  # avoid hanging indefinitely on a stalled request
            )
            df = pd.DataFrame(response.json()["data"]["rows"]).replace(" ", "-")
            df.loc[:, "Date"] = date
            calendar = pd.concat([calendar, df], axis=0)
        except TypeError:
            # "rows" is None on dates with no events; skip those days.
            continue

    if calendar.empty:
        console.print("[red]No data found for date range.[/red]")
        return pd.DataFrame()

    calendar = calendar.rename(
        columns={"gmt": "Time (GMT)", "country": "Country", "eventName": "Event"}
    )
    # Drop the verbose description column; ignore if the API omitted it.
    calendar = calendar.drop(columns=["description"], errors="ignore")

    if not countries:
        return calendar

    calendar = calendar[calendar["Country"].isin(countries)].reset_index(drop=True)
    if calendar.empty:
        console.print(f"[red]No data found for {','.join(countries)}[/red]")
        return pd.DataFrame()

    return calendar
@log_start_end(log=logger)
def check_country_code_type(list_of_codes: str) -> List[str]:
    """Validate a comma-separated string of NASDAQ country codes.

    Returns the subset of codes (upper-cased) present in the bundled
    NASDAQ_CountryCodes.csv; raises ``argparse.ArgumentTypeError`` when
    none of the supplied codes are valid.
    """
    codes_path = os.path.join(os.path.dirname(__file__), "NASDAQ_CountryCodes.csv")
    nasdaq_codes = list(pd.read_csv(codes_path)["Code"])

    valid_codes = []
    for raw_code in list_of_codes.split(","):
        candidate = raw_code.upper()
        if candidate in nasdaq_codes:
            valid_codes.append(candidate)

    if not valid_codes:
        raise argparse.ArgumentTypeError("No valid codes provided.")
    return valid_codes
@log_start_end(log=logger)
def get_country_codes() -> List[str]:
    """Get available country codes for Bigmac index

    Returns
    -------
    List[str]
        List of ISO-3 letter country codes.
    """
    # Codes ship with the package next to this module.
    path = os.path.join(os.path.dirname(__file__), "NASDAQ_CountryCodes.csv")
    return pd.read_csv(path, index_col=0)
@log_start_end(log=logger)
@check_api_key(["API_KEY_QUANDL"])
def get_big_mac_index(country_code: str = "USA") -> pd.DataFrame:
    """Get the Big Mac index calculated by the Economist

    Parameters
    ----------
    country_code : str
        ISO-3 letter country code to retrieve. Codes available through get_country_codes().

    Returns
    -------
    pd.DataFrame
        Dataframe with Big Mac index converted to USD equivalent;
        empty when the request fails.
    """
    URL = f"https://data.nasdaq.com/api/v3/datasets/ECONOMIST/BIGMAC_{country_code}"
    URL += f"?column_index=3&api_key={API_KEY_QUANDL}"
    try:
        # Timeout so a stalled connection surfaces as the error message
        # below instead of hanging forever.
        r = requests.get(URL, timeout=10)
    except Exception:
        console.print("[red]Error connecting to NASDAQ API[/red]\n")
        return pd.DataFrame()

    df = pd.DataFrame()

    if r.status_code == 200:
        response_json = r.json()
        df = pd.DataFrame(response_json["dataset"]["data"])
        df.columns = response_json["dataset"]["column_names"]
        df["Date"] = pd.to_datetime(df["Date"])
    # Wrong API Key
    elif r.status_code == 400:
        console.print(r.text)
    # Premium Feature
    elif r.status_code == 403:
        console.print(r.text)
    # Catching other exception
    else:
        console.print(r.text)

    return df
@log_start_end(log=logger)
@check_api_key(["API_KEY_QUANDL"])
def get_big_mac_indices(country_codes: Optional[List[str]] = None) -> pd.DataFrame:
    """Display Big Mac Index for given countries

    Parameters
    ----------
    country_codes : Optional[List[str]]
        List of country codes (ISO-3 letter country code). Codes available
        through economy.country_codes(). Defaults to ["USA"].

    Returns
    -------
    pd.DataFrame
        Dataframe with Big Mac indices converted to USD equivalent,
        one column per country, indexed by date.
    """
    if country_codes is None:
        country_codes = ["USA"]

    df_cols = ["Date"]
    df_cols.extend(country_codes)
    big_mac = pd.DataFrame(columns=df_cols)
    for country in country_codes:
        df1 = get_big_mac_index(country)
        if not df1.empty:
            big_mac[country] = df1["dollar_price"]
            # All country series share the same publication dates.
            big_mac["Date"] = df1["Date"]
    big_mac.set_index("Date", inplace=True)
    return big_mac
__docformat__ = "numpy"
import argparse
import logging
from typing import List, Dict
import numpy as np
import pandas as pd
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common.quantitative_analysis import qa_view, rolling_view
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
check_proportion_range,
check_list_dates,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import StockBaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class QaController(StockBaseController):
    """Quantitative Analysis Controller class"""

    # Commands exposed by this submenu. ("unitroot" was listed twice in the
    # original; the duplicate has been removed.)
    CHOICES_COMMANDS = [
        "pick",
        "raw",
        "summary",
        "line",
        "hist",
        "cdf",
        "bw",
        "rolling",
        "decompose",
        "cusum",
        "acf",
        "spread",
        "quantile",
        "skew",
        "kurtosis",
        "normality",
        "qqplot",
        "unitroot",
        "goodness",
    ]
    # Valid regression specifications for the Augmented Dickey-Fuller test.
    FULLER_REG = ["c", "ct", "ctt", "nc"]
    # Valid regression specifications for the KPSS stationarity test.
    KPS_REG = ["c", "ct"]
    # Location of this menu within the terminal command hierarchy.
    PATH = "/economy/qa/"
def __init__(
    self,
    all_economy_data: Dict,
    queue: List[str] = None,
):
    """Initialize the QA controller with previously loaded economy data.

    Parameters
    ----------
    all_economy_data : Dict
        Mapping of data-source name -> DataFrame of loaded series.
    queue : List[str]
        Pending terminal commands to execute after this menu loads.
    """
    super().__init__(queue)
    self.datasets = all_economy_data
    self.options = []
    # Flatten every column from every loaded source into one list of
    # selectable series names for the `pick` command.
    for _, sub_df in all_economy_data.items():
        self.options.extend(list(sub_df.columns))
    self.sources = list(self.datasets.keys())
    self.current_source = self.sources[0]
    # The common functions take in a df and a 'target' column. In order to replicate this,
    # the following code uses the dictionary of dataframes and finds which source the selected column comes from.
    self.current_source_dataframe = self.datasets[self.current_source].copy()
    self.current_source_dataframe.index.name = "date"
    # Default target series: the first column of the first source.
    self.data = self.current_source_dataframe.iloc[:, 0].copy()
    # Drop NaN rows up front -- this menu errors on NaNs (see call_pick).
    if np.any(pd.isna(self.data)):
        self.current_source_dataframe = self.current_source_dataframe.dropna(axis=0)
        self.data = self.data.dropna(axis=0)
    self.current_id = self.current_source_dataframe.columns[0]
    self.start_date = self.data.index[0]
    self.resolution = ""  # For the views
    if session and obbff.USE_PROMPT_TOOLKIT:
        # Build the autocomplete tree: one entry per command, with nested
        # dictionaries for each command's flags and flag values.
        choices: dict = {c: {} for c in self.controller_choices}
        choices["pick"] = {c: {} for c in self.options}
        choices["unitroot"] = {
            "--fuller_reg": {c: {} for c in self.FULLER_REG},
            "-r": "--fuller_reg",
            "--kps_reg": {c: {} for c in self.KPS_REG},
            "-k": "--kps_reg",
        }
        choices["line"] = {
            "--log": {},
            "--ml": None,
            "--ms": None,
        }
        choices["hist"] = {
            "--bins": {str(c): {} for c in range(10, 100)},
            "-b": "--bins",
        }
        choices["bw"] = {
            "--yearly": {},
            "-y": {},
        }
        choices["acf"] = {
            "--lags": {str(c): {} for c in range(5, 100)},
            "-l": "--lags",
        }
        choices["rolling"] = {
            "--window": {str(c): {} for c in range(5, 100)},
            "-w": "--window",
        }
        choices["spread"] = {
            "--window": {str(c): {} for c in range(5, 100)},
            "-w": "--window",
        }
        choices["quantile"] = {
            "--window": {str(c): {} for c in range(5, 100)},
            "-w": "--window",
            "--quantile": {str(c): {} for c in np.arange(0.0, 1.0, 0.01)},
            "-q": "--quantile",
        }
        choices["skew"] = {
            "--window": {str(c): {} for c in range(5, 100)},
            "-w": "--window",
        }
        choices["kurtosis"] = {
            "--window": {str(c): {} for c in range(5, 100)},
            "-w": "--window",
        }
        choices["raw"] = {
            "--limit": None,
            "-l": "--limit",
            "--sortby": {},
            "-s": "--sortby",
            "--reverse": {},
            "-r": "--reverse",
        }
        choices["decompose"] = {
            "--multiplicative": None,
            "-m": "--multiplicative",
        }
        choices["cusum"] = {
            "--threshold": None,
            "-t": "--threshold",
            "--drift": None,
            "-d": "--drift",
        }
        choices["support"] = self.SUPPORT_CHOICES
        choices["about"] = self.ABOUT_CHOICES
        self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
    """Print the help menu for the economy/qa submenu."""
    # Build the menu text, grouping commands by category.
    mt = MenuText("economy/qa/")
    mt.add_cmd("pick")
    mt.add_raw("\n")
    # Show which series is currently selected as the target.
    mt.add_param("_series", self.current_id)
    mt.add_raw("\n")
    mt.add_info("_statistics_")
    mt.add_cmd("summary")
    mt.add_cmd("normality")
    mt.add_cmd("unitroot")
    mt.add_info("_plots_")
    mt.add_cmd("line")
    mt.add_cmd("hist")
    mt.add_cmd("cdf")
    mt.add_cmd("bw")
    mt.add_cmd("acf")
    mt.add_cmd("qqplot")
    mt.add_info("_rolling_metrics_")
    mt.add_cmd("rolling")
    mt.add_cmd("spread")
    mt.add_cmd("quantile")
    mt.add_cmd("skew")
    mt.add_cmd("kurtosis")
    mt.add_info("_other_")
    mt.add_cmd("raw")
    mt.add_cmd("decompose")
    mt.add_cmd("cusum")
    console.print(text=mt.menu_text, menu="Economy - Quantitative Analysis")
def custom_reset(self):
    """Class specific component of reset command.

    Returns the command path used to re-enter this menu after a reset.
    """
    return ["economy"]
@log_start_end(log=logger)
def call_pick(self, other_args: List[str]):
    """Process pick command: select the target series for analysis."""
    # NOTE(review): prog/description still say "load" -- looks like a stale
    # copy-paste from another command; verify before renaming.
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="load",
        description="Load a FRED series to current selection",
    )
    parser.add_argument(
        "-c",
        "--column",
        dest="column",
        type=str,
        help="Which loaded source to get data from",
        choices=self.options,
    )
    # Allow `pick <column>` without an explicit -c flag.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-c")
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        # Find which source the selected column belongs to and make that
        # source/column the current analysis target.
        for source, sub_df in self.datasets.items():
            if ns_parser.column in sub_df.columns:
                self.current_source = source
                self.current_source_dataframe = sub_df.copy()
                self.current_source_dataframe.index.name = "date"
                # This menu throws errors if there are nans, so lets manipulate them here
                self.data = sub_df[ns_parser.column].copy()
                if np.any(pd.isna(self.data)):
                    self.current_source_dataframe = (
                        self.current_source_dataframe.dropna(axis=0)
                    )
                    self.data = self.data.dropna(axis=0)
                self.current_id = ns_parser.column
                self.start_date = self.data.index[0]
                console.print(f"{ns_parser.column} loaded from {source}.\n")
                return
        console.print(f"[red]{ns_parser.column} not found in data.[/red]\n")
@log_start_end(log=logger)
def call_raw(self, other_args: List[str]):
    """Process raw command: print the selected series to the console."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
        prog="raw",
        description="""
            Print raw data to console
        """,
    )
    parser.add_argument(
        "-l",
        "--limit",
        help="Number to show",
        type=check_positive,
        default=20,
        dest="limit",
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        help=(
            "Data is sorted in descending order by default. "
            "Reverse flag will sort it in an ascending way. "
            "Only works when raw data is displayed."
        ),
    )
    parser.add_argument(
        "-s",
        "--sortby",
        help="The column to sort by",
        type=str.lower,
        dest="sortby",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    # The view expects a DataFrame, so promote a Series target if needed.
    if isinstance(self.data, pd.Series):
        data = self.data.to_frame()
    else:
        data = self.data
    if ns_parser:
        qa_view.display_raw(
            data=data,
            limit=ns_parser.limit,
            sortby=ns_parser.sortby,
            ascend=ns_parser.reverse,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_summary(self, other_args: List[str]):
    """Process summary command: show summary statistics for the series."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
        prog="summary",
        description="""
            Summary statistics
        """,
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        qa_view.display_summary(
            data=self.current_source_dataframe, export=ns_parser.export
        )
@log_start_end(log=logger)
def call_line(self, other_args: List[str]):
    """Process line command: plot the selected series, optionally with markers."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
        prog="line",
        description="Show line plot of selected data or highlight specific datetimes.",
    )
    parser.add_argument(
        "--log",
        help="Plot with y on log scale",
        dest="log",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--ml",
        help="Draw vertical line markers to highlight certain events",
        dest="ml",
        type=check_list_dates,
        default="",
    )
    parser.add_argument(
        "--ms",
        help="Draw scatter markers to highlight certain events",
        dest="ms",
        type=check_list_dates,
        default="",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
    )
    if ns_parser:
        qa_view.display_line(
            self.data,
            title=f"{self.current_id.upper()}",
            log_y=ns_parser.log,
            markers_lines=ns_parser.ml,
            markers_scatter=ns_parser.ms,
        )
@log_start_end(log=logger)
def call_hist(self, other_args: List[str]):
    """Process hist command: histogram with density and rug for the series."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
        prog="hist",
        description="""
            Histogram with density and rug
        """,
    )
    # Number of histogram bins (positive int, default 15).
    parser.add_argument(
        "-b", "--bins", type=check_positive, default=15, dest="n_bins"
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        qa_view.display_hist(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            bins=ns_parser.n_bins,
        )
@log_start_end(log=logger)
def call_cdf(self, other_args: List[str]):
    """Process cdf command: plot the cumulative distribution function."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
        prog="cdf",
        description="""
            Cumulative distribution function
        """,
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        qa_view.display_cdf(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_bw(self, other_args: List[str]):
    """Process bw command: box-and-whisker plot of the series."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
        prog="bw",
        description="""
            Box and Whisker plot
        """,
    )
    parser.add_argument(
        "-y",
        "--yearly",
        action="store_true",
        default=False,
        dest="year",
        help="Flag to show yearly bw plot",
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        qa_view.display_bw(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            yearly=ns_parser.year,
        )
@log_start_end(log=logger)
def call_decompose(self, other_args: List[str]):
    """Process decompose command: seasonal decomposition of the series."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="decompose",
        description="""
            Decompose time series as:
            - Additive Time Series = Level + CyclicTrend + Residual + Seasonality
            - Multiplicative Time Series = Level * CyclicTrend * Residual * Seasonality
        """,
    )
    parser.add_argument(
        "-m",
        "--multiplicative",
        action="store_true",
        default=False,
        dest="multiplicative",
        help="decompose using multiplicative model instead of additive",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        qa_view.display_seasonal(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            multiplicative=ns_parser.multiplicative,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_cusum(self, other_args: List[str]):
    """Process cusum command: detect abrupt changes in the series."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="cusum",
        description="""
            Cumulative sum algorithm (CUSUM) to detect abrupt changes in data
        """,
    )
    # Defaults scale with the series' value range (range/40 and range/80),
    # computed at parse time from the currently selected data.
    parser.add_argument(
        "-t",
        "--threshold",
        dest="threshold",
        type=float,
        default=(max(self.data.values) - min(self.data.values)) / 40,
        help="threshold",
    )
    parser.add_argument(
        "-d",
        "--drift",
        dest="drift",
        type=float,
        default=(max(self.data.values) - min(self.data.values)) / 80,
        help="drift",
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        qa_view.display_cusum(
            data=self.current_source_dataframe,
            target=self.current_id,
            threshold=ns_parser.threshold,
            drift=ns_parser.drift,
        )
@log_start_end(log=logger)
def call_acf(self, other_args: List[str]):
    """Process acf command: auto-correlation and partial auto-correlation plots."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="acf",
        description="""
            Auto-Correlation and Partial Auto-Correlation Functions for diff and diff diff stock data
        """,
    )
    parser.add_argument(
        "-l",
        "--lags",
        dest="lags",
        type=check_positive,
        default=15,
        help="maximum lags to display in plots",
    )
    ns_parser = self.parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        qa_view.display_acf(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            lags=ns_parser.lags,
        )
@log_start_end(log=logger)
def call_rolling(self, other_args: List[str]):
    """Process rolling command: rolling mean and standard deviation."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="rolling",
        description="""
            Rolling mean and std deviation
        """,
    )
    parser.add_argument(
        "-w",
        "--window",
        action="store",
        dest="n_window",
        type=check_positive,
        default=14,
        help="Window length",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        rolling_view.display_mean_std(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            window=ns_parser.n_window,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_spread(self, other_args: List[str]):
    """Process spread command: rolling variance and standard deviation plot."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="spread",
        description="""Shows rolling spread measurement
        """,
    )
    parser.add_argument(
        "-w",
        "--window",
        action="store",
        dest="n_window",
        type=check_positive,
        default=14,
        help="Window length",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        rolling_view.display_spread(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            window=ns_parser.n_window,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_quantile(self, other_args: List[str]):
    """Process quantile command: rolling quantile of the series."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="quantile",
        description="""
            The quantiles are values which divide the distribution such that
            there is a given proportion of observations below the quantile.
            For example, the median is a quantile. The median is the central
            value of the distribution, such that half the points are less than
            or equal to it and half are greater than or equal to it.

            By default, q is set at 0.5, which effectively is median. Change q to
            get the desired quantile (0<q<1).
        """,
    )
    parser.add_argument(
        "-w",
        "--window",
        action="store",
        dest="n_window",
        type=check_positive,
        default=14,
        help="window length",
    )
    # Quantile must be a proportion in (0, 1).
    parser.add_argument(
        "-q",
        "--quantile",
        action="store",
        dest="f_quantile",
        type=check_proportion_range,
        default=0.5,
        help="quantile",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        rolling_view.display_quantile(
            symbol="",
            data=self.current_source_dataframe,
            target=self.current_id,
            window=ns_parser.n_window,
            quantile=ns_parser.f_quantile,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_skew(self, other_args: List[str]):
    """Process skew command.

    Shows the rolling skewness of the target series.
    """
    skew_parser = argparse.ArgumentParser(
        prog="skew",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
            Skewness is a measure of asymmetry or distortion of symmetric
            distribution. It measures the deviation of the given distribution
            of a random variable from a symmetric distribution, such as normal
            distribution. A normal distribution is without any skewness, as it is
            symmetrical on both sides. Hence, a curve is regarded as skewed if
            it is shifted towards the right or the left.
        """,
    )
    skew_parser.add_argument(
        "-w",
        "--window",
        help="window length",
        default=14,
        type=check_positive,
        action="store",
        dest="n_window",
    )
    parsed = self.parse_known_args_and_warn(
        skew_parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not parsed:
        return
    rolling_view.display_skew(
        symbol="",
        data=self.current_source_dataframe,
        target=self.current_id,
        window=parsed.n_window,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_kurtosis(self, other_args: List[str]):
    """Process kurtosis command.

    Shows the rolling kurtosis of the target series.
    """
    kurtosis_parser = argparse.ArgumentParser(
        prog="kurtosis",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
            Kurtosis is a measure of the "tailedness" of the probability distribution
            of a real-valued random variable. Like skewness, kurtosis describes the shape
            of a probability distribution and there are different ways of quantifying it
            for a theoretical distribution and corresponding ways of estimating it from
            a sample from a population. Different measures of kurtosis may have different
            interpretations.
        """,
    )
    kurtosis_parser.add_argument(
        "-w",
        "--window",
        help="window length",
        default=14,
        type=check_positive,
        action="store",
        dest="n_window",
    )
    parsed = self.parse_known_args_and_warn(
        kurtosis_parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not parsed:
        return
    rolling_view.display_kurtosis(
        symbol="",
        data=self.current_source_dataframe,
        target=self.current_id,
        window=parsed.n_window,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_normality(self, other_args: List[str]):
    """Process normality command.

    Runs normality statistics on the target series (no extra options).
    """
    normality_parser = argparse.ArgumentParser(
        prog="normality",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
            Normality tests
        """,
    )
    parsed = self.parse_known_args_and_warn(
        normality_parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not parsed:
        return
    qa_view.display_normality(
        data=self.current_source_dataframe,
        target=self.current_id,
        export=parsed.export,
    )
@log_start_end(log=logger)
def call_qqplot(self, other_args: List[str]):
    """Process qqplot command.

    Plots the target series' quantiles against normal quantiles.
    """
    qqplot_parser = argparse.ArgumentParser(
        prog="qqplot",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
            Display QQ plot vs normal quantiles
        """,
    )
    parsed = self.parse_known_args_and_warn(qqplot_parser, other_args)
    if not parsed:
        return
    qa_view.display_qqplot(
        symbol="", data=self.current_source_dataframe, target=self.current_id
    )
@log_start_end(log=logger)
def call_unitroot(self, other_args: List[str]):
    """Process unitroot command.

    Runs ADF and KPSS stationarity tests on the target series; the
    regression type for each test is selectable via -r / -k.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="unitroot",
        description="""
            Unit root test / stationarity (ADF, KPSS)
        """,
    )
    # ADF regression: constant / const+trend / const+trend+trend^2 / none.
    parser.add_argument(
        "-r",
        "--fuller_reg",
        help="Type of regression. Can be ‘c’,’ct’,’ctt’,’nc’ 'c' - Constant and t - trend order",
        choices=["c", "ct", "ctt", "nc"],
        default="c",
        type=str,
        dest="fuller_reg",
    )
    # KPSS regression: constant or constant+trend.
    parser.add_argument(
        "-k",
        "--kps_reg",
        help="Type of regression. Can be ‘c’,’ct'",
        choices=["c", "ct"],
        type=str,
        dest="kpss_reg",
        default="c",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        qa_view.display_unitroot(
            data=self.current_source_dataframe,
            target=self.current_id,
            fuller_reg=ns_parser.fuller_reg,
            kpss_reg=ns_parser.kpss_reg,
            export=ns_parser.export,
        ) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/economy/quantitative_analysis/qa_controller.py | 0.55447 | 0.235482 | qa_controller.py | pypi |
```
import os
from datetime import datetime, timedelta
import ipywidgets as widgets
import plotly.graph_objs as go
import yfinance as yf
import pandas as pd
from IPython.display import display
# Bar intervals accepted by yfinance; shown in the "Interval" dropdown.
interval_opts = [
    "1m",
    "2m",
    "5m",
    "15m",
    "30m",
    "60m",
    "90m",
    "1h",
    "1d",
    "5d",
    "1wk",
    "1mo",
    "3mo",
]
# yfinance `info` keys displayed (in this order) in the summary table.
rows = [
    "sector",
    "marketCap",
    "beta",
    "fiftyTwoWeekHigh",
    "fiftyTwoWeekLow",
    "floatShares",
    "sharesShort",
    "exDividendDate",
]
# Transformations selectable in the calculation widget. Each callable takes
# (dataframe, rolling_window); the window argument is ignored by the
# non-rolling views so all entries share one call shape.
views = {
    "Raw Data": lambda x, y: x,
    "Percent Change": lambda x, y: x.pct_change(),
    "Rolling Average": lambda x, y: x.rolling(y).mean(),
    "Rolling Variance": lambda x, y: x.rolling(y).var(),
    "Rolling Standard Deviation": lambda x, y: x.rolling(y).var() ** 0.5,
    "Rolling Coefficient of Variation": lambda x, y: (x.rolling(y).var() ** 0.5)
    / (x.rolling(y).mean()),
}
# Short display labels for the `rows` keys (table's first column).
clean_row = {
    "sector": "Sector",
    "marketCap": "M Cap",
    "beta": "Beta",
    "fiftyTwoWeekHigh": "52W High",
    "fiftyTwoWeekLow": "52W Low",
    "floatShares": "Floats",
    "sharesShort": "Shorts",
    "exDividendDate": "Ex-Div",
}
# Per-key formatters for table cells; every one maps None -> "N/A".
clean_data = {
    "sector": lambda x: "N/A" if x is None else x,
    "marketCap": lambda x: "N/A" if x is None else big_num(x),
    "beta": lambda x: "N/A" if x is None else f"{round(x,2)}",
    "fiftyTwoWeekHigh": lambda x: "N/A" if x is None else f"${round(x,2)}",
    "fiftyTwoWeekLow": lambda x: "N/A" if x is None else f"${round(x,2)}",
    "floatShares": lambda x: "N/A" if x is None else big_num(x),
    "sharesShort": lambda x: "N/A" if x is None else big_num(x),
    "exDividendDate": lambda x: "N/A"
    if x is None
    else datetime.fromtimestamp(x).strftime("%Y/%m/%d"),
}
def big_num(num):
    """Format a number with a T/B/M/K suffix, e.g. 1_500_000 -> "1.5M".

    Thresholds are exclusive (`>`), so exactly 1_000 is returned as the
    plain rounded number, matching the original behavior.
    """
    if num > 1_000_000_000_000:
        return f"{round(num / 1_000_000_000_000, 2)}T"
    if num > 1_000_000_000:
        return f"{round(num / 1_000_000_000, 2)}B"
    if num > 1_000_000:
        return f"{round(num / 1_000_000, 2)}M"
    if num > 1_000:
        # BUG FIX: previously f"{num/round(1_000,2)}K" — round() was applied
        # to the constant 1_000 instead of the quotient, so the K branch was
        # never rounded (e.g. 1234 -> "1.234K" instead of "1.23K").
        return f"{round(num / 1_000, 2)}K"
    return f"{round(num, 2)}"
def clean_str(string):
    """Split a camelCase key into Title Case words, e.g. "fiftyTwoWeekHigh" -> "Fifty Two Week High"."""
    pieces = []
    for ch in string:
        # Insert a space in front of every uppercase letter, then title-case.
        if ch.isupper():
            pieces.append(" ")
        pieces.append(ch)
    return "".join(pieces).title()
def format_plotly(fig, data, start, end, chart, calc=None):
    """Apply the dashboard's shared layout (size, legend, title) to a figure.

    `chart` selects the height ("main" charts are taller than volume charts);
    `calc` lists the calculations plotted — when omitted the figure is the
    volume plot and receives a fixed "Volume" title.
    """
    fig.update_yaxes(title=None)
    fig.update_xaxes(title=None)
    window = f"from {start.strftime('%Y/%m/%d')} to {end.strftime('%Y/%m/%d')}"
    if not calc:
        fig_title = "Volume"
    elif len(calc) == 1:
        fig_title = f"{calc[0]} of {data} {window}"
    else:
        fig_title = f"{', '.join(calc)} of {data} {window}"
    fig.update_layout(
        margin=dict(l=0, r=10, t=10, b=10),
        autosize=False,
        width=900,
        height=500 if chart == "main" else 300,
        legend=dict(orientation="h"),
        title={
            "text": fig_title,
            "y": 0.95,
            "x": 0.5,
            "xanchor": "center",
            "yanchor": "top",
        },
    )
def create_line(visual, x, y, name, data, fig):
    """Append one trace to `fig`.

    "line"/"scatter" plot the single column named by `data`;
    "candle" uses the Open/Close/High/Low columns of `y`.
    """
    if visual == "candle":
        trace = go.Candlestick(
            x=x,
            open=y["Open"],
            close=y["Close"],
            high=y["High"],
            low=y["Low"],
            name=name,
        )
    elif visual == "scatter":
        trace = go.Scatter(x=x, y=y[data], mode="markers", name=name)
    elif visual == "line":
        trace = go.Scatter(x=x, y=y[data], mode="lines", name=name, connectgaps=True)
    fig.add_trace(trace)
def show_fig(fig):
    """Render a figure, picking the notebook renderer when served via voila."""
    cfg = {"showTips": False, "scrollZoom": True}
    server = os.environ.get("SERVER_SOFTWARE", "jupyter")
    if server.startswith("voila"):
        fig.show(config=cfg, renderer="notebook")
    else:
        fig.show(config=cfg)
def table_data(infos):
    """Build the summary table shown next to the charts.

    `infos` maps ticker -> yfinance `info` dict. Row labels come from the
    module-level `rows`/`clean_row` and values are formatted via `clean_data`
    (missing keys become None and render as "N/A").
    """
    cols = ["Ticker"] + list(infos)
    data = pd.DataFrame(columns=cols)
    data["Ticker"] = [clean_row[x] for x in rows]
    for ticker in infos:  # iterating the dict directly; no list() copy needed
        data[ticker] = [clean_data[x](infos[ticker].get(x, None)) for x in rows]
    # FIX: removed dead code — `new_cols = {k: clean_str(k) for k in rows}`
    # was computed but never used (a rename that was never applied).
    return data
class Chart:
    """Holds the downloaded price data and renders the dashboard figures.

    The last tickers/interval pair is cached so widget changes that do not
    affect the download (e.g. changing the calculation) reuse `self.df`.
    """

    def __init__(self):
        # Cache keys for the most recent yf.download call.
        self.last_tickers = ""
        self.last_interval = "1d"
        self.df = pd.DataFrame()
        self.infos = {}

    def create_stock(
        self, calculation, data, rolling, start, end, interval, tickers, chart
    ):
        """Redraw the main chart; wired to the widgets via interactive_output.

        Only acts when the ticker text ends with a comma — the dashboard's
        signal that the user finished typing a symbol.
        """
        if tickers and tickers[-1] == ",":
            if tickers != self.last_tickers or interval != self.last_interval:
                # Daily-or-coarser intervals: grab full history, filter later.
                if interval in ["1d", "5d", "1wk", "1mo", "3mo"]:
                    self.df = yf.download(
                        tickers, period="max", interval=interval, progress=False
                    )
                else:
                    # Intraday: yfinance end date is exclusive, so pad by a day.
                    end_date = end + timedelta(days=1)
                    self.df = yf.download(
                        tickers,
                        start=start,
                        end=end_date,
                        interval=interval,
                        progress=False,
                    )
                if not self.df.empty:
                    # Drop timezone so comparisons with naive datetimes work.
                    self.df.index = self.df.index.tz_localize(None)
                self.last_tickers = tickers
                self.last_interval = interval
            start_n = datetime(start.year, start.month, start.day)
            end_n = datetime(end.year, end.month, end.day)
            fig = go.Figure()
            for item in calculation:
                calcs = views[item](self.df, rolling)
                if interval in ["1d", "5d", "1wk", "1mo", "3mo"]:
                    result = calcs.loc[
                        (calcs.index >= start_n) & (calcs.index <= end_n)
                    ]
                else:
                    result = calcs
                # 6 flat columns => single ticker; otherwise yfinance returns
                # a (field, ticker) MultiIndex and we plot each ticker.
                if len(result.columns) == 6:
                    name = f"{tickers.split(',')[0]} {item}"
                    create_line(chart, result.index, result, name, data, fig)
                else:
                    for val in result.columns.levels[1]:
                        vals = result.xs(val, axis=1, level=1, drop_level=True)
                        name = f"{val.upper()} {item}"
                        create_line(chart, result.index, vals, name, data, fig)
            format_plotly(fig, data, start, end, "main", calculation)
            show_fig(fig)

    def create_volume(self, start, end, interval, tickers):
        """Redraw the volume chart from the cached dataframe."""
        start_n = datetime(start.year, start.month, start.day)
        end_n = datetime(end.year, end.month, end.day)
        result = self.df.loc[(self.df.index >= start_n) & (self.df.index <= end_n)]
        fig = go.Figure()
        if len(result.columns) == 6:
            name = f"{tickers.split(',')[0]}"
            create_line("line", result.index, result, name, "Volume", fig)
        else:
            for val in result.columns.levels[1]:
                vals = result.xs(val, axis=1, level=1, drop_level=True)
                name = f"{val.upper()}"
                create_line("line", result.index, vals, name, "Volume", fig)
        format_plotly(fig, "Volume", start, end, "volume")
        show_fig(fig)

    def create_table(self, tickers):
        """Redraw the fundamentals table; caches yf.Ticker(...).info per symbol."""
        if tickers and tickers[-1] == ",":
            clean_tickers = [x for x in tickers.split(",") if x]
            for ticker in clean_tickers:
                if ticker not in self.infos:
                    self.infos[ticker] = yf.Ticker(ticker).info
            # Evict cached info for tickers removed from the text box.
            delete = [ticker for ticker in self.infos if ticker not in tickers]
            for ticker in delete:
                self.infos.pop(ticker)
            result = table_data(self.infos)
            fig = go.Figure(
                data=[
                    go.Table(
                        header=dict(
                            values=result.columns,
                            fill_color="lightgray",
                            font=dict(color="black"),
                            align="left",
                        ),
                        cells=dict(
                            values=[result[x] for x in result.columns],
                            font=dict(color="black"),
                            align="left",
                        ),
                    )
                ],
            )
            fig.update_layout(margin=dict(l=0, r=20, t=0, b=0), width=350)
            show_fig(fig)
# ---- Widget construction and layout for the stock dashboard ----
w_auto = widgets.Layout(width="auto")
# Multi-select of the calculations defined in `views`.
calc_widget = widgets.SelectMultiple(
    options=list(views.keys()), value=["Raw Data"], layout=w_auto
)
data_opts = ["Open", "Close", "High", "Low"]
data_widget = widgets.Dropdown(
    options=data_opts, value="Close", layout=w_auto, description="Data"
)
# Rolling window length used by the "Rolling *" calculations.
rolling_widget = widgets.Dropdown(
    options=list(range(2, 101)), value=60, layout=w_auto, description="Rolling"
)
# Default range: the last year.
base_date = (datetime.today() - timedelta(days=365)).date()
start_widget = widgets.DatePicker(value=base_date, layout=w_auto, description="Start")
end_widget = widgets.DatePicker(
    value=datetime.today().date(), layout=w_auto, description="End"
)
interval_widget = widgets.Dropdown(
    options=interval_opts, value="1d", layout=w_auto, description="Interval"
)
# Comma-terminated ticker list; handlers only fire on a trailing comma.
tickers_widget = widgets.Textarea(
    value="TSLA,", layout=widgets.Layout(width="auto", height="100%")
)
chart_opts = ["line", "scatter", "candle"]
chart_widget = widgets.Dropdown(
    options=chart_opts, value="line", layout=w_auto, description="Chart"
)
data_box = widgets.VBox([data_widget, rolling_widget, chart_widget])
date_box = widgets.VBox([start_widget, end_widget, interval_widget])
controls = widgets.HBox(
    [tickers_widget, calc_widget, date_box, data_box],
    layout=widgets.Layout(width="90%"),
)
# One shared Chart instance backs all three output views.
chart = Chart()
stocks_view = widgets.interactive_output(
    chart.create_stock,
    {
        "calculation": calc_widget,
        "data": data_widget,
        "rolling": rolling_widget,
        "start": start_widget,
        "end": end_widget,
        "interval": interval_widget,
        "tickers": tickers_widget,
        "chart": chart_widget,
    },
)
volume_view = widgets.interactive_output(
    chart.create_volume,
    {
        "start": start_widget,
        "end": end_widget,
        "interval": interval_widget,
        "tickers": tickers_widget,
    },
)
table_view = widgets.interactive_output(chart.create_table, {"tickers": tickers_widget})
charts = widgets.VBox(
    [stocks_view, volume_view],
    layout=widgets.Layout(width="100%", padding="0", margin="0"),
)
figures = widgets.HBox(
    [charts, table_view], layout=widgets.Layout(padding="0", margin="0")
)
# Assemble and display the dashboard.
title_html = "<h1>Stock Analysis Dashboard</h1>"
# FIX: the opening tag previously read '<p style="color:red"=>' — the stray
# '=' after the attribute value is malformed HTML.
warning_html = '<p style="color:red">Use a comma after EVERY stock typed.</p>'
app_contents = [widgets.HTML(title_html), controls, widgets.HTML(warning_html), figures]
app = widgets.VBox(app_contents)
display(app)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/dashboards/voila/stocks.ipynb | 0.422743 | 0.523968 | stocks.ipynb | pypi |
```
import io
import pandas as pd
import matplotlib.pyplot as plt
import requests
from datetime import datetime, timedelta
import ipywidgets as widgets
from IPython.display import display
%matplotlib widget
class ShortDataDash:
    """FINRA consolidated-NMS short-volume dashboard.

    Downloads one daily CNMSshvol file per trading day from FINRA's CDN,
    aggregates short vs total volume, and plots either the top-N symbols or
    a single ticker's history. All plotting happens inside Output widgets.
    """

    def __init__(self):
        self.df = pd.DataFrame()  # accumulated FINRA daily files
        self.days_slider = widgets.IntSlider(
            value=5,
            min=5,
            max=252,
            step=1,
            description="Days Back",
            style={"description_width": "initial"},
        )
        self.count_slider = widgets.IntSlider(
            value=10,
            min=1,
            max=25,
            step=1,
            description="Number to show.",
            style={"description_width": "initial"},
        )
        self.output1 = widgets.Output()  # aggregate bar chart area
        self.output2 = widgets.Output()  # single-ticker chart area
        self.load_button = widgets.Button(
            description="Load Data", layout=widgets.Layout(width="200px", height="40px")
        )
        self.load_button.on_click(self.load_button_click)
        self.show_button = widgets.Button(
            description="Change Number Shown", layout=self.load_button.layout
        )
        self.show_button.on_click(self.show_button_click)
        self.slider_box = widgets.HBox([self.days_slider, self.count_slider])
        self.button_box = widgets.VBox([self.load_button, self.show_button])
        self.stock_input = widgets.Text(
            value="GME",
            placeholder="GME",
            description="Ticker:",
        )
        self.ticker_button = widgets.Button(description="Plot Ticker")
        self.ticker_button.on_click(self.ticker_button_click)

    def show_button_click(self, b):
        """Re-plot the aggregate chart without re-downloading."""
        self.output1.clear_output()
        with self.output1:
            self.update()

    def load_button_click(self, b):
        """Download `days_slider.value` days of data, then redraw."""
        self.output1.clear_output()
        self.output2.clear_output()
        with self.output1:
            print(f"Data Loading for {self.days_slider.value} days")
            self.fetch_new_data()
            self.update()

    def ticker_button_click(self, b):
        """Plot the ticker currently typed in `stock_input`."""
        self.output2.clear_output()
        with self.output2:
            self.ticker_plot()

    def fetch_new_data(self):
        """Download daily FINRA files, walking backwards from today.

        Counts only successful (HTTP 200) days toward the requested total, so
        weekends/holidays are skipped naturally.
        NOTE(review): requests.get has no timeout, and a long outage would
        keep this loop walking backwards indefinitely — confirm acceptable
        for the notebook context.
        """
        self.df = pd.DataFrame()
        today = datetime.now().date()
        idx = 0
        len_df = 0
        while len_df < self.days_slider.value:
            date = today - timedelta(days=idx)
            r = requests.get(
                f"https://cdn.finra.org/equity/regsho/daily/CNMSshvol{date.strftime('%Y%m%d')}.txt"
            )
            if r.status_code == 200:
                self.df = pd.concat(
                    [self.df, pd.read_csv(io.StringIO(r.text), sep="|")], axis=0
                )
                len_df += 1
            idx += 1
        # Drop trailer rows: real data rows carry YYYYMMDD ints > 20100101.
        self.df = self.df[self.df.Date > 20100101]
        self.df.Date = self.df["Date"].apply(
            lambda x: datetime.strptime(str(x), "%Y%m%d")
        )

    def update(self):
        """Draw the horizontal bar chart of top symbols by total ShortVolume."""
        if not self.df.empty:
            temp = (
                self.df.groupby("Symbol")[["ShortVolume", "TotalVolume"]]
                .agg("sum")
                .sort_values(by="ShortVolume", ascending=False)
                .head(self.count_slider.value)[::-1]  # reversed so biggest is on top
            )
            self.fig, self.ax = plt.subplots(figsize=(6, 6))
            self.ax.barh(temp.index, temp.TotalVolume, alpha=0.4, label="Total Volume")
            self.ax.barh(temp.index, temp.ShortVolume, label="Short Volume")
            self.ax.set_title(
                f"Top {self.count_slider.value} Short Volume in Last {self.days_slider.value} Days"
            )
            self.ax.legend()
            self.fig.tight_layout()
            plt.show()

    def ticker_plot(self):
        """Plot total vs short volume over time for the selected ticker."""
        stock_data = self.df.copy().loc[
            self.df.Symbol == self.stock_input.value,
            ["Date", "ShortVolume", "TotalVolume"],
        ]
        self.fig2, self.ax2 = plt.subplots(figsize=(6, 6))
        self.ax2.plot(
            stock_data.Date, stock_data.TotalVolume, alpha=0.4, label="Total Volume"
        )
        self.ax2.plot(stock_data.Date, stock_data.ShortVolume, label="Short Volume")
        self.ax2.set_title(
            f"Stock Volume and Short Volume for {self.stock_input.value.upper()}"
        )
        self.ax2.legend()
        self.fig2.autofmt_xdate()
        self.fig2.tight_layout()
        plt.show()

    def build_app(self):
        """Return the ordered widget list composing the dashboard."""
        title_html = """
        <h2>Finra Short Data</h2>
        <p>This widget downloads the consolidated NMS short data from FINRA and aggregates the data by summing over the entire time period.</p>
        <p>Note that clicking the 'Load Data' button will reload all data. This can get time consuming, so if you pick a few hundred days, expect a few minutes for loading time.</p>
        """
        middle_html = """
        Here we allow the user to query for a single stock. This will work with the loaded data. Note that if you want to reload the data, this will once again take some time.
        """
        return [
            widgets.HTML(
                title_html, layout=widgets.Layout(margin="0 0 3em 0", max_width="800px")
            ),
            self.slider_box,
            self.button_box,
            self.output1,
            widgets.HTML(
                middle_html,
                layout=widgets.Layout(margin="0 0 3em 0", max_width="800px"),
            ),
            self.stock_input,
            self.ticker_button,
            self.output2,
        ]
# Instantiate the dashboard and display it centered, capped at 1024px wide.
dash = ShortDataDash()
app = widgets.VBox(
    dash.build_app(), layout=widgets.Layout(max_width="1024px", margin="0 auto 0 auto")
)
display(app)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/dashboards/voila/shortdata.ipynb | 0.418816 | 0.226612 | shortdata.ipynb | pypi |
# Cryptocurrency exchange rates to USD ($)
```
import json
import os
import ipywidgets as ipw
from IPython.display import display
import pandas as pd
from openbb_terminal.sdk import openbb, widgets
ipw.HTML(f"<style>{widgets.price_card_stylesheet()}</style>")
# Symbols to show price cards for (roughly the top coins by market cap).
COIN_LIST = (
    "BTC,ETH,USDT,BNB,USDC,XRP,LUNA,ADA,"
    + "SOL,AVAX,DOT,BUSD,DOGE,UST,SHIB,WBTC,"
    + "MATIC,CRO,DAI,LTC,ATOM,NEAR,LINK,BCH,"
    + "UNI,TRX,FTT,ETC,LEO,ALGO,XLM,MANA,"
    + "BTCB,HBAR,EGLD,ICP,SAND,XMR,WAVES,VET,"
    + "APE,FIL,FTM,AXS,THETA,KLAY,XTZ,RUNE"
).split(",")
# Cache of the last known {"price": ..., "color": ...} per coin.
exchange_rates = {}
# IPython help magic left in the notebook (shows openbb.crypto.load docs).
?openbb.crypto.load
def get_exchange_rate(coin: str, days: int = 2):
    """Get exchange rate for a cryptocurrency against USD.

    Returns a (price, color) tuple: the latest daily close as a string
    truncated to 7 characters, and the price-card color class reflecting
    the day-over-day move.

    NOTE(review): `days` is never used — the load call below relies on its
    own default range. Confirm before removing or wiring it through.
    """
    # interval="1440" => daily candles from the CCXT source.
    current_df = openbb.crypto.load(
        symbol=coin,
        interval="1440",
        source="CCXT",
    )
    # Compare the last two closes to pick the card color.
    price_color = "neutral_color"
    if current_df["Close"].iloc[-1] > current_df["Close"].iloc[-2]:
        price_color = "up_color"
    elif current_df["Close"].iloc[-1] < current_df["Close"].iloc[-2]:
        price_color = "down_color"
    price = str(current_df["Close"].iloc[-1])[:7]
    return price, price_color
def show_rates(btn):
    """Populate the grid with one price card per coin (Update-button callback).

    Clears the grid, then appends a card for each coin in COIN_LIST; coins
    whose rate lookup fails get a neutral placeholder card.
    """
    grid.children = ()
    for coin in COIN_LIST:
        if coin not in exchange_rates:
            exchange_rates[coin] = {"price": None, "color": None}
        try:
            price, price_color = get_exchange_rate(coin=coin)
        except Exception:
            # Best-effort: keep rendering the remaining coins.
            price, price_color = "-------", "neutral_color"
        # FIX: the card builder was called as `api.widgets.price_card`, but no
        # `api` is imported in this notebook — only `widgets` (the OpenBB SDK
        # widgets helper). The old local variable was also named `widgets`,
        # shadowing that import, so it is renamed to `cards` here.
        cards = list(grid.children)
        cards.append(
            ipw.HTML(
                widgets.price_card(
                    ticker=coin,
                    price=price,
                    price_color=price_color,
                )
            )
        )
        grid.children = tuple(cards)
# Button that (re)loads all rates, plus an 8-column grid of price cards.
update_rates = ipw.Button(description="Update rates")
update_rates.on_click(show_rates)
header = ipw.HBox([update_rates])
layout = ipw.Layout(grid_template_columns="1fr " * 8)
grid = ipw.GridBox(
    [],  # starts empty; filled by show_rates
    layout=layout,
)
display(header, grid)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/dashboards/voila/crypto.ipynb | 0.461017 | 0.481149 | crypto.ipynb | pypi |
```
import os
from datetime import datetime, timedelta
import ipywidgets as widgets
import plotly.graph_objs as go
import yfinance as yf
import pandas as pd
from IPython.display import display
# Bar intervals accepted by yfinance; shown in the "Interval" dropdown.
interval_opts = [
    "1m",
    "2m",
    "5m",
    "15m",
    "30m",
    "60m",
    "90m",
    "1h",
    "1d",
    "5d",
    "1wk",
    "1mo",
    "3mo",
]
class Chart:
    """Downloads price data and renders a colored correlation table."""

    def __init__(self):
        # Cache keys for the last download so unrelated widget changes do
        # not trigger a fresh yfinance request.
        self.last_tickers = ""
        self.last_interval = "1d"
        self.df = pd.DataFrame()

    def create(self, data, start, end, interval, tickers):
        """Redraw the correlation table (widget callback).

        Only acts once the ticker text ends with a comma — the dashboard's
        signal that the user finished typing a symbol.
        """
        # FIX: removed the vestigial `global last_tickers, df, last_interval`
        # declaration left over from a pre-class version of this notebook.
        # All state lives on `self`; the old statement made the assignment
        # below silently write a module-level `df` that nothing reads.
        if tickers and tickers[-1] == ",":
            if tickers != self.last_tickers or interval != self.last_interval:
                if interval in ["1d", "5d", "1wk", "1mo", "3mo"]:
                    self.df = yf.download(
                        tickers, period="max", interval=interval, progress=False
                    )
                else:
                    self.df = yf.download(
                        tickers, start=start, end=end, interval=interval, progress=False
                    )
                self.last_tickers = tickers
                self.last_interval = interval
            start_n = datetime(start.year, start.month, start.day)
            end_n = datetime(end.year, end.month, end.day)
            df = self.df[data]
            # A single ticker yields a Series — correlation needs >= 2 columns.
            if not isinstance(df, pd.Series):
                if interval in ["1d", "5d", "1wk", "1mo", "3mo"]:
                    result = df.loc[(df.index >= start_n) & (df.index <= end_n)].corr()
                else:
                    result = df.corr()
                # Cell colors: black diagonal, green positive, pink negative;
                # prepend a gray column for the row labels.
                base = [
                    [
                        "black" if x == 1 else "lightgreen" if x > 0 else "lightpink"
                        for x in result[y].tolist()
                    ]
                    for y in result.columns
                ]
                base = [["lightgray" for _ in range(result.shape[0])]] + base
                result = result.reset_index()
                result.rename(columns={"index": ""}, inplace=True)
                fig = go.Figure(
                    data=[
                        go.Table(
                            header=dict(
                                values=list(result.columns),
                                fill_color="lightgray",
                                font=dict(color="black"),
                                align="left",
                            ),
                            cells=dict(
                                values=[result[x] for x in result.columns],
                                fill_color=base,
                                format=[""]
                                + [".2f" for _ in range(len(df.columns) - 1)],
                                font=dict(color="black"),
                                align="left",
                            ),
                        )
                    ],
                )
                fig.update_layout(
                    autosize=True,
                    height=600,
                    showlegend=False,
                )
                if os.environ.get("SERVER_SOFTWARE", "jupyter").startswith("voila"):
                    fig.show(config={"showTips": False}, renderer="notebook")
                else:
                    fig.show(config={"showTips": False})
# ---- Widget construction and layout for the correlation dashboard ----
w_auto = widgets.Layout(width="auto")
data_opts = ["Open", "Close", "High", "Low", "Volume"]
data_widget = widgets.Dropdown(
    options=data_opts, value="Close", layout=w_auto, description="Data"
)
# Default range: the last year.
base_date = (datetime.today() - timedelta(days=365)).date()
start_widget = widgets.DatePicker(value=base_date, layout=w_auto, description="Start")
end_widget = widgets.DatePicker(
    value=datetime.today().date(), layout=w_auto, description="End"
)
interval_widget = widgets.Dropdown(
    options=interval_opts, value="1d", layout=w_auto, description="Interval"
)
# Comma-terminated ticker list; the callback only fires on a trailing comma.
tickers_widget = widgets.Textarea(value="TSLA,AAPL,", layout=w_auto)
data_box = widgets.VBox([tickers_widget, data_widget])
date_box = widgets.VBox([start_widget, end_widget, interval_widget])
controls = widgets.HBox([data_box, date_box], layout=widgets.Layout(width="60%"))
chart = Chart()
stocks_view = widgets.interactive_output(
    chart.create,
    {
        "data": data_widget,
        "start": start_widget,
        "end": end_widget,
        "interval": interval_widget,
        "tickers": tickers_widget,
    },
)
title_html = "<h1>Correlation Analysis Dashboard</h1>"
warning_html = '<p style="color:red">Use a comma after EVERY stock typed.</p>'
app_contents = [
    widgets.HTML(title_html),
    controls,
    widgets.HTML(warning_html),
    stocks_view,
]
app = widgets.VBox(app_contents)
display(app)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/dashboards/voila/correlation.ipynb | 0.412885 | 0.337067 | correlation.ipynb | pypi |
```
from datetime import datetime, timedelta
from typing import Callable, Any
from inspect import signature
import warnings, logging
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
import ipywidgets as widgets
import yfinance as yf
import pandas as pd
from IPython.display import display
from openbb_terminal.sdk import openbb, theme
# Render matplotlib inline as SVG and apply the OpenBB chart theme.
%matplotlib inline
matplotlib_inline.backend_inline.set_matplotlib_formats("svg")
theme.applyMPLstyle()
# The forecast SDK submodule is optional (heavy darts/torch dependencies).
# If it is missing, fall back to empty option maps and show a warning app.
try:
    has_forecast = True
    # Model name -> chart function; drives the model Select widget.
    model_opts = {
        "expo": openbb.forecast.expo_chart,
        "theta": openbb.forecast.theta_chart,
        "linregr": openbb.forecast.linregr_chart,
        "regr": openbb.forecast.regr_chart,
        "rnn": openbb.forecast.rnn_chart,
        "brnn": openbb.forecast.brnn_chart,
        "nbeats": openbb.forecast.nbeats_chart,
        "tcn": openbb.forecast.tcn_chart,
        "trans": openbb.forecast.trans_chart,
        "tft": openbb.forecast.tft_chart,
        "nhits": openbb.forecast.nhits_chart,
    }
    # Feature-engineering name -> function; drives the Feature dropdown.
    feat_engs = {
        "ema": openbb.forecast.ema,
        "sto": openbb.forecast.sto,
        "rsi": openbb.forecast.rsi,
        "roc": openbb.forecast.roc,
        "mom": openbb.forecast.mom,
        "atr": openbb.forecast.atr,
        "delta": openbb.forecast.delta,
        "signal": openbb.forecast.signal,
    }
except AttributeError as e:
    print(e)
    has_forecast = False
    model_opts = {}
    feat_engs = {}
# Bar intervals accepted by yfinance; shown in the "Interval" dropdown.
interval_opts = [
    "1m",
    "2m",
    "5m",
    "15m",
    "30m",
    "60m",
    "90m",
    "1h",
    "1d",
    "5d",
    "1wk",
    "1mo",
    "3mo",
]
def format_df(df: pd.DataFrame) -> pd.DataFrame:
    """Normalize a yfinance download for the forecast models.

    Flattens any (field, ticker) MultiIndex columns into "field_ticker",
    promotes the index to a column, and lowercases all column names.
    """
    if len(df.columns) != 6:
        # More/fewer than 6 flat columns => multi-ticker MultiIndex download.
        flattened = ["_".join(parts).strip() for parts in df.columns.values]
        df.columns = flattened
    df.reset_index(inplace=True)
    df.columns = [name.lower() for name in df.columns]
    return df
def has_parameter(func: Callable[..., Any], parameter: str) -> bool:
    """Return True when `func`'s signature declares a parameter named `parameter`."""
    return parameter in signature(func).parameters
class Chart:
    """State holder wiring the forecast widgets to the SDK model functions.

    `handle_changes` only *stages* the model run (data slice, target,
    kwargs); the actual model execution happens in `handle_click` when the
    Run-Model toggle fires. Feature engineering is staged/applied the same
    way through `handle_eng`/`handle_click2`.
    """

    def __init__(self):
        # Cache keys for the last yfinance download.
        self.last_tickers = ""
        self.last_interval = "1d"
        self.df = pd.DataFrame()
        self.result = pd.DataFrame()
        self.infos = {}

    def handle_changes(
        self,
        past_covariates,
        start,
        end,
        interval,
        tickers,
        target_column,
        model,
        naive,
        forecast_only,
    ):
        """Refresh data/options when any input widget changes (no model run).

        Only acts when the ticker text ends with a comma. NOTE the early
        `return`s below: when the download changes the option lists, this
        callback fires again via the widget updates and finishes then.
        """
        if tickers and tickers[-1] == ",":
            if tickers != self.last_tickers or interval != self.last_interval:
                if interval in ["1d", "5d", "1wk", "1mo", "3mo"]:
                    self.df = yf.download(
                        tickers, period="max", interval=interval, progress=False
                    )
                else:
                    # yfinance intraday end date is exclusive; pad by a day.
                    end_date = end + timedelta(days=1)
                    self.df = yf.download(
                        tickers,
                        start=start,
                        end=end_date,
                        interval=interval,
                        progress=False,
                    )
                self.df = format_df(self.df)
                self.last_tickers = tickers
                self.last_interval = interval
            forecast_model = model_opts[model]
            self.forecast_model = forecast_model
            # Only some models accept past covariates; gate the widget.
            contains_covariates = has_parameter(forecast_model, "past_covariates")
            # Update Inputs
            if list(target_widget.options) != [
                x for x in self.df.columns if x != "date"
            ]:
                target_widget.options = [x for x in self.df.columns if x != "date"]
                return
            if list(past_covs_widget.options) != [
                x for x in self.df.columns if x != "date"
            ]:
                past_covs_widget.options = [x for x in self.df.columns if x != "date"]
                past_covs_widget.disabled = not contains_covariates
                return
            if past_covs_widget.disabled == contains_covariates:
                past_covs_widget.disabled = not contains_covariates
            column_widget.options = [x for x in self.df.columns if x != "date"]
            start_n = datetime(start.year, start.month, start.day)
            end_n = datetime(end.year, end.month, end.day)
            calcs = self.df
            # Daily-or-coarser data was downloaded with period="max";
            # slice it down to the selected window here.
            if interval in ["1d", "5d", "1wk", "1mo", "3mo"]:
                result = calcs.loc[
                    (calcs["date"] >= start_n) & (calcs["date"] <= end_n)
                ]
            else:
                result = calcs
            if not target_column:
                target_column = self.df.columns[0]
            # Build kwargs only for parameters this model actually accepts.
            kwargs = {}
            if contains_covariates and past_covariates != ():
                kwargs["past_covariates"] = ",".join(past_covariates)
            if has_parameter(forecast_model, "naive"):
                kwargs["naive"] = naive
            if has_parameter(forecast_model, "forecast_only"):
                kwargs["forecast_only"] = forecast_only
            # This sets up everything to run the function on button click
            self.result = result
            self.target_column = target_column
            self.n_predict = 5
            self.kwargs = kwargs

    def handle_click(self, to_run):
        """Run-Model toggle callback: reset the toggle, or run the staged model."""
        if to_run:
            run_widget.value = False
        else:
            df = self.result.dropna()
            if not df.empty:
                self.forecast_model(
                    self.result,
                    target_column=self.target_column,
                    n_predict=5,
                    **self.kwargs
                )

    def handle_eng(self, target, feature):
        """Stage a feature-engineering function and its target column."""
        self.feature_target = target
        self.feature_model = feat_engs[feature]

    def handle_click2(self, to_run):
        """Add-Feature toggle callback: reset the toggle, or apply the staged feature."""
        if to_run:
            add_widget.value = False
        else:
            kwargs = {}
            if has_parameter(self.feature_model, "target_column"):
                kwargs["target_column"] = self.feature_target
            self.df = self.feature_model(self.df, **kwargs)
            # New engineered columns become selectable as past covariates.
            past_covs_widget.options = self.df.columns
# ---- Widget construction for the forecasting dashboard ----
w_auto = widgets.Layout(width="auto")
# Guard against the no-forecast case where the option maps are empty.
model_value = list(model_opts)[0] if model_opts else None
model_widget = widgets.Select(
    options=list(model_opts),
    value=model_value,
    layout=widgets.Layout(
        width="8%",
    ),
)
# Past-covariate columns; populated/enabled dynamically by handle_changes.
past_covs_widget = widgets.SelectMultiple(
    options=[""],
    value=[""],
    layout=widgets.Layout(
        width="8%",
    ),
)
# Default range: the last year.
base_date = (datetime.today() - timedelta(days=365)).date()
start_widget = widgets.DatePicker(value=base_date, layout=w_auto, description="Start")
end_widget = widgets.DatePicker(
    value=datetime.today().date(), layout=w_auto, description="End"
)
target_widget = widgets.Dropdown(options=[""], value="", description="Target")
interval_widget = widgets.Dropdown(
    options=interval_opts, value="1d", layout=w_auto, description="Interval"
)
tickers_widget = widgets.Textarea(
    value="TSLA,", layout=widgets.Layout(width="auto", height="100%")
)
# Output box
naive_widget = widgets.ToggleButton(value=False, description="Show Naive")
forecast_only_widget = widgets.ToggleButton(value=False, description="Forecast Only")
run_widget = widgets.ToggleButton(value=False, description="Run Model")
# feat_box
feat_value = list(feat_engs.keys())[0] if feat_engs else None
column_widget = widgets.Dropdown(options=[""], value="", description="Target")
feat_widget = widgets.Dropdown(
    options=list(feat_engs.keys()), value=feat_value, description="Feature"
)
add_widget = widgets.ToggleButton(
    value=False, description="Add Feature", layout=widgets.Layout(align="flex_end")
)  # get this to work
selection_box = widgets.VBox([tickers_widget, target_widget])
date_box = widgets.VBox([start_widget, end_widget, interval_widget])
output_box = widgets.VBox([naive_widget, forecast_only_widget, run_widget])
feat_box = widgets.VBox([column_widget, feat_widget, add_widget])
controls = widgets.HBox(
    [selection_box, model_widget, past_covs_widget, date_box, output_box, feat_box],
)
# Wire the callbacks and assemble the app; when the forecast SDK submodule
# is unavailable, fall back to a title + warning only.
if has_forecast:
    chart = Chart()
    widgets.interactive_output(
        chart.handle_changes,
        {
            "past_covariates": past_covs_widget,
            "start": start_widget,
            "end": end_widget,
            "interval": interval_widget,
            "tickers": tickers_widget,
            "target_column": target_widget,
            "model": model_widget,
            "naive": naive_widget,
            "forecast_only": forecast_only_widget,
        },
    )
    widgets.interactive_output(
        chart.handle_eng, {"target": column_widget, "feature": feat_widget}
    )
    widgets.interactive_output(chart.handle_click2, {"to_run": add_widget})
    stocks_view = widgets.interactive_output(chart.handle_click, {"to_run": run_widget})
    title_html = "<h1>Timeseries Forecasting Dashboard</h1>"
    # FIX: this tag previously read '<p style="color:red"=>' — the stray '='
    # after the attribute value is malformed HTML.
    warning_html = '<p style="color:red">Use a comma after EVERY stock typed.</p>'
    app_contents = [
        widgets.HTML(title_html),
        controls,
        widgets.HTML(warning_html),
        stocks_view,
    ]
    app = widgets.VBox(app_contents)
else:
    title_html = "<h1>Timeseries Forecasting Dashboard</h1>"
    # FIX: same malformed '"color:red"=>' attribute corrected here.
    warning_html = (
        '<p style="color:red">The forecasting dependencies are not installed.</p>'
    )
    app = widgets.VBox([widgets.HTML(title_html), widgets.HTML(warning_html)])
display(app)
```
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/dashboards/voila/forecast.ipynb | 0.656768 | 0.350408 | forecast.ipynb | pypi |
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.mutual_funds import investpy_model
from openbb_terminal.rich_config import console
from openbb_terminal.config_terminal import theme
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_search(
    by: str = "name",
    value: str = "",
    country: str = "united states",
    limit: int = 10,
    sortby: str = "",
    ascend: bool = False,
):
    """Show mutual funds that match a search query.

    Parameters
    ----------
    by : str
        Field to match on. Can be name, issuer, isin or symbol
    value : str
        String that will be searched for
    country: str
        Country to filter on
    limit: int
        Number to show
    sortby: str
        Column to sort by
    ascend: bool
        Flag to sort in ascending order
    """
    results = investpy_model.search_funds(by, value)
    if results.empty:
        console.print("No matches found.\n")
        return
    # Narrow down to the requested country before dropping the column
    if country:
        results = results[results.country == country]
        if results.empty:
            console.print(f"No matches found in {country}.\n")
            return
    results = results.drop(columns=["country", "underlying"])
    if sortby:
        results = results.sort_values(by=sortby, ascending=ascend)
    print_rich_table(
        results.head(limit),
        show_index=False,
        title=f"[bold]Mutual Funds with {by} matching {value}[/bold]",
    )
@log_start_end(log=logger)
def display_overview(country: str = "united states", limit: int = 10, export: str = ""):
    """Show a table with the main funds from a country.

    Parameters
    ----------
    country: str
        Country to get overview for
    limit: int
        Number to show
    export : str
        Format to export data
    """
    df = investpy_model.get_overview(country=country, limit=limit)
    if df.empty:
        return
    # Express assets in billions for readability, then hide the raw columns
    df["Assets (1B)"] = df.total_assets / 1_000_000_000
    df = df.drop(columns=["country", "total_assets"])
    print_rich_table(df, title=f"[bold]Fund overview for {country.title()}[/bold]")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        f"overview_{country.replace(' ','_')}",
        df,
    )
@log_start_end(log=logger)
def display_fund_info(name: str, country: str = "united states"):
    """Display fund information. Finds name from symbol first if name is false

    Parameters
    ----------
    name: str
        Fund name to get info for
    country : str
        Country of fund
    """
    info = (
        investpy_model.get_fund_info(name, country)
        .reset_index(drop=False)
        .applymap(lambda x: np.nan if not x else x)
        .dropna()
    )
    if info.empty:
        return
    # Redact inception date if it appears castable to a float
    try:
        float(info[0].loc[info["index"] == "Inception Date"].values[0])
        info.loc[info["index"] == "Inception Date", 0] = "-"
    except (ValueError, IndexError):
        # ValueError: the date is not float-like, keep it as-is.
        # IndexError: no "Inception Date" row at all — previously uncaught
        # and would crash on `.values[0]`.
        pass
    print_rich_table(
        info,
        title=f"[bold]{name.title()} Information[/bold]",
        show_index=False,
        headers=["Info", "Value"],
    )
@log_start_end(log=logger)
def display_historical(
    data: pd.DataFrame,
    name: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display historical fund price

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe containing historical data
    name: str
        Fund symbol or name
    export: str
        Format to export data
    external_axes:Optional[List[plt.Axes]]:
        External axes to plot on
    """
    console.print()
    # Bail out before any figure is created: previously an empty figure was
    # allocated (and never closed) when `data` was empty.
    if data.empty:
        return
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]
    ax.plot(data.index, data.Close)
    ax.set_xlim([data.index[0], data.index[-1]])
    ax.set_xlabel("Date")
    ax.set_ylabel("Close Price")
    ax.set_title(f"{name.title()} Price History")
    theme.style_primary_axis(ax)
    # Only render when we own the figure; external callers manage their own
    if external_axes is None:
        theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "historical", data)
__docformat__ = "numpy"
import logging
import os
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.mutual_funds import yfinance_model
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_sector(name: str, min_pct_to_display: float = 5, export: str = ""):
    """Display sector weightings for fund

    Parameters
    ----------
    name: str
        Fund symbol
    min_pct_to_display: float
        Minimum percentage to display sector
    export: str
        Type of format to export data
    """
    sector_weights = yfinance_model.get_information(name)
    if "sectorWeightings" not in sector_weights.keys():
        console.print(
            f"Sector Weights are not found for {name}. Either the symbol is incorrect or there "
            "is an issue in pulling from yahoo.\n"
        )
        return
    sector_weights = sector_weights["sectorWeightings"]
    # yahoo returns a list of single-entry dicts; flatten into one mapping
    weights = {}
    for weight in sector_weights:
        weights.update(weight)
    df_weight = pd.DataFrame.from_dict(weights, orient="index")
    if df_weight.empty:
        console.print("No sector data found.\n")
        # FIX: previously fell through and crashed when assigning column
        # names to an empty frame.
        return
    df_weight = df_weight.apply(lambda x: round(100 * x, 3))
    df_weight.columns = ["Weight"]
    df_weight.sort_values(by="Weight", inplace=True, ascending=False)
    # Prettify yahoo's lowercase/underscored sector keys
    df_weight.index = [
        "Real Estate" if x == "realestate" else x.replace("_", " ").title()
        for x in df_weight.index
    ]
    print_rich_table(
        df_weight,
        show_index=True,
        index_name="Sector",
        headers=["Weight (%)"],
        title=f"[bold]{name.upper()} Sector Weightings[/bold] ",
    )
    # Group everything below the threshold into an "Others" slice
    main_holdings = df_weight[df_weight.Weight > min_pct_to_display].to_dict()[
        df_weight.columns[0]
    ]
    if len(main_holdings) < len(df_weight):
        main_holdings["Others"] = 100 - sum(main_holdings.values())
    legend, values = zip(*main_holdings.items())
    leg = [f"{le}\n{round(v, 2)}%" for le, v in zip(legend, values)]
    fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    ax.pie(
        values,
        labels=leg,
        wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
        labeldistance=1.05,
        startangle=90,
    )
    ax.set_title(f"Sector holdings of {name.upper()}")
    fig.tight_layout()
    if obbff.USE_ION:
        plt.ion()
    plt.show()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "sector", df_weight)
@log_start_end(log=logger)
def display_equity(name: str):
    """Display equity holdings for fund

    Parameters
    ----------
    name: str
        Fund symbol
    """
    # Human-readable labels for yahoo's camelCase equityHoldings keys
    title_map = {
        "priceToCashflow": "Price To Cash Flow",
        "priceToSales": "Price To Sales",
        "priceToBookCat": "Price To Book Cat",
        "priceToEarningsCat": "Price To Earnings Cat",
        "medianMarketCapCat": "Median Market Cap Cat",
        "threeYearEarningsGrowthCat": "3Yr Earnings Growth Cat",
        "threeYearEarningsGrowth": "3Y Earnings Growth",
        "medianMarketCap": "Median Market Cap",
        "priceToEarnings": "Price To Earnings",
        "priceToBook": "Price To Book",
        "priceToSalesCat": "Price To Sales Cat",
        "priceToCashflowCat": "Price To Cashflow Cat",
    }
    fund_info = yfinance_model.get_information(name)
    # Guard against a missing section, consistent with display_sector;
    # previously this raised an unhandled KeyError.
    if "equityHoldings" not in fund_info:
        console.print(
            f"Equity holdings are not found for {name}. Either the symbol is incorrect or there "
            "is an issue in pulling from yahoo.\n"
        )
        return
    equity_hold = fund_info["equityHoldings"]
    df_weight = pd.DataFrame.from_dict(equity_hold, orient="index")
    df_weight = df_weight.apply(lambda x: round(100 * x, 3))
    df_weight.index = df_weight.index.map(title_map)
    print_rich_table(
        df_weight,
        show_index=True,
        index_name="Equity",
        headers=["Holding"],
        title=f"[bold]{name.upper()} Equity Holdings[/bold] ",
    )
__docformat__ = "numpy"
import argparse
import logging
import os
from datetime import datetime, timedelta
from typing import List
import investpy
import pandas as pd
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_non_negative_float,
check_positive,
valid_date,
)
from openbb_terminal.menu import session
from openbb_terminal.mutual_funds import (
investpy_model,
investpy_view,
yfinance_view,
avanza_view,
)
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class FundController(BaseController):
    """Fund Controller class"""

    # Commands this menu exposes to the user
    CHOICES_COMMANDS = [
        "resources",
        "country",
        "search",
        "info",
        "load",
        "plot",
        "sector",
        "equity",
        "alswe",
        "infoswe",
        "forecast",
    ]
    # Valid countries come straight from investpy at class-definition time
    fund_countries = investpy.funds.get_fund_countries()
    search_by_choices = ["name", "issuer", "isin", "symbol"]
    search_cols = [
        "country",
        "name",
        "symbol",
        "issuer",
        "isin",
        "asset_class",
        "currency",
        "underlying",
    ]
    focus_choices = ["all", "country", "sector", "holding"]
    PATH = "/funds/"
    FILE_PATH = os.path.join(os.path.dirname(__file__), "README.md")

    # NOTE(review): annotation says List[str] but the default is None — callers
    # presumably pass None for "no queue"; confirm before tightening the type.
    def __init__(self, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)
        self.country = "united states"
        self.data = pd.DataFrame()
        self.fund_name = ""
        self.fund_symbol = ""
        self.TRY_RELOAD = True
        # Build tab-completion choices only in an interactive prompt session
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = {c: {} for c in self.controller_choices}
            one_to_hundred: dict = {str(c): {} for c in range(1, 100)}
            choices["country"] = {c: {} for c in self.fund_countries}
            choices["overview"] = {
                "--limit": None,
                "-l": "--limit",
            }
            choices["search"] = {
                "--by": {c: {} for c in self.search_by_choices},
                "-b": "--by",
                "--fund": None,
                "--sortby": {c: None for c in self.search_cols},
                "-s": "--sortby",
                "--limit": None,
                "-l": "--limit",
                "--reverse": {},
                "-r": "--reverse",
            }
            choices["load"] = {
                "--fund": None,
                "--name": {},
                "-n": "--name",
                "--start": None,
                "-s": "--start",
                "--end": None,
                "-e": "--end",
            }
            choices["sector"] = {
                "--min": one_to_hundred,
                "-m": "--min",
            }
            choices["alswe"] = {"--focus": {c: {} for c in self.focus_choices}}
            choices["support"] = self.SUPPORT_CHOICES
            choices["about"] = self.ABOUT_CHOICES
            self.completer = NestedCompleter.from_nested_dict(choices)

    def print_help(self):
        """Print help"""
        # Show "Name (SYMBOL)" when both are known, just the name otherwise
        if self.fund_name:
            if self.fund_symbol:
                fund_string = f"{self.fund_name} ({self.fund_symbol})"
            else:
                fund_string = f"{self.fund_name}"
        else:
            fund_string = ""
        mt = MenuText("funds/")
        mt.add_cmd("country")
        mt.add_raw("\n")
        mt.add_param("_country", self.country.title())
        mt.add_raw("\n")
        mt.add_cmd("search")
        mt.add_cmd("load")
        mt.add_raw("\n")
        mt.add_param("_fund", fund_string)
        mt.add_raw("\n")
        mt.add_cmd("info", self.fund_symbol)
        mt.add_cmd("plot", self.fund_symbol)
        # sector/equity are YFinance-backed (US only); alswe/infoswe are
        # Avanza-backed (Sweden only)
        if self.country == "united states":
            mt.add_cmd("sector", self.fund_symbol)
            mt.add_cmd("equity", self.fund_symbol)
        if self.country == "sweden":
            mt.add_cmd("alswe", self.fund_symbol)
            mt.add_cmd("infoswe", self.fund_symbol)
        mt.add_cmd("forecast", self.fund_symbol)
        console.print(text=mt.menu_text, menu="Mutual Funds")

    def custom_reset(self):
        """Class specific component of reset command"""
        if self.fund_name:
            return ["funds", f"load {self.fund_name} --name"]
        return []

    @log_start_end(log=logger)
    def call_country(self, other_args: List[str]):
        """Process country command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="country",
            description="Set a country for funds",
        )
        parser.add_argument(
            "-n",
            "--name",
            type=str,
            dest="name",
            nargs="+",
            help="country to select",
        )
        # Allow `country sweden` shorthand without the -n flag
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            country_candidate = " ".join(ns_parser.name)
            if country_candidate.lower() in self.fund_countries:
                self.country = " ".join(ns_parser.name)
            else:
                console.print(
                    f"{country_candidate.lower()} not a valid country to select."
                )
        return self.queue

    @log_start_end(log=logger)
    def call_search(self, other_args: List[str]):
        """Process country command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="search",
            description="Search mutual funds in selected country based on selected field.",
        )
        parser.add_argument(
            "-b",
            "--by",
            choices=self.search_by_choices,
            default="name",
            dest="by",
            help="Field to search by",
        )
        parser.add_argument(
            "--fund",
            help="Fund string to search for",
            dest="fund",
            type=str,
            nargs="+",
            required="-h" not in other_args,
        )
        parser.add_argument(
            "-s",
            "--sortby",
            dest="sortby",
            choices=self.search_cols,
            help="Column to sort by",
            default="name",
        )
        parser.add_argument(
            "-l",
            "--limit",
            help="Number of search results to show",
            type=check_positive,
            dest="limit",
            default=10,
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            dest="reverse",
            default=False,
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "--fund")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            search_string = " ".join(ns_parser.fund)
            investpy_view.display_search(
                by=ns_parser.by,
                value=search_string,
                country=self.country,
                limit=ns_parser.limit,
                sortby=ns_parser.sortby,
                ascend=ns_parser.reverse,
            )
        return self.queue

    @log_start_end(log=logger)
    def call_overview(self, other_args: List[str]):
        """Process country command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="overview",
            description="Show overview of funds from selected country.",
        )
        parser.add_argument(
            "-l",
            "--limit",
            help="Number of search results to show",
            type=check_positive,
            dest="limit",
            default=10,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            investpy_view.display_overview(
                self.country, limit=ns_parser.limit, export=ns_parser.export
            )
        return self.queue

    @log_start_end(log=logger)
    def call_info(self, other_args: List[str]):
        """Process country command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="info",
            description="Get fund information.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if not self.fund_name:
                console.print(
                    "No fund loaded. Please use `load` first to plot.\n", style="bold"
                )
                return self.queue
            investpy_view.display_fund_info(self.fund_name, country=self.country)
        return self.queue

    @log_start_end(log=logger)
    def call_load(self, other_args: List[str]):
        """Process country command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="load",
            description="Get historical data.",
        )
        parser.add_argument(
            "--fund",
            help="Fund string to search for",
            dest="fund",
            type=str,
            nargs="+",
            required="-h" not in other_args,
        )
        parser.add_argument(
            "-n",
            "--name",
            action="store_true",
            default=False,
            dest="name",
            help="Flag to indicate name provided instead of symbol.",
        )
        # Keeping the date format constant for investpy even though it needs to be reformatted in model
        parser.add_argument(
            "-s",
            "--start",
            type=valid_date,
            default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
            dest="start",
            help="The starting date (format YYYY-MM-DD) of the fund",
        )
        parser.add_argument(
            "-e",
            "--end",
            type=valid_date,
            default=datetime.now().strftime("%Y-%m-%d"),
            dest="end",
            help="The ending date (format YYYY-MM-DD) of the fund",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "--fund")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            parsed_fund = " ".join(ns_parser.fund)
            # The model resolves name<->symbol and may also correct the country
            (
                self.data,
                self.fund_name,
                self.fund_symbol,
                self.country,
            ) = investpy_model.get_fund_historical(
                name=parsed_fund,
                by_name=ns_parser.name,
                country=self.country,
                start_date=ns_parser.start,
                end_date=ns_parser.end,
            )
            if self.data.empty:
                console.print(
                    """No data found.
Potential errors
    -- Incorrect country specified
    -- ISIN supplied instead of symbol
    -- Name used, but --name flag not passed"""
                )
        return self.queue

    @log_start_end(log=logger)
    def call_plot(self, other_args: List[str]):
        """Process country command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="plot",
            description="Plot historical data.",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
        )
        if ns_parser:
            if not self.fund_symbol:
                console.print(
                    "No fund loaded. Please use `load` first to plot.\n", style="bold"
                )
                return self.queue
            investpy_view.display_historical(
                self.data, name=self.fund_name, export=ns_parser.export
            )
        return self.queue

    @log_start_end(log=logger)
    def call_sector(self, other_args: List[str]):
        """Process sector command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="sector",
            description="Show fund sector weighting.",
        )
        parser.add_argument(
            "-m",
            "--min",
            type=check_non_negative_float,
            dest="min",
            help="Minimum positive float to display sector",
            default=5,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            if self.country != "united states":
                console.print(
                    "YFinance implementation currently only supports funds from united states"
                )
                return self.queue
            if not self.fund_symbol or not self.fund_name:
                console.print(
                    "No fund loaded. Please use `load` first to plot.\n", style="bold"
                )
                return self.queue
            yfinance_view.display_sector(
                self.fund_symbol,
                min_pct_to_display=ns_parser.min,
                export=ns_parser.export,
            )
        return self.queue

    @log_start_end(log=logger)
    def call_equity(self, other_args: List[str]):
        """Process equity command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="equity",
            description="Show fund equity holdings.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if self.country != "united states":
                console.print(
                    "YFinance implementation currently only supports funds from united states"
                )
                return self.queue
            if not self.fund_symbol or not self.fund_name:
                console.print(
                    "No fund loaded. Please use `load` first to plot.\n", style="bold"
                )
                return self.queue
            yfinance_view.display_equity(self.fund_symbol)
        return self.queue

    @log_start_end(log=logger)
    def call_alswe(self, other_args: List[str]):
        """Process alswe command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="alswe",
            description="Show allocation of a swedish fund.",
        )
        parser.add_argument(
            "--focus",
            dest="focus",
            type=str,
            choices=self.focus_choices,
            default="all",
            help="The focus of the funds exposure/allocation",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # CSV of Avanza fund IDs, read relative to the working directory
            ava_fund = pd.read_csv(
                os.path.join("openbb_terminal", "mutual_funds", "avanza_fund_ID.csv"),
                index_col=0,
            )
            if self.country != "sweden":
                console.print(
                    "Avanza implementation currently only supports funds from sweden."
                )
                return self.queue
            if self.fund_name == "":
                if self.fund_symbol != "":
                    # NOTE(review): get_fund_name_from_symbol returns a
                    # (name, country) tuple, yet the result is stored on
                    # fund_symbol while fund_name stays "" — the subsequent
                    # fund_name.upper() check then cannot succeed. Looks
                    # buggy; confirm intended behavior.
                    self.fund_symbol = investpy_model.get_fund_name_from_symbol(
                        self.fund_symbol
                    )
                else:
                    console.print(
                        "No fund loaded. Please use `load` first.\n", style="bold"
                    )
                    return self.queue
            if self.fund_name.upper() not in ava_fund.index.str.upper().to_numpy():
                console.print("No fund data. Please use another fund", style="bold")
                return self.queue
            avanza_view.display_allocation(self.fund_name, ns_parser.focus)
        return self.queue

    @log_start_end(log=logger)
    def call_infoswe(self, other_args: List[str]):
        """Process infoswe command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="infoswe",
            description="Show fund info of a swedish fund.",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            ava_fund = pd.read_csv(
                os.path.join("openbb_terminal", "mutual_funds", "avanza_fund_ID.csv"),
                index_col=0,
            )
            if self.country != "sweden":
                console.print(
                    "Avanza implementation currently only supports funds from sweden."
                )
                return self.queue
            if self.fund_name == "":
                if self.fund_symbol != "":
                    # NOTE(review): same suspicious tuple-into-fund_symbol
                    # assignment as call_alswe — confirm intended behavior.
                    self.fund_symbol = investpy_model.get_fund_name_from_symbol(
                        self.fund_symbol
                    )
                else:
                    console.print(
                        "No fund loaded. Please use `load` first.\n", style="bold"
                    )
                    return self.queue
            if self.fund_name.upper() not in ava_fund.index.str.upper().to_numpy():
                console.print("No fund data. Please use another fund", style="bold")
                return self.queue
            avanza_view.display_info(self.fund_name)
        return self.queue

    @log_start_end(log=logger)
    def call_forecast(self, _):
        """Process forecast command"""
        # pylint: disable=import-outside-toplevel
        from openbb_terminal.forecast import forecast_controller

        self.queue = self.load_class(
            forecast_controller.ForecastController,
            self.fund_name,
            self.data,
            self.queue,
        )
__docformat__ = "numpy"
import logging
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.mutual_funds import avanza_model
logger = logging.getLogger(__name__)
# Translation table from Avanza's Swedish sector labels to English,
# used when rendering a fund's sector allocation.
sector_dict = {
    "Industri": "Industry",
    "Konsument, cyklisk": "Consumer goods, cyclical",
    "Finans": "Finance",
    "Konsument, stabil": "Consumer goods, stable",
    "Sjukvård": "Health Care",
    "Teknik": "Technology",
    "Fastigheter": "Real Estate",
    "Råvaror": "Commodities",
    "Kommunikation": "Telecommunication",
    "Allmännyttigt": "Utilities",
    "Energi": "Energy",
}
@log_start_end(log=logger)
def display_allocation(name: str, focus: str):
    """Displays the allocation of the selected swedish fund

    Parameters
    ----------
    name: str
        Full name of the fund
    focus: str
        The focus of the displayed allocation/exposure of the fund
        (one of "holding", "sector", "country" or "all")
    """
    # Taken from: https://github.com/northern-64bit/Portfolio-Report-Generator/tree/main
    fund_data = avanza_model.get_data(name.upper())

    def _print_chart_table(chart_key, row_builder, headers, title):
        """Build one allocation table from an Avanza chart-data section and print it."""
        console.print("")
        rows = [row_builder(entry) for entry in fund_data[chart_key]]
        frame = pd.DataFrame(rows, columns=headers)
        print_rich_table(frame, title=title, headers=headers)

    if focus in ["holding", "all"]:
        _print_chart_table(
            "holdingChartData",
            lambda d: [d["name"], str(d["y"]), d["countryCode"]],
            ["Holding", "Allocation in %", "Country"],
            f"{name}'s Holdings",
        )
    if focus in ["sector", "all"]:
        # Fall back to the raw Swedish label when a sector is missing from
        # sector_dict — previously an unmapped sector raised KeyError.
        _print_chart_table(
            "sectorChartData",
            lambda d: [sector_dict.get(d["name"], d["name"]), str(d["y"])],
            ["Sector", "Allocation in %"],
            f"{name}'s Sector Weighting",
        )
    if focus in ["country", "all"]:
        _print_chart_table(
            "countryChartData",
            lambda d: [d["countryCode"], str(d["y"])],
            ["Country", "Allocation in %"],
            f"{name}'s Country Weighting",
        )
@log_start_end(log=logger)
def display_info(name: str):
    """Displays info of swedish funds

    Parameters
    ----------
    name: str
        Full name of the fund
    """
    info = avanza_model.get_data(name.upper())
    # Assemble the report from parts and join once at the end
    parts = [
        f"\nSwedish Description:\n\n{info['description']}\n\nThe fund is managed by:\n"
    ]
    for manager in info["fundManagers"]:
        parts.append(f"\t- {manager['name']} since {manager['startDate']}\n")
    parts.append(
        f"from {info['adminCompany']['name']}.\nFund currency is {info['currency']}"
        f" and it the fund started {info['startDate']}."
    )
    parts.append(
        " It is a index fund." if info["indexFund"] else " It is not a index fund."
    )
    parts.append(
        f" The fund manages {str(info['capital'])} {info['currency']}. The "
        f"standard deviation of the fund is {str(info['standardDeviation'])} and the sharpe "
        f"ratio is {str(info['sharpeRatio'])}.\n"
    )
    console.print("".join(parts))
__docformat__ = "numpy"
import logging
from datetime import datetime, timedelta
from typing import Tuple
import investpy
import investiny
import pandas as pd
from requests.exceptions import RequestException
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def search_funds(by: str = "name", value: str = "") -> pd.DataFrame:
    """Search investpy for matching funds

    Parameters
    ----------
    by : str
        Field to match on. Can be name, issuer, isin or symbol
    value : str
        String that will be searched for

    Returns
    -------
    pd.DataFrame
        Dataframe containing matches
    """
    try:
        return investpy.funds.search_funds(by=by, value=value)
    except RuntimeError as e:
        # investpy raises RuntimeError when nothing matches the query
        logger.exception(str(e))
        return pd.DataFrame()
    except (RequestException, ConnectionError):
        # Merged two identical handlers: requests' ConnectionError is a
        # RequestException subclass; the builtin ConnectionError covers
        # lower-level socket failures.
        console.print("[red]The request to the website failed[/red]\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_overview(country: str = "united states", limit: int = 20) -> pd.DataFrame:
    """Get an overview of the main funds from a country.

    Parameters
    ----------
    country: str
        Country to get overview for
    limit: int
        Number of results to get

    Returns
    -------
    pd.DataFrame
        Dataframe containing overview
    """
    try:
        return investpy.funds.get_funds_overview(
            country=country, as_json=False, n_results=limit
        )
    except (RequestException, ConnectionError):
        # Merged two identical network-failure handlers
        console.print("[red]The request to the website failed[/red]\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_fund_symbol_from_name(name: str) -> Tuple[str, str]:
    """Get fund symbol from name through investpy

    Parameters
    ----------
    name: str
        Name to get fund symbol of

    Returns
    -------
    Tuple[str, str]
        Name of Symbol matching provided name, Country in which matching symbol was found
    """
    try:
        name_search_results = investpy.search_funds(by="name", value=name)
    except (RequestException, ConnectionError):
        # Merged two identical network-failure handlers
        console.print("[red]The request to the website failed[/red]\n")
        return "", ""
    if name_search_results.empty:
        return "", ""
    # NOTE(review): label-based lookup `[0]` assumes the result index starts
    # at 0 — presumably true for investpy results; confirm.
    symbol = name_search_results.loc[:, "symbol"][0]
    country = name_search_results.country.values[0]
    console.print(
        f"Name: [cyan][italic]{symbol.upper()}[/italic][/cyan] found for {name} in country: {country.title()}."
    )
    return symbol, country
@log_start_end(log=logger)
def get_fund_name_from_symbol(symbol: str) -> Tuple[str, str]:
    """Get fund name from symbol from investpy

    Parameters
    ----------
    symbol: str
        Symbol to get fund name of

    Returns
    -------
    Tuple[str, str]
        Name of fund matching provided symbol, Country matching symbol
    """
    try:
        symbol_search_results = investpy.search_funds(by="symbol", value=symbol)
    except (RequestException, ConnectionError):
        # Merged two identical network-failure handlers
        console.print("[red]The request to the website failed[/red]\n")
        return "", ""
    if symbol_search_results.empty:
        return "", ""
    name = symbol_search_results.loc[:, "name"][0]
    country = symbol_search_results.loc[:, "country"][0]
    console.print(
        f"Name: [cyan][italic]{name.title()}[/italic][/cyan] found for {symbol} in country: {country.title()}."
    )
    return name, country
@log_start_end(log=logger)
def get_fund_info(name: str, country: str = "united states") -> pd.DataFrame:
    """Get fund information from investpy.

    Parameters
    ----------
    name: str
        Name of fund (not symbol) to get information
    country: str
        Country of fund

    Returns
    -------
    pd.DataFrame
        Dataframe of fund information
    """
    try:
        # Transposed so each attribute becomes a row
        return investpy.funds.get_fund_information(name, country).T
    except (RequestException, ConnectionError):
        # Merged two identical network-failure handlers
        console.print("[red]The request to the website failed[/red]\n")
        return pd.DataFrame()
@log_start_end(log=logger)
def get_fund_historical(
    name: str,
    country: str = "united states",
    by_name: bool = False,
    start_date: datetime = (datetime.now() - timedelta(days=366)),
    end_date: datetime = datetime.now(),
) -> Tuple[pd.DataFrame, str, str, str]:
    """Get historical fund data

    Parameters
    ----------
    name: str
        Fund to get data for. If using fund name, include `name=True`
    country: str
        Country of fund
    by_name : bool
        Flag to search by name instead of symbol
    start_date: datetime
        Start date of data in format YYYY-MM-DD.
        NOTE(review): the default is evaluated once at import time, so a
        long-running process keeps a stale "one year ago" — confirm whether
        callers always pass explicit dates.
    end_date: datetime
        End date of data in format YYYY-MM-DD (same import-time caveat)

    Returns
    -------
    Tuple[pd.DataFrame, str, str, str]
        Dataframe of OHLC prices, Fund name, Fund symbol, Country that matches search results
    """
    try:
        if by_name:
            fund_name = name
            fund_symbol, matching_country = get_fund_symbol_from_name(name)
        else:
            fund_symbol = name
            fund_name, matching_country = get_fund_name_from_symbol(name)
    except RuntimeError as e:
        logger.exception(str(e))
        return pd.DataFrame(), name, "", country
    except (RequestException, ConnectionError):
        # Merged two identical network-failure handlers
        console.print("[red]The request to the website failed[/red]\n")
        return "", "", "", ""
    # Note that dates for investpy need to be in the format mm/dd/yyyy
    from_date = start_date.strftime("%m/%d/%Y")
    to_date = end_date.strftime("%m/%d/%Y")
    search_country = matching_country if matching_country else country
    try:
        # Using investiny to get around the 403 error with investment.com api
        # We need to get an ID number
        id_number = int(investiny.search_assets(name, limit=1)[0]["ticker"])
        data = investiny.historical_data(
            id_number, from_date=from_date, to_date=to_date
        )
        df = pd.DataFrame.from_dict(data).set_index("date")
        df.columns = [col.title() for col in df.columns]
        df.index = pd.to_datetime(df.index)
        return df, fund_name, fund_symbol, matching_country
    except (RuntimeError, RequestException, ConnectionError) as e:
        # Merged three byte-identical handlers into one
        console.print("[red]Error connecting to the data source.[/red]\n")
        logger.exception(str(e))
        return pd.DataFrame(), fund_name, fund_symbol, search_country
__docformat__ = "numpy"
# pylint: disable=eval-used
import logging
import warnings
from itertools import combinations
from typing import Any, Dict, Tuple, Optional, Union
import pandas as pd
import statsmodels.api as sm
from scipy import stats
from statsmodels.tsa.stattools import adfuller, grangercausalitytests, kpss
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_options(
    datasets: Dict[str, pd.DataFrame], dataset_name: str = ""
) -> Dict[Union[str, Any], pd.DataFrame]:
    """Obtain columns-dataset combinations from loaded in datasets that can be used in other commands

    Parameters
    ----------
    datasets: dict
        The available datasets.
    dataset_name: str
        The dataset you wish to show the options for.

    Returns
    -------
    Dict[Union[str, Any], pd.DataFrame]
        A dictionary with a DataFrame for each option. With dataset_name set, only shows one
        options table.
    """
    # Restrict to the single requested dataset, otherwise use all of them;
    # this removes the previously duplicated table-building code in the two
    # branches.
    if dataset_name:
        selected = {dataset_name: datasets[dataset_name]}
    else:
        selected = datasets
    option_tables = {}
    for dataset, data_values in selected.items():
        columns = data_values.columns
        option_tables[dataset] = pd.DataFrame(
            {
                "column": columns,
                "option": [f"{dataset}.{column}" for column in columns],
            }
        )
    return option_tables
@log_start_end(log=logger)
def clean(
    dataset: pd.DataFrame,
    fill: str = "",
    drop: str = "",
    limit: Optional[int] = None,
) -> pd.DataFrame:
    """Clean up NaNs from the dataset

    Parameters
    ----------
    dataset : pd.DataFrame
        The dataset you wish to clean
    fill : str
        The method of filling NaNs. Choose from:
        rfill, cfill, rbfill, cbfill, rffill, cffill
    drop : str
        The method of dropping NaNs. Choose from:
        rdrop, cdrop
    limit : int
        The maximum limit you wish to apply that can be forward or backward filled

    Returns
    -------
    pd.DataFrame
        Dataframe with cleaned up data
    """
    # Dispatch tables: first letter selects the axis (r=index, c=columns),
    # the rest selects the fill strategy.
    fill_methods = {
        "rfill": lambda x: x.fillna(axis="index", value=0),
        "cfill": lambda x: x.fillna(axis="columns", value=0),
        "rbfill": lambda x: x.fillna(axis="index", method="bfill", limit=limit),
        "cbfill": lambda x: x.fillna(axis="columns", method="bfill", limit=limit),
        "rffill": lambda x: x.fillna(axis="index", method="ffill", limit=limit),
        "cffill": lambda x: x.fillna(axis="columns", method="ffill", limit=limit),
    }
    drop_axes = {"rdrop": "index", "cdrop": "columns"}
    if fill in fill_methods:
        dataset = fill_methods[fill](dataset)
    if drop in drop_axes:
        dataset = dataset.dropna(how="any", axis=drop_axes[drop])
    return dataset
@log_start_end(log=logger)
def get_normality(data: pd.Series) -> pd.DataFrame:
    """Generate statistics on the relation of a return distribution to the normal curve.

    Computes skew and kurtosis tests (third and fourth moments) plus the
    Jarque-Bera, Shapiro-Wilk and Kolmogorov-Smirnov tests for normality.

    Parameters
    ----------
    data : pd.Series
        A series or column of a DataFrame to test normality for.

    Returns
    -------
    pd.DataFrame
        Two rows ("Statistic", "p-value") with one column per test.
    """
    # Each pair: (column label, (test statistic, p-value)).
    # - kurtosistest: height/sharpness of the central peak vs the bell curve
    # - skewtest: asymmetry of the distribution about its mean
    # - jarque_bera: joint skewness/kurtosis goodness-of-fit vs normal
    # - shapiro: null hypothesis that data was drawn from a normal distribution
    # - kstest vs "norm": one-sample comparison to the normal distribution
    outcomes = [
        ("Kurtosis", stats.kurtosistest(data)),
        ("Skewness", stats.skewtest(data)),
        ("Jarque-Bera", stats.jarque_bera(data)),
        ("Shapiro-Wilk", stats.shapiro(data)),
        ("Kolmogorov-Smirnov", stats.kstest(data, "norm")),
    ]
    statistics = [statistic for _, (statistic, _) in outcomes]
    # Only the p-values are rounded, matching the original output.
    p_values = [round(pvalue, 5) for _, (_, pvalue) in outcomes]
    return pd.DataFrame(
        [statistics, p_values],
        columns=[label for label, _ in outcomes],
        index=["Statistic", "p-value"],
    )
@log_start_end(log=logger)
def get_root(
    data: pd.Series, fuller_reg: str = "c", kpss_reg: str = "c"
) -> pd.DataFrame:
    """Calculate test statistics for unit roots.

    Parameters
    ----------
    data : pd.Series
        Series or column of DataFrame of target variable
    fuller_reg : str
        Type of regression of ADF test
    kpss_reg : str
        Type of regression for KPSS test

    Returns
    -------
    pd.DataFrame
        Dataframe with results of ADF test and KPSS test
    """
    # The Augmented Dickey-Fuller test: used to test for a unit root in a univariate
    # process in the presence of serial correlation.
    result = adfuller(data, regression=fuller_reg)
    cols = ["Test Statistic", "P-Value", "NLags", "Nobs", "ICBest"]
    vals = [result[0], result[1], result[2], result[3], result[5]]
    # BUG FIX: store the results in a separate frame instead of rebinding `data`,
    # so the KPSS test below runs on the original series rather than on the
    # ADF results table.
    output = pd.DataFrame(data=vals, index=cols, columns=["ADF"])
    # Kwiatkowski-Phillips-Schmidt-Shin test: test for level or trend stationarity
    # Note: this test seems to produce an Interpolation Error which says
    # The test statistic is outside the range of p-values available in the
    # look-up table. The actual p-value is greater than the p-value returned.
    # Wrap this in catch_warnings to prevent this
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res2 = kpss(data, regression=kpss_reg, nlags="auto")
    # KPSS returns only (stat, p-value, nlags); pad Nobs/ICBest with 0.
    output["KPSS"] = [res2[0], res2[1], res2[2], 0, 0]
    return output
@log_start_end(log=logger)
def get_granger_causality(
    dependent_series: pd.Series, independent_series: pd.Series, lags: int = 3
) -> pd.DataFrame:
    """Calculate granger tests.

    Parameters
    ----------
    dependent_series: Series
        The series you want to test Granger Causality for.
    independent_series: Series
        The series that you want to test whether it Granger-causes dependent_series
    lags : int
        The amount of lags for the Granger test. By default, this is set to 3.

    Returns
    -------
    pd.DataFrame
        DataFrame containing results of Granger test, one row per test type
        with columns F-test, P-value, Count and Lags.
    """
    granger_set = pd.concat([dependent_series, independent_series], axis=1)
    granger = grangercausalitytests(granger_set, [lags], verbose=False)
    for test in granger[lags][0]:
        # As ssr_chi2test and lrtest have one less value in the tuple, we fill
        # this value with a '-' to allow the conversion to a DataFrame
        if len(granger[lags][0][test]) != 4:
            pars = granger[lags][0][test]
            granger[lags][0][test] = (pars[0], pars[1], "-", pars[2])
    # FIX: this function returns a DataFrame, not a dict — the annotation and
    # docstring previously claimed `dict`.
    granger_df = pd.DataFrame(
        granger[lags][0], index=["F-test", "P-value", "Count", "Lags"]
    ).T
    return granger_df
# TODO: Maybe make a new function to return z instead of having this flag.
# TODO: Allow for numpy arrays as well
def get_coint_df(
    *datasets: pd.Series, return_z: bool = False
) -> Union[pd.DataFrame, Dict]:
    """Calculate cointegration tests between variable number of input series.

    Parameters
    ----------
    datasets : pd.Series
        Input series to test cointegration for
    return_z : bool
        Flag to return the z data to plot

    Returns
    -------
    Union[pd.DataFrame,Dict]
        Dataframe with results of cointegration tests or a Dict of the z results
    """
    result: Dict[str, list] = {}
    z_values: Dict[str, pd.Series] = {}
    # The *datasets lets us pass in a variable number of arguments
    # Here we are getting all possible combinations of unique inputs
    pairs = list(combinations(datasets, 2))
    for x, y in pairs:
        if sum(y.isnull()) > 0:
            # BUG FIX: print the series name (previously printed the whole series).
            console.print(
                f"The Series {y.name} has nan-values. Please consider dropping or filling these "
                f"values with 'clean'."
            )
            continue
        if sum(x.isnull()) > 0:
            console.print(
                f"The Series {x.name} has nan-values. Please consider dropping or filling these "
                f"values with 'clean'."
            )
            continue
        if not y.index.equals(x.index):
            console.print(
                f"The Series {y.name} and {x.name} do not have the same index."
            )
            continue
        # BUG FIX: only run the cointegration test on pairs that passed the
        # checks above; previously the test ran even on invalid pairs.
        (
            c,
            gamma,
            alpha,
            z,
            adfstat,
            pvalue,
        ) = get_engle_granger_two_step_cointegration_test(x, y)
        result[f"{x.name}/{y.name}"] = [c, gamma, alpha, adfstat, pvalue]
        z_values[f"{x.name}/{y.name}"] = z
    if result and z_values:
        if return_z:
            return z_values
        df = pd.DataFrame.from_dict(
            result,
            orient="index",
            columns=["Constant", "Gamma", "Alpha", "Dickey-Fuller", "P Value"],
        )
        return df
    return pd.DataFrame()
def get_engle_granger_two_step_cointegration_test(
    dependent_series: pd.Series, independent_series: pd.Series
) -> Tuple[float, float, float, pd.Series, float, float]:
    """Apply the two-step Engle & Granger test for cointegration of series y and x.

    Step 1 estimates the long-run relationship
        y_t = c + gamma * x_t + z_t
    and step 2 the short-run relationship
        y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t,
    with z the residuals of the first equation.

    Cointegration is then tested via Dickey-Fuller, phi=1 vs phi < 1, in
        z_t = phi * z_(t-1) + eta_t.
    If phi < 1, the z series is concluded to be stationary and thus the series
    y and x are concluded to be cointegrated.

    Parameters
    ----------
    dependent_series : pd.Series
        The first time series of the pair to analyse.
    independent_series : pd.Series
        The second time series of the pair to analyse.

    Returns
    -------
    Tuple[float, float, float, pd.Series, float, float]
        c : float
            Constant term of the long-run relationship (static shift of y
            with respect to gamma * x).
        gamma : float
            Slope of the long-run relationship (ratio between the
            const-shifted y and x).
        alpha : float
            Error-correction coefficient of the short-run relationship,
            indicating the strength of reversion toward the long-run mean.
        z : pd.Series
            Residuals of the long-run relationship (the error correction term).
        dfstat : float
            Dickey-Fuller test statistic; more negative implies stronger
            cointegration.
        pvalue : float
            P-value of the Dickey-Fuller statistic; lower implies stronger
            evidence of cointegration.
    """
    # Step 1: long-run OLS of y on a constant and x.
    warnings.simplefilter(action="ignore", category=FutureWarning)
    long_run_model = sm.OLS(dependent_series, sm.add_constant(independent_series))
    warnings.simplefilter(action="default", category=FutureWarning)
    long_run_fit = long_run_model.fit()
    c, gamma = long_run_fit.params
    z = long_run_fit.resid
    # Step 2: regress the first difference of y on the lagged residuals.
    delta_y = dependent_series.diff().iloc[1:]
    lagged_residuals = z.shift().iloc[1:]
    short_run_fit = sm.OLS(delta_y, lagged_residuals).fit()
    alpha = short_run_fit.params[0]
    # NOTE: The p-value returned by the adfuller function assumes we do not estimate z
    # first, but test stationarity of an unestimated series directly. This assumption
    # should have limited effect for high N, however. Critical values taking this into
    # account more accurately are provided in e.g. McKinnon (1990) and Engle & Yoo (1987).
    adfstat, pvalue, _, _, _ = adfuller(z, maxlag=1, autolag=None)
    return c, gamma, alpha, z, adfstat, pvalue
__docformat__ = "numpy"
# pylint: disable=too-many-lines, too-many-branches, inconsistent-return-statements
import argparse
import logging
import os
from itertools import chain
from pathlib import Path
from typing import Any, Dict, List
import numpy as np
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common import common_model
from openbb_terminal.core.config.paths import (
USER_CUSTOM_IMPORTS_DIRECTORY,
USER_EXPORTS_DIRECTORY,
)
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.decorators import log_start_end
from openbb_terminal.econometrics import (
econometrics_model,
econometrics_view,
regression_model,
regression_view,
)
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
NO_EXPORT,
check_list_values,
check_positive,
check_positive_float,
export_data,
print_rich_table,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import MenuText, console
logger = logging.getLogger(__name__)
# pylint: disable=R0902,C0302
class EconometricsController(BaseController):
    """Econometrics class"""
    # Commands available from this menu.
    CHOICES_COMMANDS: List[str] = [
        "load",
        "export",
        "remove",
        "plot",
        "show",
        "type",
        "desc",
        "index",
        "clean",
        "add",
        "delete",
        "combine",
        "rename",
        "ols",
        "norm",
        "root",
        "panel",
        "compare",
        "dwat",
        "bgod",
        "bpag",
        "granger",
        "coint",
    ]
    # Sub-menus reachable from this controller.
    CHOICES_MENUS: List[str] = [
        "qa",
    ]
    # Plot kinds as accepted by pandas' DataFrame.plot.
    pandas_plot_choices = [
        "line",
        "scatter",
        "bar",
        "barh",
        "hist",
        "box",
        "kde",
        "area",
        "pie",
        "hexbin",
    ]
    # Regression types accepted by the `panel` command (lower- and upper-case).
    PANEL_CHOICES = [
        "pols",
        "re",
        "bols",
        "fe",
        "fdols",
        "POLS",
        "RE",
        "BOLS",
        "FE",
        "FDOLS",
    ]
    PATH = "/econometrics/"
    # Human-readable "dataset : columns" listing shown in the menu help text.
    loaded_dataset_cols = "\n"
    # Flat list of "dataset.column" strings, used for completion.
    list_dataset_cols: List = list()
    def __init__(self, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)
        # Aliases of the datasets loaded during this session, in load order.
        self.files: List[str] = list()
        # Mapping of dataset alias -> loaded DataFrame.
        self.datasets: Dict[str, pd.DataFrame] = dict()
        # Per-regression-type storage of data, variables and fitted models.
        self.regression: Dict[Any[Dict, Any], Any] = dict()
        self.DATA_TYPES: List[str] = ["int", "float", "str", "bool", "category", "date"]
        for regression in [
            "OLS",
            "POLS",
            "BOLS",
            "RE",
            "FE",
            "FE_EE",
            "FE_IE",
            "FE_EE_IE",
            "FDOLS",
        ]:
            self.regression[regression] = {
                "data": {},
                "independent": {},
                "dependent": {},
                "model": {},
            }
        # Maps textual operator names accepted by `add` to Python operators.
        self.signs: Dict[Any, Any] = {
            "div": "/",
            "mul": "*",
            "add": "+",
            "sub": "-",
            "mod": "%",
            "pow": "**",
        }
        # Discover loadable files in the exports and custom-imports folders.
        self.DATA_FILES = {
            filepath.name: filepath
            for file_type in common_model.file_types
            for filepath in chain(
                Path(USER_EXPORTS_DIRECTORY).rglob(f"*.{file_type}"),
                Path(USER_CUSTOM_IMPORTS_DIRECTORY / "econometrics").rglob(
                    f"*.{file_type}"
                ),
            )
            if filepath.is_file()
        }
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = {c: {} for c in self.controller_choices}
            choices["load"] = {
                "--file": {c: {} for c in self.DATA_FILES.keys()},
                "-f": "--file",
                "-alias": None,
                "-a": "-alias",
                "--examples": None,
                "-e": "--examples",
            }
            # These commands complete on dataset names (populated at load time).
            for feature in ["export", "show", "desc", "clear", "index"]:
                choices[feature] = {c: {} for c in self.files}
            # These commands complete on dataset.column options (filled in later
            # by update_runtime_choices).
            for feature in [
                "type",
                "plot",
                "norm",
                "root",
                "granger",
                "coint",
            ]:
                choices[feature] = dict()
            # Initialize this for regressions to be able to use -h flag
            choices["regressions"] = {}
            self.choices = choices
            choices["support"] = self.SUPPORT_CHOICES
            choices["about"] = self.ABOUT_CHOICES
            choices["panel"]["-r"] = {c: {} for c in self.PANEL_CHOICES}
            self.completer = NestedCompleter.from_nested_dict(choices)
    def update_runtime_choices(self):
        """Refresh the auto-completion choices from the currently loaded datasets."""
        if session and obbff.USE_PROMPT_TOOLKIT:
            # All "dataset.column" combinations currently available.
            dataset_columns = {
                f"{dataset}.{column}": {}
                for dataset, dataframe in self.datasets.items()
                for column in dataframe.columns
            }
            # Commands that operate on a dataset.column target.
            for feature in [
                "plot",
                "norm",
                "root",
                "coint",
                "regressions",
                "ols",
                "panel",
                "delete",
            ]:
                self.choices[feature] = dataset_columns
            # Commands that operate on whole datasets.
            for feature in [
                "export",
                "show",
                "clean",
                "index",
                "remove",
                "combine",
                "rename",
            ]:
                self.choices[feature] = {c: {} for c in self.files}
            # `type` and `desc` accept either a dataset or a dataset.column.
            self.choices["type"] = {
                c: {} for c in self.files + list(dataset_columns.keys())
            }
            self.choices["desc"] = {
                c: {} for c in self.files + list(dataset_columns.keys())
            }
            # `granger` takes an ordered pair of distinct dataset.column series.
            pairs_timeseries = list()
            for dataset_col in list(dataset_columns.keys()):
                pairs_timeseries += [
                    f"{dataset_col},{dataset_col2}"
                    for dataset_col2 in list(dataset_columns.keys())
                    if dataset_col != dataset_col2
                ]
            self.choices["granger"] = {c: {} for c in pairs_timeseries}
            self.completer = NestedCompleter.from_nested_dict(self.choices)
    def print_help(self):
        """Print help"""
        mt = MenuText("econometrics/")
        # Show the folders scanned for loadable data files.
        mt.add_param(
            "_data_loc",
            f"\n\t{str(USER_EXPORTS_DIRECTORY)}\n\t{str(USER_CUSTOM_IMPORTS_DIRECTORY/'econometrics')}",
        )
        mt.add_raw("\n")
        mt.add_cmd("load")
        mt.add_cmd("remove", self.files)
        mt.add_raw("\n")
        mt.add_param("_loaded", self.loaded_dataset_cols)
        mt.add_info("_exploration_")
        mt.add_cmd("show", self.files)
        mt.add_cmd("plot", self.files)
        mt.add_cmd("type", self.files)
        mt.add_cmd("desc", self.files)
        mt.add_cmd("index", self.files)
        mt.add_cmd("clean", self.files)
        mt.add_cmd("add", self.files)
        mt.add_cmd("delete", self.files)
        mt.add_cmd("combine", self.files)
        mt.add_cmd("rename", self.files)
        mt.add_cmd("export", self.files)
        mt.add_info("_tests_")
        mt.add_cmd("norm", self.files)
        mt.add_cmd("root", self.files)
        mt.add_cmd("granger", self.files)
        mt.add_cmd("coint", self.files)
        mt.add_info("_regression_")
        mt.add_cmd("ols", self.files)
        mt.add_cmd("panel", self.files)
        mt.add_cmd("compare", self.files)
        mt.add_info("_regression_tests_")
        # Regression diagnostics are only enabled once an OLS model was fitted.
        mt.add_cmd("dwat", self.files and self.regression["OLS"]["model"])
        mt.add_cmd("bgod", self.files and self.regression["OLS"]["model"])
        mt.add_cmd("bpag", self.files and self.regression["OLS"]["model"])
        console.print(text=mt.menu_text, menu="Econometrics")
        console.print()
    def custom_reset(self):
        """Class specific component of reset command"""
        # Only the menu path is needed to restore this controller on reset.
        return ["econometrics"]
def update_loaded(self):
self.list_dataset_cols = []
if not self.files:
self.loaded_dataset_cols = "\n"
self.list_dataset_cols.append("")
return
maxfile = max(len(file) for file in self.files)
self.loaded_dataset_cols = "\n"
for dataset, data in self.datasets.items():
max_files = (maxfile - len(dataset)) * " "
self.loaded_dataset_cols += (
f"\t{dataset} {max_files}: {', '.join(data.columns)}\n"
)
for col in data.columns:
self.list_dataset_cols.append(f"{dataset}.{col}")
    @log_start_end(log=logger)
    def call_load(self, other_args: List[str]):
        """Process load"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="load",
            description="Load dataset (from previous export, custom imports or StatsModels).",
        )
        parser.add_argument(
            "-f",
            "--file",
            help="File to load data in (can be custom import, "
            "may have been exported before or can be from Statsmodels)",
            type=str,
        )
        parser.add_argument(
            "-a",
            "--alias",
            help="Alias name to give to the dataset",
            type=str,
        )
        parser.add_argument(
            "-e",
            "--examples",
            help="Use this argument to show examples of Statsmodels to load in. "
            "See: https://www.statsmodels.org/devel/datasets/index.html",
            action="store_true",
            default=False,
            dest="examples",
        )
        # A leading bare argument is treated as the file name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-f")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # show examples from statsmodels
            if ns_parser.examples:
                df = pd.DataFrame.from_dict(common_model.DATA_EXAMPLES, orient="index")
                print_rich_table(
                    df,
                    headers=list(["description"]),
                    show_index=True,
                    index_name="file name",
                    title="Examples from Statsmodels",
                )
                return
            if not ns_parser.file:
                return
            possible_data = list(common_model.DATA_EXAMPLES.keys()) + list(
                self.DATA_FILES.keys()
            )
            if ns_parser.file not in possible_data:
                file = ""
                # Try to see if the user is just missing the extension
                for file_ext in list(self.DATA_FILES.keys()):
                    if file_ext.startswith(ns_parser.file):
                        # found the correct file
                        file = file_ext
                        break
                if not file:
                    console.print(
                        "[red]The file/dataset selected does not exist.[/red]"
                    )
                    return
            else:
                file = ns_parser.file
            # Default alias: the file name without its extension.
            if ns_parser.alias:
                alias = ns_parser.alias
            else:
                if "." in ns_parser.file:
                    alias = ".".join(ns_parser.file.split(".")[:-1])
                else:
                    alias = ns_parser.file
            # check if this dataset has been added already
            if alias in self.files:
                console.print(
                    "[red]The file/dataset selected has already been loaded.[/red]"
                )
                return
            data = common_model.load(file, self.DATA_FILES, common_model.DATA_EXAMPLES)
            if not data.empty:
                # Normalize column names so the "dataset.column" syntax works.
                data.columns = data.columns.map(lambda x: x.lower().replace(" ", "_"))
                self.files.append(alias)
                self.datasets[alias] = data
                self.update_runtime_choices()
                self.update_loaded()
    @log_start_end(log=logger)
    def call_export(self, other_args: List[str]):
        """Process export command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="export",
            description="Export dataset to Excel",
        )
        parser.add_argument(
            "-n",
            "--name",
            dest="name",
            help="The name of the dataset you wish to export",
            type=str,
        )
        parser.add_argument(
            "-t",
            "--type",
            help="The file type you wish to export to",
            dest="type",
            choices=common_model.file_types,
            type=str,
            default="xlsx",
        )
        # A leading bare argument is treated as the dataset name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=NO_EXPORT
        )
        if ns_parser:
            if not ns_parser.name or ns_parser.name not in self.datasets:
                console.print("Please enter a valid dataset.")
            else:
                # Write the dataset next to this module using the helper's naming.
                export_data(
                    ns_parser.type,
                    os.path.dirname(os.path.abspath(__file__)),
                    ns_parser.name,
                    self.datasets[ns_parser.name],
                )
        console.print()
    @log_start_end(log=logger)
    def call_remove(self, other_args: List[str]):
        """Process clear"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="remove",
            description="Remove a dataset from the loaded dataset list",
        )
        parser.add_argument(
            "-n",
            "--name",
            help="The name of the dataset you want to remove",
            dest="name",
            type=str,
            choices=list(self.datasets.keys()),
        )
        # A leading bare argument is treated as the dataset name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(parser, other_args, NO_EXPORT)
        if not ns_parser:
            return
        if not ns_parser.name:
            console.print("Please enter a valid dataset.\n")
            return
        if ns_parser.name not in self.datasets:
            console.print(f"[red]'{ns_parser.name}' is not a loaded dataset.[/red]\n")
            return
        # Drop the dataset and refresh completion and help-text caches.
        del self.datasets[ns_parser.name]
        self.files.remove(ns_parser.name)
        self.update_runtime_choices()
        self.update_loaded()
    @log_start_end(log=logger)
    def call_plot(self, other_args: List[str]):
        """Process plot command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="plot",
            description="Plot data based on the index",
        )
        parser.add_argument(
            "-v",
            "--values",
            help="Dataset.column values to be displayed in a plot",
            dest="values",
            type=check_list_values(self.choices["plot"]),
        )
        # A leading bare argument is treated as the values list.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-v")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
        )
        if ns_parser and ns_parser.values:
            # Resolve each "dataset.column" option to its Series.
            data: Dict = {}
            for datasetcol in ns_parser.values:
                dataset, col = datasetcol.split(".")
                data[datasetcol] = self.datasets[dataset][col]
            econometrics_view.display_plot(
                data,
                ns_parser.export,
            )
@log_start_end(log=logger)
def call_show(self, other_args: List[str]):
"""Process show command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="show",
description="Show a portion of the DataFrame",
)
parser.add_argument(
"-n",
"--name",
type=str,
choices=self.files,
dest="name",
help="The name of the database you want to show data for",
)
parser.add_argument(
"-s",
"--sortby",
help="Sort based on a column in the DataFrame",
type=str,
dest="sortby",
default="",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
default=False,
help=(
"Data is sorted in descending order by default. "
"Reverse flag will sort it in an ascending way. "
"Only works when raw data is displayed."
),
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-n")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED, limit=10
)
if ns_parser:
if not ns_parser.name:
dataset_names = list(self.datasets.keys())
else:
dataset_names = [ns_parser.name]
for name in dataset_names:
df = self.datasets[name]
if name in self.datasets and self.datasets[name].empty:
return console.print(
f"[red]No data available for {ns_parser.name}.[/red]\n"
)
if ns_parser.sortby:
sort_column = ns_parser.sortby
if sort_column not in self.datasets[name].columns:
console.print(
f"[red]{sort_column} not a valid column. Showing without sorting.\n[/red]"
)
else:
df = df.sort_values(by=sort_column, ascending=ns_parser.reverse)
print_rich_table(
df.head(ns_parser.limit),
headers=list(df.columns),
show_index=True,
title=f"Dataset {name} | Showing {ns_parser.limit} of {len(df)} rows",
)
export_data(
ns_parser.export,
os.path.dirname(os.path.abspath(__file__)),
f"{ns_parser.name}_show",
df.head(ns_parser.limit),
)
    @log_start_end(log=logger)
    def call_desc(self, other_args: List[str]):
        """Process desc command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="desc",
            description="Show the descriptive statistics of the dataset",
        )
        parser.add_argument(
            "-n",
            "--name",
            type=str,
            choices=self.choices["desc"],
            dest="name",
            help="The name of the dataset.column you want to show the descriptive statistics",
            required="-h" not in other_args,
        )
        # A leading bare argument is treated as the dataset(.column) name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            # "dataset.column" describes a single column; "dataset" the whole frame.
            if "." in ns_parser.name:
                dataset, col = ns_parser.name.split(".")
                df = self.datasets[dataset][col].describe()
                print_rich_table(
                    df.to_frame(),
                    headers=[col],
                    show_index=True,
                    title=f"Statistics for dataset: '{dataset}'",
                )
                export_data(
                    ns_parser.export,
                    os.path.dirname(os.path.abspath(__file__)),
                    f"{dataset}_{col}_desc",
                    df,
                )
            else:
                df = self.datasets[ns_parser.name]
                if not df.empty:
                    df = df.describe()
                    print_rich_table(
                        df,
                        headers=self.datasets[ns_parser.name].columns,
                        show_index=True,
                        title=f"Statistics for dataset: '{ns_parser.name}'",
                    )
                    export_data(
                        ns_parser.export,
                        os.path.dirname(os.path.abspath(__file__)),
                        f"{ns_parser.name}_desc",
                        df,
                    )
                else:
                    console.print("Empty dataset")
    @log_start_end(log=logger)
    def call_type(self, other_args: List[str]):
        """Process type"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="type",
            description="Show the type of the columns of the dataset and/or change the type of the column",
        )
        parser.add_argument(
            "-n",
            "--name",
            type=str,
            dest="name",
            help="Provide dataset.column series to change type or dataset to see types.",
            choices=self.choices["type"],
        )
        parser.add_argument(
            "--format",
            type=str,
            choices=self.DATA_TYPES,
            dest="format",
            help=(
                "Set the format for the dataset.column defined. This can be: "
                "date, int, float, str, bool or category"
            ),
        )
        # A leading bare argument is treated as the dataset(.column) name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(parser, other_args, NO_EXPORT)
        if ns_parser:
            if ns_parser.name:
                if "." in ns_parser.name:
                    dataset, column = ns_parser.name.split(".")
                    if ns_parser.format:
                        # "date" needs special parsing; other formats use astype.
                        if ns_parser.format == "date":
                            self.datasets[dataset][column] = pd.to_datetime(
                                self.datasets[dataset][column].values,
                            )
                        else:
                            self.datasets[dataset][column] = self.datasets[dataset][
                                column
                            ].astype(ns_parser.format)
                        console.print(
                            f"Update '{ns_parser.name}' with type '{ns_parser.format}'"
                        )
                    else:
                        # No format given: just report the current dtype.
                        console.print(
                            f"The type of '{ns_parser.name}' is '{self.datasets[dataset][column].dtypes}'"
                        )
                else:
                    # Whole-dataset name: list dtypes of all its columns.
                    print_rich_table(
                        pd.DataFrame(self.datasets[ns_parser.name].dtypes),
                        headers=list(["dtype"]),
                        show_index=True,
                        index_name="column",
                        title=str(ns_parser.name),
                    )
            else:
                # No name at all: list dtypes for every loaded dataset.
                for dataset_name, data in self.datasets.items():
                    print_rich_table(
                        pd.DataFrame(data.dtypes),
                        headers=list(["dtype"]),
                        show_index=True,
                        index_name="column",
                        title=str(dataset_name),
                    )
        console.print()
    @log_start_end(log=logger)
    def call_index(self, other_args: List[str]):
        """Process index"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="index",
            description="Set a (multi) index for the dataset",
        )
        parser.add_argument(
            "-n",
            "--name",
            type=str,
            dest="name",
            choices=list(self.datasets.keys()),
            help="Name of dataset to select index from",
            required="-h" not in other_args,
        )
        parser.add_argument(
            "-i",
            "--index",
            type=str,
            dest="index",
            help="Columns from the dataset the user wishes to set as default",
            default="",
        )
        parser.add_argument(
            "-a",
            "--adjustment",
            help="Whether to allow for making adjustments to the dataset to align it with the use case for "
            "Timeseries and Panel Data regressions",
            dest="adjustment",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "-d",
            "--drop",
            help="Whether to drop the column(s) the index is set for.",
            dest="drop",
            action="store_true",
            default=False,
        )
        # A leading bare argument is treated as the dataset name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-n")
        ns_parser = self.parse_known_args_and_warn(parser, other_args, NO_EXPORT)
        if ns_parser:
            name = ns_parser.name
            index = ns_parser.index
            if index:
                # Accept a comma-separated list of column names.
                if "," in index:
                    values_found = [val.strip() for val in index.split(",")]
                else:
                    values_found = [index]
                columns = list()
                for value in values_found:
                    # check if the value is valid
                    if value in self.datasets[name].columns:
                        columns.append(value)
                    else:
                        console.print(f"[red]'{value}' is not valid.[/red]")
                dataset = self.datasets[name]
                # If a custom index is already set, reset it first so the
                # requested columns can become the new index.
                if not pd.Index(np.arange(0, len(dataset))).equals(dataset.index):
                    console.print(
                        "As an index has been set, resetting the current index."
                    )
                    if dataset.index.name in dataset.columns:
                        dataset = dataset.reset_index(drop=True)
                    else:
                        dataset = dataset.reset_index(drop=False)
                for column in columns:
                    if column not in dataset.columns:
                        console.print(
                            f"[red]The column '{column}' is not available in the dataset {name}."
                            f"Please choose one of the following: {', '.join(dataset.columns)}[/red]"
                        )
                        return
                if ns_parser.adjustment:
                    # For panel data: treat the first column as the entity (i)
                    # and forward-fill its NaNs.
                    if len(columns) > 1 and dataset[columns[0]].isnull().any():
                        null_values = dataset[dataset[columns[0]].isnull()]
                        console.print(
                            f"The column '{columns[0]}' contains {len(null_values)} NaN "
                            "values. As multiple columns are provided, it is assumed this "
                            "column represents entities (i), the NaN values are forward "
                            "filled. Remove the -a argument to disable this."
                        )
                        dataset[columns[0]] = dataset[columns[0]].fillna(method="ffill")
                    if dataset[columns[-1]].isnull().any():
                        # This checks whether NaT (missing values) exists within the DataFrame
                        null_values = dataset[dataset[columns[-1]].isnull()]
                        console.print(
                            f"The time index '{columns[-1]}' contains {len(null_values)} "
                            "NaNs which are removed from the dataset. Remove the -a argument to disable this."
                        )
                    dataset = dataset[dataset[columns[-1]].notnull()]
                self.datasets[name] = dataset.set_index(columns, drop=ns_parser.drop)
                console.print(
                    f"Successfully updated '{name}' index to be '{', '.join(columns)}'\n"
                )
                self.update_runtime_choices()
            else:
                # No index given: preview the dataset so the user can choose one.
                print_rich_table(
                    self.datasets[name].head(3),
                    headers=list(self.datasets[name].columns),
                    show_index=True,
                    title=f"Dataset '{name}'",
                )
@log_start_end(log=logger)
def call_clean(self, other_args: List[str]):
"""Process clean"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="clean",
description="Clean a dataset by filling and dropping NaN values.",
)
parser.add_argument(
"-n",
"--name",
help="The name of the dataset you want to clean up",
dest="name",
type=str,
choices=list(self.datasets.keys()),
)
parser.add_argument(
"--fill",
help="The method of filling NaNs. This has options to fill rows (rfill, rbfill, rffill) or fill "
"columns (cfill, cbfill, cffill). Furthermore, it has the option to forward fill and backward fill "
"(up to --limit) which refer to how many rows/columns can be set equal to the last non-NaN value",
dest="fill",
choices=["rfill", "cfill", "rbfill", "cbfill", "rffill", "bffill"],
default="",
)
parser.add_argument(
"-d",
"--drop",
help="The method of dropping NaNs. This either has the option rdrop (drop rows that contain NaNs) "
"or cdrop (drop columns that contain NaNs)",
dest="drop",
choices=["rdrop", "cdrop"],
default="",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-n")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, NO_EXPORT, limit=5
)
if ns_parser:
self.datasets[ns_parser.name] = econometrics_model.clean(
self.datasets[ns_parser.name],
ns_parser.fill,
ns_parser.drop,
ns_parser.limit,
)
console.print(f"Successfully cleaned '{ns_parser.name}' dataset")
console.print()
@log_start_end(log=logger)
def call_add(self, other_args: List[str]):
    """Process add

    Adds a new column to an existing dataset. The new column is computed
    from a base ``dataset.column`` combined (via an operator/sign) with
    either a scalar criteria or another ``dataset.column``.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="add",
        description="Add columns to your dataframe with the option to use formulas. E.g."
        " newdatasetcol = basedatasetcol sign criteriaordatasetcol"
        " thesis.high_revenue = thesis.revenue > 1000"
        " dataset.debt_ratio = dataset.debt div dataset2.assets",
    )
    parser.add_argument(
        "-n",
        "--newdatasetcol",
        help="New dataset column to be added with format: dataset.column",
        dest="newdatasetcol",
        type=str,
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-b",
        "--basedatasetcol",
        help="Base dataset column to be used as base with format: dataset.column",
        dest="basedatasetcol",
        type=str,
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-s",
        "--sign",
        help="Sign to be applied to the base dataset column",
        dest="sign",
        # self.signs presumably maps word operators (e.g. "div") to pandas-eval
        # symbols -- TODO confirm against the class definition.
        choices=list(self.signs.keys()) + [">", "<", ">=", "<=", "=="],
        required="-h" not in other_args,
        type=str,
    )
    parser.add_argument(
        "-c",
        "--criteriaordatasetcol",
        help="Either dataset column to be applied on top of base dataset or criteria",
        dest="criteriaordatasetcol",
        required="-h" not in other_args,
        type=str,
    )
    # Allow the "add <dataset.column>" shorthand without an explicit -n flag.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-n")
    ns_parser = self.parse_known_args_and_warn(parser, other_args, NO_EXPORT)
    if ns_parser:
        dataset, new_column = ns_parser.newdatasetcol.split(".")
        dataset2, existing_column = ns_parser.basedatasetcol.split(".")
        # Translate a word sign (e.g. "div") into its operator symbol so it
        # can be embedded into the pd.eval expression below.
        for sign, operator in self.signs.items():
            if sign == ns_parser.sign:
                ns_parser.sign = operator
        if dataset not in self.datasets:
            console.print(
                f"Not able to find the dataset {dataset}. Please choose one of "
                f"the following: {', '.join(self.datasets)}"
            )
        elif dataset2 not in self.datasets:
            console.print(
                f"Not able to find the dataset {dataset2}. Please choose one of "
                f"the following: {', '.join(self.datasets)}"
            )
        elif existing_column not in self.datasets[dataset2]:
            console.print(
                f"Not able to find the column {existing_column}. Please choose one of "
                f"the following: {', '.join(self.datasets[dataset2].columns)}"
            )
        # A dotted -c argument means "combine with another dataset.column";
        # otherwise it is treated as a literal criteria value.
        elif len(ns_parser.criteriaordatasetcol.split(".")) > 1:
            dataset3, existing_column2 = ns_parser.criteriaordatasetcol.split(".")
            if dataset3 not in self.datasets:
                console.print(
                    f"Not able to find the dataset {dataset3}. Please choose one of "
                    f"the following: {', '.join(self.datasets)}"
                )
            elif existing_column2 not in self.datasets[dataset3]:
                console.print(
                    f"Not able to find the column {existing_column2}. Please choose one of "
                    f"the following: {', '.join(self.datasets[dataset3].columns)}"
                )
            else:
                # pd.eval resolves the "self.datasets[...]" references against
                # the local scope and writes the result into the target frame.
                pd.eval(
                    f"{new_column} = self.datasets[dataset2][existing_column] "
                    f"{ns_parser.sign} self.datasets[dataset3][existing_column2]",
                    target=self.datasets[dataset],
                    inplace=True,
                )
        else:
            # Scalar criteria case, e.g. "thesis.revenue > 1000".
            pd.eval(
                f"{new_column} = self.datasets[dataset2][existing_column] "
                f"{ns_parser.sign} {ns_parser.criteriaordatasetcol}",
                target=self.datasets[dataset],
                inplace=True,
            )
        # Refresh completion choices so the new column is addressable.
        self.update_runtime_choices()
        self.update_loaded()
    console.print()
@log_start_end(log=logger)
def call_delete(self, other_args: List[str]):
    """Process delete

    Remove one or more <dataset.column> entries from loaded datasets.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="delete",
        description="The column you want to delete from a dataset.",
    )
    parser.add_argument(
        "-d",
        "--delete",
        help="The columns you want to delete from a dataset. Use format: <dataset.column> or"
        " multiple with <dataset.column>,<dataset.column2>",
        dest="delete",
        type=check_list_values(self.choices["delete"]),
    )
    # Support the positional shorthand "delete <dataset.column>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-d")
    ns_parser = self.parse_known_args_and_warn(parser, other_args, NO_EXPORT)
    if ns_parser:
        for entry in ns_parser.delete:
            dataset, column = entry.split(".")
            # Guard clauses: report unknown dataset/column and move on.
            if dataset not in self.datasets:
                console.print(
                    f"Not able to find the dataset {dataset}. Please choose one of "
                    f"the following: {', '.join(self.datasets)}"
                )
                continue
            if column not in self.datasets[dataset]:
                console.print(
                    f"Not able to find the column {column}. Please choose one of "
                    f"the following: {', '.join(self.datasets[dataset].columns)}"
                )
                continue
            del self.datasets[dataset][column]
        self.update_runtime_choices()
    console.print()
@log_start_end(log=logger)
def call_combine(self, other_args: List[str]):
    """Process combine

    Copy columns from other datasets into a target dataset, naming each
    copied column "<source_dataset>_<column>".
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="combine",
        description="The columns you want to add to a dataset. The first argument is the dataset to add columns in"
        "and the remaining could be: <datasetX.column2>,<datasetY.column3>",
    )
    parser.add_argument(
        "-d",
        "--dataset",
        help="Dataset to add columns to",
        dest="dataset",
        choices=self.choices["combine"],
    )
    parser.add_argument(
        "-c",
        "--columns",
        help="The columns we want to add <dataset.column>,<dataset.column2>",
        dest="columns",
        type=check_list_values(self.choices["delete"]),
    )
    # Support the positional shorthand "combine <dataset>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-d")
    ns_parser = self.parse_known_args_and_warn(parser, other_args, NO_EXPORT)
    if ns_parser:
        if ns_parser.dataset not in self.datasets:
            console.print(
                f"Not able to find the dataset {ns_parser.dataset}. Please choose one of "
                f"the following: {', '.join(self.datasets)}"
            )
            return
        target_frame = self.datasets[ns_parser.dataset]
        for entry in ns_parser.columns:
            source, column = entry.split(".")
            # Guard clauses: report unknown dataset/column and move on.
            if source not in self.datasets:
                console.print(
                    f"Not able to find the dataset {source}. Please choose one of "
                    f"the following: {', '.join(self.datasets)}"
                )
                continue
            if column not in self.datasets[source]:
                console.print(
                    f"Not able to find the column {column}. Please choose one of "
                    f"the following: {', '.join(self.datasets[source].columns)}"
                )
                continue
            target_frame[f"{source}_{column}"] = self.datasets[source][column]
        self.update_runtime_choices()
    console.print()
@log_start_end(log=logger)
def call_rename(self, other_args: List[str]):
    """Process rename

    Rename a single column within a loaded dataset.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="rename",
        description="The column you want to rename from a dataset.",
    )
    parser.add_argument(
        "-d",
        "--dataset",
        help="Dataset that will get a column renamed",
        dest="dataset",
        choices=self.choices["rename"],
        type=str,
    )
    parser.add_argument(
        "-o",
        "--oldcol",
        help="Old column from dataset to be renamed",
        dest="oldcol",
        type=str,
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-n",
        "--newcol",
        help="New column from dataset to be renamed",
        dest="newcol",
        type=str,
        required="-h" not in other_args,
    )
    # Support the positional shorthand "rename <dataset>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-d")
    ns_parser = self.parse_known_args_and_warn(parser, other_args, NO_EXPORT)
    if ns_parser:
        target = ns_parser.dataset
        old_name = ns_parser.oldcol
        new_name = ns_parser.newcol
        if target not in self.datasets:
            console.print(
                f"Not able to find the dataset {target}. Please choose one of "
                f"the following: {', '.join(self.datasets)}"
            )
        elif old_name not in self.datasets[target]:
            console.print(
                f"Not able to find the column {old_name}. Please choose one of "
                f"the following: {', '.join(self.datasets[target].columns)}"
            )
        else:
            self.datasets[target] = self.datasets[target].rename(
                columns={old_name: new_name}
            )
            self.update_runtime_choices()
    console.print()
@log_start_end(log=logger)
def call_ols(self, other_args: List[str]):
    """Process ols command

    Runs an OLS regression on the selected dataset.column series and
    caches the fitted model under self.regression["OLS"] so that the
    diagnostic commands (dwat, bgod, bpag) can reuse it.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ols",
        description="Performs an OLS regression on timeseries data.",
    )
    parser.add_argument(
        "-d",
        "--dependent",
        type=str,
        dest="dependent",
        help="The dependent variable on the regression you would like to perform",
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-i",
        "--independent",
        type=check_list_values(self.choices["regressions"]),
        dest="independent",
        help=(
            "The independent variables on the regression you would like to perform. "
            "E.g. historical.high,historical.low"
        ),
        required="-h" not in other_args,
    )
    parser.add_argument(
        "--no-output",
        action="store_true",
        default=False,
        help="Hide the output of the regression",
        dest="no_output",
    )
    # Support the positional shorthand "ols <dataset.column>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-d")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        # A comma suggests the user passed several series to -d by mistake.
        if "," in ns_parser.dependent:
            console.print(
                "It appears you have selected multiple variables for the dependent variable. "
                "Please select one.\nDid you intend to include these variables as independent "
                f"variables? Use -i {ns_parser.dependent} in this case.\n"
            )
        elif ns_parser.dependent in self.choices["regressions"]:
            # Align all series on a shared index (dropping NaN rows).
            (
                regression_df,
                dependent_variable,
                independent_variables,
            ) = regression_model.get_regression_data(
                [ns_parser.dependent] + ns_parser.independent,
                self.datasets,
                "OLS",
            )
            # Cache inputs and the fitted model for later diagnostics.
            self.regression["OLS"]["data"] = regression_df
            self.regression["OLS"]["dependent"] = dependent_variable
            self.regression["OLS"]["independent"] = independent_variables
            model = regression_model.get_ols(
                regression_df[dependent_variable],
                regression_df[independent_variables],
            )
            self.regression["OLS"]["model"] = model
            if not ns_parser.no_output:
                console.print(model.summary())
        else:
            console.print(
                f"{ns_parser.dependent} not in {','.join(self.choices['regressions'])}\n"
                f"Please choose a valid dataset and column combination.\n"
            )
@log_start_end(log=logger)
def call_norm(self, other_args: List[str]):
    """Process normality command

    Tests whether a dataset.column series is normally distributed and
    optionally plots a histogram.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="norm",
        description="Test whether the used data is normally distributed.",
    )
    parser.add_argument(
        "-v",
        "--value",
        type=str,
        choices=self.choices["norm"],
        dest="column",
        help="The dataset.column you want to test normality for",
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-p",
        "--plot",
        dest="plot",
        help="Whether you wish to plot a histogram to visually depict normality",
        action="store_true",
        default=False,
    )
    # Support the positional shorthand "norm <dataset.column>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-v")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if ns_parser and ns_parser.column:
        dataset, column = ns_parser.column.split(".")
        # BUG FIX: the dataset-existence check must come before any
        # self.datasets[dataset] access, otherwise a missing dataset
        # raised KeyError instead of printing the friendly message.
        if dataset not in self.datasets:
            return console.print(f"Can not find {dataset}. Did you load the data?")
        if isinstance(self.datasets[dataset][column].index, pd.MultiIndex):
            return console.print(
                f"The column '{column}' in '{dataset}' is a MultiIndex. To test for normality"
                ", make sure to set a singular time index.\n"
            )
        if isinstance(self.datasets[dataset], pd.Series):
            data = self.datasets[dataset]
        elif isinstance(self.datasets[dataset], pd.DataFrame):
            data = self.datasets[dataset][column]
        else:
            # BUG FIX: previously printed type(dataset) -- the type of the
            # name string -- rather than the stored object's type, and the
            # message was missing its closing parenthesis.
            return console.print(
                f"The type of {dataset} ({type(self.datasets[dataset])}) is not an option."
            )
        econometrics_view.display_norm(
            data, dataset, column, ns_parser.plot, ns_parser.export
        )
@log_start_end(log=logger)
def call_root(self, other_args: List[str]):
    """Process unit root command

    Runs ADF (fuller) and KPSS unit-root tests on a dataset.column series.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="root",
        description="Show unit root tests of a column of a dataset",
    )
    parser.add_argument(
        "-v",
        "--value",
        type=str,
        choices=self.choices["root"],
        dest="column",
        help="The column and name of the database you want test unit root for",
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-r",
        "--fuller_reg",
        # BUG FIX: help text previously advertised 'nc', which is not an
        # accepted choice; the valid options are listed below.
        help="Type of regression. Can be 'c','ct','ctt','n'. c - Constant and t - trend order",
        choices=["c", "ct", "ctt", "n"],
        default="c",
        type=str,
        dest="fuller_reg",
    )
    parser.add_argument(
        "-k",
        "--kps_reg",
        help="Type of regression. Can be 'c', 'ct'. c - Constant and t - trend order",
        choices=["c", "ct"],
        type=str,
        dest="kpss_reg",
        default="c",
    )
    # Support the positional shorthand "root <dataset.column>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-v")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser and ns_parser.column:
        if "." in ns_parser.column:
            dataset, column = ns_parser.column.split(".")
        else:
            console.print(
                "[red]Column must be formatted as 'dataset.column'[/red]\n"
            )
            # BUG FIX: previously execution fell through after this message
            # and raised NameError since 'dataset'/'column' were unassigned.
            return
        if isinstance(self.datasets[dataset][column].index, pd.MultiIndex):
            console.print(
                f"The column '{column}' from the dataset '{dataset}' is a MultiIndex. To test for unitroot in a "
                "timeseries, make sure to set a singular time index.\n"
            )
        else:
            if isinstance(self.datasets[dataset], pd.Series):
                data = self.datasets[dataset]
            elif isinstance(self.datasets[dataset], pd.DataFrame):
                data = self.datasets[dataset][column]
            else:
                return console.print(
                    "Can not select data due to the data not being a DataFrame or Series."
                )
            econometrics_view.display_root(
                data,
                dataset,
                column,
                ns_parser.fuller_reg,
                ns_parser.kpss_reg,
                ns_parser.export,
            )
@log_start_end(log=logger)
def call_panel(self, other_args: List[str]):
    """Process panel command

    Runs the selected panel regression (plus a plain OLS, which the
    diagnostic commands require) on MultiIndex panel data and caches the
    results in self.regression.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="panel",
        description="Performs regression analysis on Panel Data. There are a multitude of options to select "
        "from to fit the needs of restrictions of the dataset.",
    )
    parser.add_argument(
        "-d",
        "--dependent",
        type=str,
        dest="dependent",
        help="The dependent variable on the regression you would like to perform",
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-i",
        "--independent",
        type=check_list_values(self.choices["regressions"]),
        dest="independent",
        help=(
            "The independent variables on the regression you would like to perform. "
            "E.g. wage_panel.married,wage_panel.union"
        ),
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-r",
        "--regression",
        type=str,
        choices=self.PANEL_CHOICES,
        dest="type",
        help="The type of regression you wish to perform. This can be either pols (Pooled OLS), "
        "re (Random Effects), bols (Between OLS), fe (Fixed Effects) or fdols (First Difference OLS)",
        default="pols",
    )
    parser.add_argument(
        "-e",
        "--entity_effects",
        dest="entity_effects",
        help="Using this command creates entity effects, which is equivalent to including dummies for each entity. "
        "This is only used within Fixed Effects estimations (when type is set to 'fe')",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-t",
        "--time_effects",
        dest="time_effects",
        help="Using this command creates time effects, which is equivalent to including dummies for each time. "
        "This is only used within Fixed Effects estimations (when type is set to 'fe')",
        action="store_true",
        default=False,
    )
    # Support the positional shorthand "panel <dataset.column>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-d")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        # A comma suggests several series were passed to -d by mistake.
        if "," in ns_parser.dependent:
            console.print(
                "It appears you have selected multiple variables for the dependent variable. "
                "The model only accepts one.\nDid you intend to include these variables as independent "
                f"variables? Use -i {ns_parser.dependent} in this case.\n"
            )
        elif ns_parser.dependent in self.choices["regressions"]:
            regression_vars = [ns_parser.dependent] + ns_parser.independent
            if regression_vars and len(regression_vars) > 1:
                # Every variable must come from a MultiIndex (entity, date)
                # panel; abort on the first one that is not.
                for variable in regression_vars:
                    if "." not in variable:
                        console.print(
                            "[red]Please follow the format 'dataset.column'[/red]\n"
                        )
                        continue
                    dataset, column = variable.split(".")
                    if not isinstance(
                        self.datasets[dataset][column].index, pd.MultiIndex
                    ):
                        other_column = (
                            self.datasets[dataset].drop(column, axis=1).columns[0]
                        )
                        return console.print(
                            f"The column '{column}' from the dataset '{dataset}' is not a MultiIndex. Make sure "
                            f"you set the index correctly with the index (e.g. index {dataset} -i {column},"
                            f"{other_column}) command where the first level is the entity (e.g. Tesla Inc.) and "
                            f"the second level the date (e.g. 2021-03-31)\n"
                        )
            # Ensure that OLS is always ran to be able to perform tests
            regression_types = [ns_parser.type.upper(), "OLS"]
            for regression in regression_types:
                # For FE, effect flags are encoded into the storage key
                # ("_EE" for entity effects, "_IE" for time effects).
                regression_name = regression
                if regression == "FE":
                    if ns_parser.entity_effects:
                        regression_name = regression_name + "_EE"
                    if ns_parser.time_effects:
                        regression_name = regression_name + "_IE"
                (
                    regression_df,
                    dependent_variable,
                    independent_variables,
                ) = regression_model.get_regression_data(
                    [ns_parser.dependent] + ns_parser.independent,
                    self.datasets,
                    regression,
                )
                # NOTE(review): data/dependent/independent are stored under
                # the plain 'regression' key while the model is stored under
                # 'regression_name' (which differs for FE with effects) --
                # confirm this asymmetry is intentional.
                self.regression[regression]["data"] = regression_df
                self.regression[regression]["dependent"] = dependent_variable
                self.regression[regression][
                    "independent"
                ] = independent_variables
                self.regression[regression_name][
                    "model"
                ] = regression_view.display_panel(
                    regression_df[dependent_variable],
                    regression_df[independent_variables],
                    regression,
                    ns_parser.entity_effects,
                    ns_parser.time_effects,
                    ns_parser.export,
                )
        else:
            console.print(
                f"{ns_parser.dependent} not in {','.join(self.choices['regressions'])}\n"
                f"Please choose a valid dataset and column combination.\n"
            )
@log_start_end(log=logger)
def call_compare(self, other_args: List[str]):
    """Process compare command

    Prints a side-by-side comparison of all fitted panel regressions.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="compare",
        description="Compare results between all activated Panel regression models",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    regression_model.get_comparison(self.regression, ns_parser.export)
    console.print()
@log_start_end(log=logger)
def call_dwat(self, other_args: List[str]):
    """Process unitroot command

    Shows the Durbin-Watson autocorrelation statistic for the residuals
    of the previously fitted OLS regression.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="dwat",
        description=(
            "Show autocorrelation tests from Durbin-Watson. "
            "Needs OLS to be run in advance with independent and dependent variables"
        ),
    )
    parser.add_argument(
        "-p",
        "--plot",
        help="Plot the residuals",
        dest="plot",
        action="store_true",
        default=False,
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    ols_cache = self.regression["OLS"]
    if not ols_cache["model"]:
        console.print(
            "Please perform an OLS regression before estimating the Durbin-Watson statistic.\n"
        )
        return
    regression_view.display_dwat(
        ols_cache["model"],
        ols_cache["data"][ols_cache["dependent"]],
        ns_parser.plot,
        ns_parser.export,
    )
@log_start_end(log=logger)
def call_bgod(self, other_args):
    """Process bgod command

    Shows the Breusch-Godfrey autocorrelation test for the previously
    fitted OLS regression.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="bgod",
        description=(
            "Show Breusch-Godfrey autocorrelation test results."
            "Needs OLS to be run in advance with independent and dependent variables"
        ),
    )
    parser.add_argument(
        "-l",
        "--lags",
        type=check_positive,
        dest="lags",
        help="The lags for the Breusch-Godfrey test",
        default=3,
    )
    # Support the positional shorthand "bgod <lags>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-l")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    fitted_ols = self.regression["OLS"]["model"]
    if not fitted_ols:
        console.print(
            "Perform an OLS regression before estimating the Breusch-Godfrey statistic.\n"
        )
        return
    regression_view.display_bgod(fitted_ols, ns_parser.lags, ns_parser.export)
@log_start_end(log=logger)
def call_bpag(self, other_args):
    """Process bpag command

    Shows the Breusch-Pagan heteroscedasticity test for the previously
    fitted OLS regression.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="bpag",
        description=(
            "Show Breusch-Pagan heteroscedasticity test results. "
            "Needs OLS to be run in advance with independent and dependent variables"
        ),
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    fitted_ols = self.regression["OLS"]["model"]
    if not fitted_ols:
        console.print(
            "Perform an OLS regression before estimating the Breusch-Pagan statistic.\n"
        )
        return
    regression_view.display_bpag(fitted_ols, ns_parser.export)
@log_start_end(log=logger)
def call_granger(self, other_args: List[str]):
    """Process granger command

    Tests whether the first series is Granger-caused by the second.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="granger",
        description="Show Granger causality between two timeseries",
    )
    parser.add_argument(
        "-t",
        "--timeseries",
        choices=self.choices["granger"],
        help="Requires two time series, the first time series is assumed to be Granger-caused "
        "by the second time series.",
        type=str,
        dest="ts",
        metavar="Available time series",
    )
    parser.add_argument(
        "-l",
        "--lags",
        help="How many lags should be included",
        type=int,
        dest="lags",
        default=3,
    )
    parser.add_argument(
        "-c",
        "--confidence",
        help="Set the confidence level",
        type=check_positive_float,
        dest="confidence",
        default=0.05,
    )
    # Support the positional shorthand "granger <pair>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-t")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if not (ns_parser and ns_parser.ts):
        return
    # The pair is "caused,causing"; each half is "dataset.column".
    caused_label, causing_label = ns_parser.ts.split(",")
    dataset_y, column_y = caused_label.split(".")
    dataset_x, column_x = causing_label.split(".")
    econometrics_view.display_granger(
        self.datasets[dataset_y][column_y].rename(caused_label),
        self.datasets[dataset_x][column_x].rename(causing_label),
        ns_parser.lags,
        ns_parser.confidence,
        ns_parser.export,
    )
@log_start_end(log=logger)
def call_coint(self, other_args: List[str]):
    """Process coint command

    Tests for co-integration between two or more dataset.column series.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="coint",
        description="Show co-integration between two timeseries",
    )
    parser.add_argument(
        "-t",
        "--time_series",
        help="The time series you wish to test co-integration on. E.g. historical.open,historical2.close.",
        dest="ts",
        type=check_list_values(self.choices["coint"]),
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-p",
        "--plot",
        help="Plot Z-Values",
        dest="plot",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-s",
        "--significant",
        help="Show only companies that have p-values lower than this percentage",
        dest="significant",
        type=float,
        default=0,
    )
    # Support the positional shorthand "coint <series,series>".
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-t")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )
    if ns_parser and ns_parser.ts:
        # The test needs at least two series; collect them as a list.
        if len(ns_parser.ts) > 1:
            selected_series = []
            for entry in ns_parser.ts:
                if "." not in entry:
                    console.print(
                        "[red]Invalid time series format. Should be dataset.column, "
                        "e.g. historical.open[/red]\n"
                    )
                    continue
                dataset, column = entry.split(".")
                selected_series.append(self.datasets[dataset][column])
            econometrics_view.display_cointegration_test(
                *selected_series,
                significant=ns_parser.significant,
                plot=ns_parser.plot,
                export=ns_parser.export,
            )
        else:
            console.print(
                "[red]More than one dataset.column must be provided.\n[/red]"
            )
__docformat__ = "numpy"
import logging
import os
import warnings
from typing import Any, Dict, List, Tuple
import pandas as pd
import statsmodels
import statsmodels.api as sm
from linearmodels import PooledOLS
from linearmodels.panel import (
BetweenOLS,
FirstDifferenceOLS,
PanelOLS,
RandomEffects,
compare,
)
from pandas import DataFrame
from statsmodels.api import add_constant
from statsmodels.stats.api import het_breuschpagan
from statsmodels.stats.diagnostic import acorr_breusch_godfrey
from statsmodels.stats.stattools import durbin_watson
from openbb_terminal.decorators import log_start_end
from openbb_terminal.econometrics.econometrics_helpers import get_datasets
from openbb_terminal.helper_funcs import export_data
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
def get_regressions_results(
    Y: pd.DataFrame,
    X: pd.DataFrame,
    regression_type: str = "OLS",
    entity_effects: bool = False,
    time_effects: bool = False,
) -> Any:
    """Dispatch to the regression routine matching ``regression_type``.

    Parameters
    ----------
    Y: pd.DataFrame
        Dataframe containing the dependent variable.
    X: pd.DataFrame
        Dataframe containing the independent variables.
    regression_type: str
        One of "OLS", "POLS", "RE", "BOLS", "FE" or "FDOLS".
    entity_effects: bool
        Apply entity fixed effects; only used when regression_type is "FE".
    time_effects: bool
        Apply time fixed effects; only used when regression_type is "FE".

    Returns
    -------
    Any
        The fitted regression model, or None (after printing a message)
        when ``regression_type`` is not recognised.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.econometrics.load("wage_panel").set_index(["nr", "year"])
    >>> model = openbb.econometrics.panel(df["lwage"], df[["exper", "educ", "union"]], "POLS")
    """
    # FE is the only variant that consumes the effect flags.
    if regression_type == "FE":
        return get_fe(Y, X, entity_effects, time_effects)
    two_argument_regressions = {
        "OLS": get_ols,
        "POLS": get_pols,
        "RE": get_re,
        "BOLS": get_bols,
        "FDOLS": get_fdols,
    }
    if regression_type in two_argument_regressions:
        return two_argument_regressions[regression_type](Y, X)
    return console.print(f"{regression_type} is not an option.")
def get_regression_data(
    regression_variables: List[tuple],
    data: Dict[str, pd.DataFrame],
    regression_type: str = "",
) -> Tuple[DataFrame, Any, List[Any]]:
    """Assemble one aligned DataFrame for a regression and identify the
    dependent and independent variable labels.

    The first entry of ``regression_variables`` becomes the dependent
    variable; the remaining entries become the independent variables.
    Rows containing NaNs (typically caused by index mismatches between
    datasets) are dropped, with a console warning.

    Parameters
    ----------
    regression_variables : list
        The regression variables, the first being the dependent variable.
    data : dict
        A dictionary containing the datasets.
    regression_type: str
        The type of regression that is executed (used in messages only).

    Returns
    -------
    Tuple[DataFrame, Any, List[Any]]
        The aligned dataset, the dependent variable label and the list of
        independent variable labels.
    """
    variable_lookup = get_datasets(data)
    collected_series = {}
    independent_variables: List[Any] = []
    dependent_variable = None
    for variable in regression_variables:
        column, dataset = variable_lookup[variable].keys()
        label = f"{dataset}.{column}"
        collected_series[label] = data[dataset][column]
        if variable == regression_variables[0]:
            dependent_variable = label
        elif variable in regression_variables[1:]:
            independent_variables.append(label)
    regression_df = pd.DataFrame(collected_series)
    nan_values = regression_df.isnull().sum().sum()
    if nan_values > 0:
        # Keep only fully populated rows so the estimators get clean input.
        regression_df = regression_df.dropna(how="any", axis="index")
        if regression_df.empty:
            console.print(
                f"The resulting DataFrame only consists of NaN values. This is usually due to an index "
                f"mismatch. Therefore, no {regression_type} regression can be performed. Consider revisiting your "
                "dataset(s) and adjust accordingly."
            )
        else:
            console.print(
                f"The resulting DataFrame has {nan_values} NaN values. This is usually due to "
                f"an index mismatch. Rows that contain NaNs are dropped to be able to perform the {regression_type} "
                f"regression. Consider revisiting your dataset(s) and adjust accordingly."
            )
    return regression_df, dependent_variable, independent_variables
@log_start_end(log=logger)
def get_ols(Y: pd.DataFrame, X: pd.DataFrame) -> Any:
    """Fit an ordinary least squares regression on timeseries data.
    [Source: Statsmodels]

    Note that, unlike the panel estimators in this module, no constant is
    added to the regressors.

    Parameters
    ----------
    Y: pd.DataFrame
        Dependent variable series.
    X: pd.DataFrame
        Dataframe of independent variables series.

    Returns
    -------
    statsmodels.regression.linear_model.RegressionResultsWrapper
        Fitted regression results, or None when either input is empty.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.econometrics.load("wage_panel")
    >>> model = openbb.econometrics.ols(df["lwage"], df[["educ", "exper", "expersq"]])
    >>> print(model.summary())
    """
    if X.empty or Y.empty:
        return None
    # Capture any warnings emitted during fitting and surface them in red.
    with warnings.catch_warnings(record=True) as caught_warnings:
        fitted_model = sm.OLS(Y, X).fit()
        if caught_warnings:
            console.print("Warnings:")
            for caught in caught_warnings:
                console.print(f"[red]{caught.message}[/red]".replace("\n", ""))
    return fitted_model
@log_start_end(log=logger)
def get_pols(Y: pd.DataFrame, X: pd.DataFrame) -> Any:
    """Fit a Pooled OLS model: plain OLS that understands panel data
    structures, useful as a base model. [Source: LinearModels]

    Parameters
    ----------
    Y: pd.DataFrame
        Dependent variable series.
    X: pd.DataFrame
        Dataframe of independent variables series; a constant is added
        before fitting.

    Returns
    -------
    Any
        Fitted PooledOLS results, or None when either input is empty.
    """
    if Y.empty or X.empty:
        return None
    # Capture any warnings emitted during fitting and surface them in red.
    with warnings.catch_warnings(record=True) as caught_warnings:
        fitted_model = PooledOLS(Y, add_constant(X)).fit()
        if caught_warnings:
            console.print("Warnings:")
            for caught in caught_warnings:
                console.print(f"[red]{caught.message}[/red]".replace("\n", ""))
    return fitted_model
@log_start_end(log=logger)
def get_re(Y: pd.DataFrame, X: pd.DataFrame) -> Any:
    """Fit a Random Effects model: like Pooled OLS but accounting for the
    panel structure via quasi-demeaning, and so more efficient.
    [Source: LinearModels]

    Parameters
    ----------
    Y: pd.DataFrame
        Dependent variable series.
    X: pd.DataFrame
        Dataframe of independent variables series; a constant is added
        before fitting.

    Returns
    -------
    Any
        Fitted RandomEffects results, or None when either input is empty.
    """
    if X.empty or Y.empty:
        return None
    # Capture any warnings emitted during fitting and surface them in red.
    with warnings.catch_warnings(record=True) as caught_warnings:
        fitted_model = RandomEffects(Y, add_constant(X)).fit()
        if caught_warnings:
            console.print("Warnings:")
            for caught in caught_warnings:
                console.print(f"[red]{caught.message}[/red]".replace("\n", ""))
    return fitted_model
@log_start_end(log=logger)
def get_bols(Y: pd.DataFrame, X: pd.DataFrame) -> Any:
    """The between estimator is an alternative, usually less efficient estimator that can
    be used to estimate model parameters. It is particularly simple since it first
    computes the time averages of y and x and then runs a simple regression using
    these averages. [Source: LinearModels]

    Parameters
    ----------
    Y : pd.DataFrame
        Dependent variable data (a single column).
    X : pd.DataFrame
        Independent (exogenous) variables; a constant term is added before fitting.

    Returns
    -------
    Any
        Fitted BetweenOLS results object, or None when either Y or X is empty.
    """
    if Y.empty or X.empty:
        model = None
    else:
        # Record warnings raised during fitting so they can be echoed to the
        # console instead of going to stderr.
        with warnings.catch_warnings(record=True) as warning_messages:
            exogenous = add_constant(X)
            model = BetweenOLS(Y, exogenous).fit()

            if len(warning_messages) > 0:
                console.print("Warnings:")
                for warning in warning_messages:
                    console.print(f"[red]{warning.message}[/red]".replace("\n", ""))

    return model
@log_start_end(log=logger)
def get_fe(
    Y: pd.DataFrame,
    X: pd.DataFrame,
    entity_effects: bool = False,
    time_effects: bool = False,
) -> Any:
    """When effects are correlated with the regressors the RE and BE estimators are not consistent.
    The usual solution is to use Fixed Effects which are called entity_effects when applied to
    entities and time_effects when applied to the time dimension. [Source: LinearModels]

    Parameters
    ----------
    Y : pd.DataFrame
        Dependent variable data (a single column).
    X : pd.DataFrame
        Independent (exogenous) variables; a constant term is added before fitting.
    entity_effects : bool
        Whether to include entity effects
    time_effects : bool
        Whether to include time effects

    Returns
    -------
    Any
        Fitted PanelOLS results object with Fixed Effects, or None when either
        Y or X is empty.
    """
    if X.empty or Y.empty:
        model = None
    else:
        # Record warnings raised during fitting so they can be echoed to the
        # console instead of going to stderr.
        with warnings.catch_warnings(record=True) as warning_messages:
            exogenous = add_constant(X)
            model = PanelOLS(
                Y, exogenous, entity_effects=entity_effects, time_effects=time_effects
            ).fit()

            if len(warning_messages) > 0:
                console.print("Warnings:")
                for warning in warning_messages:
                    console.print(f"[red]{warning.message}[/red]".replace("\n", ""))

    return model
@log_start_end(log=logger)
def get_fdols(Y: pd.DataFrame, X: pd.DataFrame) -> Any:
    """Fit a First Difference OLS panel regression.

    First differencing is an alternative to using fixed effects when there might
    be correlation. When using first differences, time-invariant variables must
    be excluded. Additionally, only one linear time-trending variable can be
    included since this will look like a constant. This variable will soak up
    all time-trends in the data, and so interpretations of these variables can
    be challenging. [Source: LinearModels]

    Parameters
    ----------
    Y : pd.DataFrame
        Dependent variable data (a single column).
    X : pd.DataFrame
        Independent (exogenous) variables. Note that no constant term is added
        here, unlike the other panel estimators in this module.

    Returns
    -------
    Any
        Fitted FirstDifferenceOLS results object, or None when either input is empty.
    """
    if X.empty or Y.empty:
        return None

    # Capture fit-time warnings so they can be echoed to the console.
    with warnings.catch_warnings(record=True) as caught:
        fitted = FirstDifferenceOLS(Y, X).fit()

        if caught:
            console.print("Warnings:")
            for caught_warning in caught:
                console.print(f"[red]{caught_warning.message}[/red]".replace("\n", ""))

    return fitted
@log_start_end(log=logger)
def get_comparison(regressions: Dict, export: str = ""):
    """Compare regression results between Panel Data regressions.

    Parameters
    ----------
    regressions : Dict
        Dictionary with regression results; each value is expected to hold a
        fitted model under the "model" key.
    export : str
        Format to export data

    Returns
    -------
    PanelModelComparison or None
        An overview of the different regression results, or None when no
        Panel regression estimates were available to compare.
    """
    comparison = {}

    # Collect all fitted panel models. Plain OLS is not comparable with the
    # panel estimators, so it is skipped.
    for regression_type, data in regressions.items():
        if regression_type == "OLS":
            continue
        if data["model"]:
            comparison[regression_type] = data["model"]

    if not comparison:
        # When the dictionary is empty, it means no Panel regression
        # estimates are available and thus the function will have no output
        return console.print(
            "No Panel regression estimates available. Please use the "
            "command 'panel' before using this command."
        )

    comparison_result = compare(comparison)
    console.print(comparison_result)

    if export:
        # The comparison summary only renders as HTML; round-trip it through
        # pandas so export_data can write csv/json/xlsx.
        results_as_html = comparison_result.summary.tables[0].as_html()
        df = pd.read_html(results_as_html, header=0, index_col=0)[0]

        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "regressions_compare",
            df,
        )

    return comparison_result
@log_start_end(log=logger)
def get_dwat(
    model: statsmodels.regression.linear_model.RegressionResultsWrapper,
) -> float:
    """Calculate test statistics for Durbin Watson autocorrelation.

    Parameters
    ----------
    model : statsmodels.regression.linear_model.RegressionResultsWrapper
        Previously fit statsmodels OLS.

    Returns
    -------
    float
        Test statistic of the Durbin Watson test, rounded to 2 decimals.

    Notes
    -----
    When using chart = True, the dependent variable in the regression must be passed to view the residuals

    Example
    -------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.econometrics.load("wage_panel")
    >>> Y, X = df["lwage"], df[["exper","educ"]]
    >>> model = openbb.econometrics.ols(Y,X)
    >>> durbin_watson_value = openbb.econometrics.dwat(model)
    0.96
    """
    # Durbin Watson test: The test statistic is approximately equal to 2*(1-r) where r is the
    # sample autocorrelation of the residuals. Thus, for r == 0, indicating no serial correlation,
    # the test statistic equals 2. This statistic will always be between 0 and 4. The closer
    # to 0 the statistic, the more evidence for positive serial correlation. The closer to 4,
    # the more evidence for negative serial correlation.
    result = durbin_watson(model.resid)

    return round(result, 2)
@log_start_end(log=logger)
def get_bgod(
    model: statsmodels.regression.linear_model.RegressionResultsWrapper,
    lags: int = 3,
) -> pd.DataFrame:
    """Calculate test statistics for autocorrelation (Breusch-Godfrey test).

    Parameters
    ----------
    model : statsmodels.regression.linear_model.RegressionResultsWrapper
        OLS model that has been fit.
    lags : int
        The amount of lags.

    Returns
    -------
    pd.DataFrame
        Test results from the Breusch-Godfrey Test
    """
    lm_stat, p_value, f_stat, fp_value = acorr_breusch_godfrey(model, nlags=lags)

    return pd.DataFrame(
        [lm_stat, p_value, f_stat, fp_value],
        index=["lm-stat", "p-value", "f-stat", "fp-value"],
    )
@log_start_end(log=logger)
def get_bpag(
    model: statsmodels.regression.linear_model.RegressionResultsWrapper,
) -> pd.DataFrame:
    """Calculate test statistics for heteroscedasticity (Breusch-Pagan test).

    Parameters
    ----------
    model : OLS Model
        Model containing residual values.

    Returns
    -------
    pd.DataFrame
        Test results from the Breusch-Pagan Test
    """
    test_stats = het_breuschpagan(model.resid, model.model.exog)

    # het_breuschpagan returns (lm-stat, p-value, f-stat, fp-value) in order.
    return pd.DataFrame(
        list(test_stats),
        index=["lm-stat", "p-value", "f-stat", "fp-value"],
    )
__docformat__ = "numpy"
import logging
import os
from typing import Dict, List, Optional, Union
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.units import ConversionError
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.econometrics import econometrics_model
from openbb_terminal.econometrics.econometrics_helpers import get_ending
from openbb_terminal.helper_funcs import export_data, plot_autoscale, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def show_options(
    datasets: Dict[str, pd.DataFrame],
    dataset_name: Optional[str] = None,
    export: str = "",
):
    """Show the column options of the loaded datasets.

    Parameters
    ----------
    datasets: dict
        The loaded in datasets
    dataset_name: Optional[str]
        The name of the dataset you wish to show options for; None shows all.
    export: str
        Format to export image
    """
    if not datasets:
        console.print(
            "Please load in a dataset by using the 'load' command before using this feature."
        )
    else:
        option_tables = econometrics_model.get_options(datasets, dataset_name)

        for dataset, data_values in option_tables.items():
            print_rich_table(
                data_values,
                headers=list(data_values.columns),
                show_index=False,
                title=f"Options for dataset: '{dataset}'",
            )

            export_data(
                export,
                os.path.dirname(os.path.abspath(__file__)),
                f"{dataset}_options",
                data_values.set_index("column"),
            )
@log_start_end(log=logger)
def display_plot(
    data: Union[pd.Series, pd.DataFrame, Dict[str, pd.DataFrame]],
    export: str = "",
    external_axes: Optional[List[plt.axes]] = None,
):
    """Plot data from a dataset.

    Parameters
    ----------
    data: Union[pd.Series, pd.DataFrame, Dict[str: pd.DataFrame]
        Dictionary with key being dataset.column and dataframes being values
    export: str
        Format to export image
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Normalize input to a dict of label -> series/frame.
    if isinstance(data, pd.Series):
        data = {data.name: data}
    elif isinstance(data, pd.DataFrame):
        data = {x: data[x] for x in data.columns}

    # BUG FIX: iterate over a snapshot of the keys -- deleting from a dict
    # while iterating over it raises "RuntimeError: dictionary changed size
    # during iteration".
    for dataset_col in list(data):
        if isinstance(data[dataset_col].index, pd.MultiIndex):
            console.print(
                "The index appears to be a multi-index. "
                "Therefore, it is not possible to plot the data."
            )
            del data[dataset_col]

    # Check that there's at least a valid dataframe
    if data:
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]

        for dataset_col in data:
            try:
                if isinstance(data[dataset_col], pd.Series):
                    ax.plot(data[dataset_col].index, data[dataset_col].values)
                elif isinstance(data[dataset_col], pd.DataFrame):
                    ax.plot(data[dataset_col])
            # Both failure modes get the same message; use console.print for
            # consistency with the rest of this module.
            except (ConversionError, TypeError):
                console.print(f"Could not convert column: {dataset_col}")

        theme.style_primary_axis(ax)
        if external_axes is None:
            theme.visualize_output()

        ax.legend(list(data.keys()))

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "plot",
    )
@log_start_end(log=logger)
def display_norm(
    data: pd.Series,
    dataset: str = "",
    column: str = "",
    plot: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.axes]] = None,
):
    """Determine the normality of a timeseries.

    Parameters
    ----------
    data: pd.Series
        Series of custom data
    dataset: str
        Dataset name
    column: str
        Column for y data
    plot : bool
        Whether you wish to plot a histogram
    export: str
        Format to export data.
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Normality tests only make sense on numeric data.
    if data.dtype not in [int, float]:
        console.print(
            f"The column type must be numeric. The provided column type is {data.dtype}. "
            f"Consider using the command 'type' to change this.\n"
        )
    else:
        results = econometrics_model.get_normality(data)

        ending = get_ending(dataset, column)
        print_rich_table(
            results,
            headers=list(results.columns),
            show_index=True,
            title=f"Normality test{ending}",
        )

        if plot:
            if external_axes is None:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            else:
                ax = external_axes[0]

            ax.hist(data, bins=100)

            ax.set_title(f"Histogram{ending}")

            theme.style_primary_axis(ax)

            if external_axes is None:
                theme.visualize_output()

        if export:
            export_data(
                export,
                os.path.dirname(os.path.abspath(__file__)),
                f"{column}_{dataset}_norm",
                results,
            )
        else:
            # Blank line to separate the table from the next prompt.
            console.print()
@log_start_end(log=logger)
def display_root(
    data: pd.Series,
    dataset: str = "",
    column: str = "",
    fuller_reg: str = "c",
    kpss_reg: str = "c",
    export: str = "",
):
    """Show unit root tests (ADF / KPSS) for a timeseries.

    Parameters
    ----------
    data : pd.Series
        Series of target variable
    dataset: str
        Name of the dataset
    column: str
        Name of the column
    fuller_reg : str
        Type of regression of ADF test. Choose c, ct, ctt, or nc
    kpss_reg : str
        Type of regression for KPSS test. Choose c or ct
    export: str
        Format to export data.
    """
    # Unit root tests only make sense on numeric data.
    if data.dtype not in [int, float]:
        console.print(
            f"The column type must be numeric. The provided "
            f"type is {data.dtype}. Consider using the command 'type' to change this.\n"
        )
        return

    root_tests = econometrics_model.get_root(data, fuller_reg, kpss_reg)

    suffix = get_ending(dataset, column)
    print_rich_table(
        root_tests,
        headers=list(root_tests.columns),
        show_index=True,
        title=f"Unitroot {suffix}",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        f"{dataset}_{column}_root",
        root_tests,
    )
@log_start_end(log=logger)
def display_granger(
    dependent_series: pd.Series,
    independent_series: pd.Series,
    lags: int = 3,
    confidence_level: float = 0.05,
    export: str = "",
):
    """Show Granger causality tests between two series.

    Parameters
    ----------
    dependent_series: Series
        The series you want to test Granger Causality for.
    independent_series: Series
        The series that you want to test whether it Granger-causes dependent_series
    lags : int
        The amount of lags for the Granger test. By default, this is set to 3.
    confidence_level: float
        The confidence level you wish to use. By default, this is set to 0.05.
    export : str
        Format to export data
    """
    # Both series must be numeric before the test can run.
    if dependent_series.dtype not in [int, float]:
        console.print(
            f"The time series {dependent_series.name} needs to be numeric but is type "
            f"{dependent_series.dtype}. Consider using the command 'type' to change this."
        )
    elif independent_series.dtype not in [int, float]:
        console.print(
            f"The time series {independent_series.name} needs to be numeric but is type "
            f"{independent_series.dtype}. Consider using the command 'type' to change this."
        )
    else:
        granger_df = econometrics_model.get_granger_causality(
            dependent_series, independent_series, lags
        )

        print_rich_table(
            granger_df,
            headers=list(granger_df.columns),
            show_index=True,
            title=f"Granger Causality Test [Y: {dependent_series.name} | X: {independent_series.name} | Lags: {lags}]",
        )

        result_ftest = round(granger_df.loc["params_ftest"]["P-value"], 3)

        # Null hypothesis: the independent series does NOT Granger-cause the
        # dependent series. A p-value above the threshold fails to reject it.
        if result_ftest > confidence_level:
            console.print(
                f"As the p-value of the F-test is {result_ftest}, we can not reject the null hypothesis at "
                f"the {confidence_level} confidence level.\n"
            )
        else:
            console.print(
                f"As the p-value of the F-test is {result_ftest}, we can reject the null hypothesis at "
                f"the {confidence_level} confidence level and find the Series '{independent_series.name}' "
                f"to Granger-cause the Series '{dependent_series.name}'\n"
            )

        # Dots in series names are replaced so the export filename does not
        # gain extra extension-like segments.
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            f'{dependent_series.name.replace(".","-")}_{independent_series.name.replace(".","-")}_granger',
            granger_df,
        )
@log_start_end(log=logger)
def display_cointegration_test(
    *datasets: pd.Series,
    significant: float = False,
    plot: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.axes]] = None,
):
    """Estimates long-run and short-run cointegration relationship for series y and x and apply
    the two-step Engle & Granger test for cointegration.

    Uses a 2-step process to first estimate coefficients for the long-run relationship
        y_t = c + gamma * x_t + z_t
    and then the short-term relationship,
        y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t,
    with z the found residuals of the first equation.

    Then tests co-integration with the Dickey-Fuller phi=1 vs phi < 1 in
        z_t = phi * z_(t-1) + eta_t
    If this implies phi < 1, the z series is concluded to be stationary, and
    thus the series y and x are concluded to be cointegrated.

    Parameters
    ----------
    datasets: pd.Series
        Variable number of series to test for cointegration
    significant: float
        Show only pairs that have p-values lower than this threshold; a falsy
        value (the default) disables the filtering.
    plot: bool
        Whether you wish to plot the z-values of all pairs.
    export : str
        Format to export data
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    # Cointegration is a pairwise concept; a single series cannot be tested.
    if len(datasets) < 2:
        console.print("[red]Co-integration requires at least two time series.[/red]")
        return

    df: pd.DataFrame = econometrics_model.get_coint_df(*datasets)

    if significant:
        console.print(
            f"Only showing pairs that are statistically significant ({significant} > p-value)."
        )
        df = df[significant > df["P Value"]]
        console.print()

    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=True,
        index_name="Pairs",
        title="Cointegration Tests",
    )

    if plot:
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]

        # Plot the error-correction (residual z) series for every pair.
        z_values = econometrics_model.get_coint_df(*datasets, return_z=True)

        for pair, values in z_values.items():
            ax.plot(values, label=pair)

        ax.legend()
        ax.set_title("Error correction terms")

        theme.style_primary_axis(ax)

        if external_axes is None:
            theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "coint",
        df,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import pandas as pd
from matplotlib import pyplot as plt
import statsmodels
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.econometrics import regression_model
from openbb_terminal.helper_funcs import export_data, plot_autoscale, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_panel(
    Y: pd.DataFrame,
    X: pd.DataFrame,
    regression_type: str = "OLS",
    entity_effects: bool = False,
    time_effects: bool = False,
    export: str = "",
):
    """Based on the regression type, this function decides what regression to run.

    Parameters
    ----------
    Y : pd.DataFrame
        Dependent variable data (a single column).
    X : pd.DataFrame
        Independent (exogenous) variables.
    regression_type: str
        The type of regression you wish to execute. Choose from:
        OLS, POLS, RE, BOLS, FE
    entity_effects: bool
        Whether to apply Fixed Effects on entities.
    time_effects: bool
        Whether to apply Fixed Effects on time.
    export : str
        Format to export data

    Returns
    -------
    Any
        The fitted regression model.
    """
    model = regression_model.get_regressions_results(
        Y,
        X,
        regression_type,
        entity_effects,
        time_effects,
    )

    # NOTE(review): only panel estimators are echoed here; OLS output is
    # presumably printed elsewhere -- confirm against the caller.
    if regression_type != "OLS":
        console.print(model)

    if export:
        # The summary table only renders as HTML; round-trip it through
        # pandas so export_data can write it out.
        results_as_html = model.summary.tables[1].as_html()
        df = pd.read_html(results_as_html, header=0, index_col=0)[0]

        dependent = Y.columns[0]

        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            f"{dependent}_{regression_type}_regression",
            df,
        )

    return model
@log_start_end(log=logger)
def display_dwat(
    model: statsmodels.regression.linear_model.RegressionResultsWrapper,
    dependent_variable: pd.Series,
    plot: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.axes]] = None,
):
    """Show Durbin-Watson autocorrelation tests.

    Parameters
    ----------
    model : OLS Model
        A fit statsmodels OLS model.
    dependent_variable : pd.Series
        The dependent variable for plotting
    plot : bool
        Whether to plot the residuals
    export : str
        Format to export data
    external_axes: Optional[List[plt.axes]]
        External axes to plot on
    """
    autocorr = regression_model.get_dwat(model)

    # A Durbin-Watson statistic near 2 indicates no first-order
    # autocorrelation; (1.5, 2.5) is the conventional acceptable band.
    if 1.5 < autocorr < 2.5:
        console.print(
            f"The result {autocorr} is within the range 1.5 and 2.5 which therefore indicates "
            f"autocorrelation not to be problematic."
        )
    else:
        console.print(
            f"The result {autocorr} is outside the range 1.5 and 2.5 and could "
            f"be problematic. Please consider lags of the dependent or independent variable."
        )

    if plot:
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        else:
            ax = external_axes[0]

        # Scatter of residuals against the dependent variable with a zero line.
        ax.scatter(dependent_variable, model.resid)
        ax.axhline(y=0, color="r", linestyle="-")
        ax.set_ylabel("Residual")
        ax.set_xlabel(dependent_variable.name.capitalize())
        ax.set_title("Plot of Residuals")
        theme.style_primary_axis(ax)

        if external_axes is None:
            theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        f"{dependent_variable.name}_dwat",
        autocorr,
    )
@log_start_end(log=logger)
def display_bgod(
    model: statsmodels.regression.linear_model.RegressionResultsWrapper,
    lags: int = 3,
    export: str = "",
):
    """Show Breusch-Godfrey autocorrelation test.

    The Breusch-Godfrey test has "no serial correlation" as its null
    hypothesis: a small p-value (below the usual 5% level) is evidence of
    autocorrelation, while a large p-value means the null cannot be rejected.

    Parameters
    ----------
    model : OLS Model
        OLS model that has been fit.
    lags : int
        The amount of lags included.
    export : str
        Format to export data
    """
    df = regression_model.get_bgod(model, lags)

    print_rich_table(
        df,
        headers=list(["Breusch-Godfrey"]),
        show_index=True,
        title=f"Breusch-Godfrey autocorrelation test [Lags: {lags}]",
    )

    p_value = df.loc["p-value"][0]
    # BUG FIX: the interpretation branches were inverted. Under the null of
    # no serial correlation, p > 0.05 means the null can NOT be rejected
    # (no autocorrelation detected); p <= 0.05 indicates autocorrelation.
    if p_value > 0.05:
        console.print(
            f"The result {round(p_value, 2)} indicates no existence of autocorrelation."
        )
    else:
        console.print(
            f"{round(p_value, 2)} indicates the autocorrelation. Consider re-estimating with "
            "clustered standard errors and applying the Random Effects or Fixed Effects model."
        )

    export_data(export, os.path.dirname(os.path.abspath(__file__)), "results_bgod", df)
    console.print()
@log_start_end(log=logger)
def display_bpag(
    model: statsmodels.regression.linear_model.RegressionResultsWrapper,
    export: str = "",
):
    """Show Breusch-Pagan heteroscedasticity test.

    The Breusch-Pagan test has homoscedasticity as its null hypothesis: a
    small p-value (below the usual 5% level) is evidence of
    heteroscedasticity, while a large p-value means the null cannot be
    rejected.

    Parameters
    ----------
    model : OLS Model
        OLS model that has been fit.
    export : str
        Format to export data
    """
    df = regression_model.get_bpag(model)

    print_rich_table(
        df,
        headers=list(["Breusch-Pagan"]),
        show_index=True,
        title="Breusch-Pagan heteroscedasticity test",
    )

    p_value = df.loc["p-value"][0]
    # BUG FIX: the interpretation branches were inverted. Under the null of
    # homoscedasticity, p > 0.05 means the null can NOT be rejected (no
    # heteroscedasticity detected); p <= 0.05 indicates heteroscedasticity.
    if p_value > 0.05:
        console.print(
            f"The result {round(p_value, 2)} indicates no existence of heteroscedasticity."
        )
    else:
        console.print(
            f"{round(p_value, 2)} indicates heteroscedasticity. Consider taking the log "
            f"or a rate for the dependent variable."
        )

    export_data(export, os.path.dirname(os.path.abspath(__file__)), "results_bpag", df)
    console.print()
import logging
import time
from datetime import datetime, timedelta
from enum import Enum
from math import ceil
from threading import Thread
from typing import Callable, Optional
class Precision(Enum):
    """Tick precision for the LoggingClock, expressed in seconds."""

    hour = 3600
    minute = 60


class LoggingClock(Thread):
    """
    Like a Talking Clock but for logs: a thread that emits a log record at
    every sharp hour or minute.

    Usage example :
        import logging
        from openbb_terminal.log_collection.logging_clock import LoggingClock, Precision
        logging.basicConfig()
        logger = logging.getLogger(__name__)
        logger.setLevel(level=logging.INFO)
        logging_clock = LoggingClock(
            logger=logger,
            precision=Precision.minute,
        )
        logging_clock.start()
        logging_clock.join()
    """

    @classmethod
    def calculate_next_sharp(
        cls,
        current_time: datetime,
        precision: Precision,
    ) -> datetime:
        """Return the next sharp hour or minute strictly after `current_time`.

        Raises AttributeError for an unknown precision.
        """
        if precision is Precision.hour:
            sharp_time = cls.calculate_next_sharp_hour(current_time=current_time)
        elif precision is Precision.minute:
            sharp_time = cls.calculate_next_sharp_minute(current_time=current_time)
        else:
            raise AttributeError(f"Unknown precision {precision}")

        return sharp_time

    @staticmethod
    def calculate_next_sharp_hour(current_time: datetime) -> datetime:
        """Return the next sharp hour after `current_time` (minutes/seconds zeroed)."""
        current_truncated_time = datetime(
            current_time.year,
            current_time.month,
            current_time.day,
            current_time.hour,
        )
        # BUG FIX: the next sharp *hour* is one hour after truncation, not one
        # minute. Adding a minute produced a timestamp already in the past,
        # which made the clock fire on every loop iteration instead of hourly.
        next_sharp_hour = current_truncated_time + timedelta(hours=1)

        return next_sharp_hour

    @staticmethod
    def calculate_next_sharp_minute(current_time: datetime) -> datetime:
        """Return the next sharp minute after `current_time` (seconds zeroed)."""
        current_truncated_time = datetime(
            current_time.year,
            current_time.month,
            current_time.day,
            current_time.hour,
            current_time.minute,
        )
        next_sharp_minute = current_truncated_time + timedelta(minutes=1)

        return next_sharp_minute

    @classmethod
    def do_action_every_sharp(
        cls,
        action: Callable,
        precision: Precision = Precision.hour,
    ):
        """Run `action` at every sharp time tick, forever (blocking loop)."""
        next_time = cls.calculate_next_sharp(
            current_time=datetime.now(),
            precision=precision,
        )

        while True:
            current_time = datetime.now()
            delta = current_time - next_time
            delta_seconds = delta.total_seconds()

            if delta_seconds > 0:
                action()
                next_time = cls.calculate_next_sharp(
                    current_time=current_time,
                    precision=precision,
                )
            else:
                # Sleep until the next sharp time instead of busy-waiting.
                sleep_duration = ceil(abs(delta_seconds))
                time.sleep(sleep_duration)

    # OVERRIDE
    def run(self):
        action = self.__action
        precision = self.__precision

        self.do_action_every_sharp(
            action=action,
            precision=precision,
        )

    # OVERRIDE
    def __init__(
        self,
        *args,
        action: Optional[Callable] = None,
        level: int = logging.INFO,
        logger: Optional[logging.Logger] = None,
        msg: str = "Logging Clock : %s",
        precision: Precision = Precision.hour,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

        self.__action = action or self.default_action
        self.__level = level
        self.__logger = logger or logging.getLogger(self.__module__)
        self.__msg = msg
        self.__precision = precision

    def default_action(self):
        """Log the current time using the configured logger, level and message."""
        level = self.__level
        logger = self.__logger
        msg = self.__msg % datetime.now()

        logger.log(level=level, msg=msg)
from argparse import ArgumentParser
from contextlib import contextmanager
from inspect import isfunction, unwrap
from os import environ
from types import MethodType
from typing import Callable, List
from unittest.mock import patch
from openbb_terminal.helper_funcs import check_file_type_saved, check_positive
from openbb_terminal.rich_config import get_ordered_list_sources
def __mock_parse_known_args_and_warn(
    controller,
    parser: ArgumentParser,
    other_args: List[str],
    export_allowed: int = 0,
    raw: bool = False,
    limit: int = 0,
) -> None:
    """Add the arguments that would have normally been added by :
    - openbb_terminal.parent_classes.BaseController.parse_known_args_and_warn

    Parameters
    ----------
    controller
        Instance of the Terminal Controller; its PATH is used to look up the
        ordered list of data sources for this command.
    parser: argparse.ArgumentParser
        Parser with predefined arguments
    other_args: List[str]
        list of arguments to parse (unused here; kept for signature parity)
    export_allowed: int
        0 disables exporting, 1 allows raw data (csv/json/xlsx),
        2 allows figures (png/jpg/pdf/svg); any higher value allows both.
    raw: bool
        Add the --raw flag
    limit: int
        Add a --limit flag with this number default
    """
    _ = other_args
    parser.add_argument(
        "-h", "--help", action="store_true", help="show this help message"
    )

    if export_allowed > 0:
        choices_export = []
        help_export = "Does not export!"

        if export_allowed == 1:
            choices_export = ["csv", "json", "xlsx"]
            help_export = "Export raw data into csv, json, xlsx"
        elif export_allowed == 2:
            choices_export = ["png", "jpg", "pdf", "svg"]
            help_export = "Export figure into png, jpg, pdf, svg "
        else:
            choices_export = ["csv", "json", "xlsx", "png", "jpg", "pdf", "svg"]
            help_export = "Export raw data into csv, json, xlsx and figure into png, jpg, pdf, svg "

        parser.add_argument(
            "--export",
            default="",
            type=check_file_type_saved(choices_export),
            dest="export",
            help=help_export,
            choices=choices_export,
        )

    if raw:
        parser.add_argument(
            "--raw",
            dest="raw",
            action="store_true",
            default=False,
            help="Flag to display raw data",
        )
    if limit > 0:
        parser.add_argument(
            "-l",
            "--limit",
            dest="limit",
            default=limit,
            help="Number of entries to show in data.",
            type=check_positive,
        )

    sources = get_ordered_list_sources(f"{controller.PATH}{parser.prog}")
    # Allow to change source if there is more than one
    if len(sources) > 1:
        parser.add_argument(
            "--source",
            action="store",
            dest="source",
            choices=sources,
            default=sources[0],  # the first source from the list is the default
            help="Data source to select from",
        )

    return None
def __mock_parse_simple_args(parser: ArgumentParser, other_args: List[str]) -> None:
    """Add the arguments that would have normally been added by:
    - openbb_terminal.parent_classes.BaseController.parse_simple_args

    Parameters
    ----------
    parser: argparse.ArgumentParser
        Parser with predefined arguments
    other_args: List[str]
        List of arguments to parse (unused here; kept for signature parity)
    """
    _ = other_args

    # Only the help flag is injected; simple commands define everything else.
    parser.add_argument(
        "-h", "--help", action="store_true", help="show this help message"
    )

    return None
def __get_command_func(controller, command: str):
    """Get the function with the name `f"call_{command}"` from controller object.

    Parameters
    ----------
    controller: BaseController
        Instance of the Terminal Controller.
    command: str
        A name from controller.CHOICES_COMMANDS

    Returns
    -------
    Callable: Command function.

    Raises
    ------
    AttributeError
        If `command` is not listed in `controller.CHOICES_COMMANDS`.
    """
    if command not in controller.CHOICES_COMMANDS:
        raise AttributeError(
            f"The following command is not inside `CHOICES_COMMANDS` : '{command}'"
        )

    command = f"call_{command}"
    command_func = getattr(controller, command)
    # Strip any decorator wrappers (e.g. logging) to reach the raw function.
    command_func = unwrap(func=command_func)

    # unwrap may return the plain function object; re-bind it to the
    # controller so it can be invoked like the original bound method.
    if isfunction(command_func):
        command_func = MethodType(command_func, controller)

    return command_func
def contains_functions_to_patch(command_func: Callable) -> bool:
    """Check if a `command_func` actually contains the functions we want to mock, i.e.:
    - parse_simple_args
    - parse_known_args_and_warn

    Parameters
    ----------
    command_func: Callable
        Function to check.

    Returns
    -------
    bool: Whether or not `command_func` contains the mocked functions.
    """
    # co_names lists every global/attribute name referenced by the function
    # body, so a simple membership test is enough.
    referenced_names = command_func.__code__.co_names

    return (
        "parse_simple_args" in referenced_names
        or "parse_known_args_and_warn" in referenced_names
    )
@contextmanager
def __patch_controller_functions(controller):
    """Patch the following functions from a BaseController instance:
    - parse_simple_args
    - parse_known_args_and_warn

    These functions take an 'argparse.ArgumentParser' object as parameter.
    We want to intercept this 'argparse.ArgumentParser' object.

    Parameters
    ----------
    controller: BaseController
        BaseController object that needs to be patched.

    Returns
    -------
    List[Callable]: List of mocked functions.
    """
    # Bind the mock to the controller so it receives the controller as its
    # first argument, mirroring the real method's signature.
    bound_mock_parse_known_args_and_warn = MethodType(
        __mock_parse_known_args_and_warn,
        controller,
    )

    # Silence rich console output while probing commands (unless debugging).
    rich = patch(
        target="openbb_terminal.rich_config.ConsoleAndPanel.print",
        return_value=None,
    )

    patcher_list = [
        patch.object(
            target=controller,
            attribute="parse_simple_args",
            side_effect=__mock_parse_simple_args,
            return_value=None,
        ),
        patch.object(
            target=controller,
            attribute="parse_known_args_and_warn",
            side_effect=bound_mock_parse_known_args_and_warn,
            return_value=None,
        ),
    ]

    if environ.get("DEBUG_MODE", "false") != "true":
        rich.start()

    patched_function_list = []
    for patcher in patcher_list:
        patched_function_list.append(patcher.start())

    yield patched_function_list

    # NOTE(review): if the with-body raises, these stops are skipped; a
    # try/finally would be more robust -- left as-is to preserve behavior.
    if environ.get("DEBUG_MODE", "false") != "true":
        rich.stop()

    for patcher in patcher_list:
        patcher.stop()
def _get_argument_parser(
    controller,
    command: str,
) -> ArgumentParser:
    """Intercept the ArgumentParser instance from the command function.

    A command function being a function starting with `call_`, like:
    - call_help
    - call_overview
    - call_load

    Parameters
    ----------
    controller: BaseController
        Instance of the Terminal Controller.
    command: str
        A name from `controller.CHOICES_COMMANDS`.

    Returns
    -------
    ArgumentParser: ArgumentParser instance from the command function.

    Raises
    ------
    AssertionError
        If the command function does not build its parser through exactly one
        call to one of the two patched parsing functions.
    """
    command_func: Callable = __get_command_func(controller=controller, command=command)

    if not contains_functions_to_patch(command_func=command_func):
        raise AssertionError(
            f"One of these functions should be inside `call_{command}`:\n"
            " - parse_simple_args\n"
            " - parse_known_args_and_warn\n"
        )

    with __patch_controller_functions(controller=controller) as patched_function_list:
        # Run the command with no arguments; the patched parse functions
        # capture the parser instead of actually parsing anything.
        command_func([])

        call_count = 0
        for patched_function in patched_function_list:
            call_count += patched_function.call_count
            if patched_function.call_count == 1:
                # The first positional argument of the mocked call is the parser.
                args = patched_function.call_args.args
                argument_parser = args[0]

        if call_count != 1:
            raise AssertionError(
                f"One of these functions should be called once inside `call_{command}`:\n"
                " - parse_simple_args\n"
                " - parse_known_args_and_warn\n"
            )

    return argument_parser
def _build_command_choice_map(argument_parser: ArgumentParser) -> dict:
    """Build a completion map for one command from its intercepted parser.

    Every long option maps to its (possibly empty) dict of valid choices;
    every short option maps to the corresponding long option name.
    """
    choice_map: dict = {}

    for action in argument_parser._actions:  # pylint: disable=protected-access
        option_strings = action.option_strings

        if len(option_strings) == 1:
            short_name, long_name = "", option_strings[0]
        elif len(option_strings) == 2:
            short_name, long_name = option_strings
        else:
            raise AttributeError(f"Invalid argument_parser: {argument_parser}")

        action_choices = getattr(action, "choices", None)
        choice_map[long_name] = (
            {str(choice): {} for choice in action_choices} if action_choices else {}
        )

        if short_name and long_name:
            choice_map[short_name] = long_name

    return choice_map
def build_controller_choice_map(controller) -> dict:
    """Build the autocompletion choice map for every command of *controller*."""
    controller_choice_map: dict = {
        choice: {} for choice in controller.controller_choices
    }
    controller_choice_map["support"] = controller.SUPPORT_CHOICES
    controller_choice_map["about"] = controller.ABOUT_CHOICES
    for command in controller.CHOICES_COMMANDS:
        try:
            parser = _get_argument_parser(
                controller=controller,
                command=command,
            )
            controller_choice_map[command] = _build_command_choice_map(
                argument_parser=parser
            )
        except Exception as exception:
            # Per-command failures are swallowed unless explicitly debugging,
            # so one broken command does not kill the whole completer.
            if environ.get("DEBUG_MODE", "false") == "true":
                raise Exception(
                    f"On command : `{command}`.\n{str(exception)}"
                ) from exception
    return controller_choice_map
# | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/core/completer/choices.py | 0.882706 | 0.313328 | choices.py | pypi |
from typing import Any, Optional, List
import openbb_terminal.config_terminal as cfg
from openbb_terminal.core.library.metadata import Metadata
from openbb_terminal.core.library.trail_map import TrailMap
from openbb_terminal.core.library.operation import Operation
from openbb_terminal import feature_flags as obbff
# pylint: disable=import-outside-toplevel
class MetadataBuilder:
    """Builds the ``__dir__`` listing and docstring for a SDK trail node."""

    @staticmethod
    def get_option_list(trail: str, trail_map: TrailMap) -> List[str]:
        """Return the distinct child options one dotted level below *trail*."""
        option_list = []
        for key in trail_map.map_dict:
            if trail == "":
                option = key.split(".")[0]
            elif key.startswith(trail) and key[len(trail)] == ".":
                option = key[len(trail) + 1 :].split(".")[0]
            else:
                option = None
            if option:
                option_list.append(option)
        return list(set(option_list))

    @classmethod
    def build_dir_list(cls, trail: str, trail_map: TrailMap) -> List[str]:
        """Return child options, plus an ``<option>_chart`` entry per view."""
        option_list = cls.get_option_list(trail=trail, trail_map=trail_map)
        option_list_full = []
        for option in option_list:
            option_list_full.append(option)
            # Expose the companion charting endpoint when a view exists.
            option_view_trail = f"{trail}.{option}_chart"
            if trail_map.get_view_function(trail=option_view_trail):
                option_list_full.append(f"{option}_chart")
        return option_list_full

    @staticmethod
    def build_docstring(trail: str, dir_list: List[str]) -> str:
        """Compose the ``__doc__`` text shown for the node at *trail*."""
        if trail == "":
            docstring = """This is the OpenBB Terminal SDK.
Use the SDK to get data directly into your jupyter notebook or directly use it in your application.
For more information see the official documentation at: https://openbb-finance.github.io/OpenBBTerminal/SDK/
"""
        else:
            # BUG FIX: removed the duplicated word ("the the menu").
            docstring = (
                trail.rsplit(".")[-1].upper()
                + " Menu\n\nThe SDK commands of the menu:"
            )
            for command in dir_list:
                docstring += f"\n\t<openbb>.{trail}.{command}"
        return docstring

    @classmethod
    def build(cls, trail: str, trail_map: TrailMap) -> Metadata:
        """Build the full Metadata (dir list + docstring) for *trail*."""
        dir_list = cls.build_dir_list(trail=trail, trail_map=trail_map)
        docstring = cls.build_docstring(trail=trail, dir_list=dir_list)
        metadata = Metadata(
            dir_list=dir_list,
            docstring=docstring,
        )
        return metadata
class Breadcrumb:
    """Lazy attribute tree over the SDK trail map.

    Attribute access walks one dotted level down the trail; leaves resolve
    to callable ``Operation`` objects, intermediate menus to deeper
    ``Breadcrumb`` instances.
    """

    __version__ = obbff.VERSION

    def __init__(
        self,
        metadata: Optional[Metadata] = None,
        trail: str = "",
        trail_map: Optional[TrailMap] = None,
    ) -> None:
        """
        Generates a 'trail' that allows accessing OpenBB Terminal SDK methods.
        Example:
            openbb.forex.get_currency_list()
            Breadcrumb(trail="").Breadcrumb(trail="forex").Operation(trail="forex.get_currency_list")()
        Args:
            metadata (Optional[Metadata], optional):
                Object to generate Breadcrumb's metadata (__dir__, __doc__).
                Defaults to None.
            trail (str, optional):
                Current trail of the Breadcrumb.
                Defaults to "".
            trail_map (Optional[TrailMap], optional):
                Mapping with all the trails available and matching models and views.
                Defaults to None.
        """
        trail_map = trail_map or TrailMap()
        metadata = metadata or MetadataBuilder.build(trail=trail, trail_map=trail_map)
        self._metadata = metadata
        self._trail_map = trail_map
        self._trail = trail
        self.__doc__ = metadata.docstring
        # Logging is initialized once, on the root breadcrumb only.
        if trail == "":
            BreadcrumbLogger()

    def __dir__(self):
        # Surface only this trail's child options (plus *_chart views).
        return self._metadata.dir_list

    def __getattr__(self, name: str) -> Any:
        """Resolve *name* one level down the trail.

        Returns an ``Operation`` when the extended trail maps to a model or
        view function, otherwise a deeper ``Breadcrumb`` for a known menu.
        Raises AttributeError for anything else.
        """
        trail = self._trail
        trail_map = self._trail_map
        if trail == "":
            trail_next = name
        else:
            trail_next = f"{trail}.{name}"
        if trail_map.get_model_function(
            trail=trail_next
        ) or trail_map.get_view_function(trail=trail_next):
            next_crumb: Any = Operation(
                trail=trail_next,
                trail_map=trail_map,
            )
        elif name in self._metadata.dir_list:
            next_crumb = Breadcrumb(
                metadata=MetadataBuilder.build(trail=trail_next, trail_map=trail_map),
                trail=trail_next,
                trail_map=trail_map,
            )
        else:
            raise AttributeError(
                f"Module or method '{trail}' has no attribute '{name}'."
            )
        return next_crumb

    def about(self):
        # Open the documentation page matching the current trail in a browser.
        import webbrowser

        trail = self._trail
        url = "https://docs.openbb.co/sdk/reference/"
        url += "/".join(trail.split("."))
        webbrowser.open(url)
# pylint: disable=R0903
class BreadcrumbLogger:
    """One-shot helper: set up SDK logging unless globally suppressed."""

    def __init__(self) -> None:
        self.__check_initialize_logging()

    def __check_initialize_logging(self):
        # Honour the suppression flag before touching any log handlers.
        if cfg.LOGGING_SUPPRESS:
            return
        self.__initialize_logging()

    @staticmethod
    def __initialize_logging() -> None:
        from openbb_terminal.loggers import setup_logging  # pylint: disable=C0415
        from openbb_terminal.core.log.generation.settings_logger import (  # pylint: disable=C0415
            log_all_settings,
        )

        cfg.LOGGING_SUB_APP = "sdk"
        setup_logging()
        log_all_settings()
# | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/core/library/breadcrumb.py | 0.7659 | 0.191177 | breadcrumb.py | pypi |
from inspect import getmembers, isfunction
from typing import List, Callable, Tuple
from datetime import datetime
import importlib.util
from pathlib import Path
import os
import pandas as pd
# Optional-dependency probe: FORECASTING gates all forecast-related modules.
try:
    import darts  # pylint: disable=W0611 # noqa: F401

    # If you just import darts this will pass during pip install, this creates
    # Failures later on, also importing utils ensures that darts is installed correctly
    from darts import utils  # pylint: disable=W0611 # noqa: F401

    FORECASTING = True
except ImportError:
    FORECASTING = False

# Repository root of the terminal package (four directory levels up).
base_path = Path(__file__).parent.parent.parent.parent / "openbb_terminal"
def load_modules(full_path: Path):
    """Load and execute a Python module from a file path.

    Parameters
    ----------
    full_path: Path
        The path to the module file.

    Returns
    -------
    The imported python module.

    Raises
    ------
    ValueError
        If an import spec/loader cannot be created for the path.
    """
    path_str = str(full_path)
    marker = "OpenBBTerminal" + os.sep
    # Derive a dotted module name from the repo-relative path.
    # BUG FIX: the old unconditional split(...)[1] raised IndexError for any
    # file outside an "OpenBBTerminal" tree; fall back to the full path.
    relative = path_str.split(marker)[1] if marker in path_str else path_str
    mod_name = relative.replace(os.sep, ".")
    spec = importlib.util.spec_from_file_location(mod_name, full_path)
    if spec is None or spec.loader is None:
        raise ValueError(f"Could not import path: {full_path}")
    mod = importlib.util.module_from_spec(spec)
    if mod is None:
        raise ValueError(f"Could not import path: {full_path}")
    spec.loader.exec_module(mod)
    return mod
def all_view_models() -> List[Path]:
    """Get all python files with 'view' or 'model' in the name.

    Returns
    -------
    List[Path]
        Unique paths under ``base_path`` whose file name contains
        'view' or 'model'.
    """
    file_list = []
    for root, _, files in os.walk(base_path):
        for filename in files:
            # BUG FIX: the collected path must contain the actual file name;
            # the old code appended a literal "(unknown)" placeholder, so
            # every entry pointed at a nonexistent file.
            if filename.endswith(".py") and (
                "view" in filename or "model" in filename
            ):
                file_list.append(f"{root}/{filename}")
    # De-duplicate before converting to Path objects.
    return [Path(x) for x in set(file_list)]
def get_sdk(file_path: str = "miscellaneous/library/trail_map.csv") -> pd.DataFrame:
    """Read the CSV that generates the SDK and convert it to a dataframe.

    Parameters
    ----------
    file_path: str
        Path of the SDK csv, relative to ``base_path``.

    Returns
    -------
    pd.DataFrame
        Indexed by function name, with 'trail' and 'type' columns. Each
        trail appears twice because it has a model and a view function.
    """
    df = pd.read_csv(base_path / file_path)
    duplicate_count = len(df["trail"]) - len(df["trail"].drop_duplicates())
    if duplicate_count > 0:
        print(f"Number of duplicate sdk paths: {duplicate_count}")
        print("This indicates that the same SDK trail is being used multiple times\n")
    # Flatten (view, trail) then (model, trail) pairs, tagging each row.
    records = []
    for kind in ("view", "model"):
        for name, trail in df[[kind, "trail"]].itertuples(index=False, name=None):
            records.append((name, trail, kind))
    final_df = pd.DataFrame(records, columns=["name", "trail", "type"])
    return final_df.dropna().set_index("name")
def format_function(function: Callable) -> Tuple[str, str]:
    """Return a pretty dotted name and the docstring for *function*.

    Module names produced by ``load_modules`` end in ".py"; the trailing
    two characters are stripped so the result reads as a dotted path.

    Parameters
    ----------
    function: Callable
        The function to get a pretty string for.

    Returns
    -------
    Tuple[str, str]
        The function's pretty name and its docstring.
    """
    module_name = str(function.__module__)[:-2].replace("/", ".")
    if module_name[-1] != ".":
        module_name = module_name + "."
    return f"{module_name}{function.__name__}", str(function.__doc__)
def functions_df() -> pd.DataFrame:
    """Create a dataframe for all functions in 'models' and 'views'.

    Returns
    -------
    pd.DataFrame
        Pretty name (index) and docstring for every view/model function.
    """
    all_formatted = []
    for module_path in all_view_models():
        # Forecast modules can only be imported when darts is installed.
        if not FORECASTING and "forecast" in str(module_path):
            continue
        loaded = load_modules(module_path)
        # Keep only functions defined in the module itself (skip imports).
        for _, func in getmembers(loaded, isfunction):
            if func.__module__ == loaded.__name__:
                all_formatted.append(format_function(func))
    func_df = pd.DataFrame(all_formatted, columns=["name", "docstring"])
    dup_count = len(func_df["name"]) - len(func_df["name"].drop_duplicates())
    if dup_count > 0:
        print(f"Number of duplicate functions found: {dup_count}")
        print(
            "This may indicate that functions are defined several times in the terminal.\n"
        )
    return func_df.set_index("name")
def save_df(data: pd.DataFrame) -> None:
    """Write *data* to a timestamped ``*_sdk_audit.csv`` in the working directory."""
    time_str = str(datetime.now().timestamp()).replace(".", "")
    output_path = f"{time_str}_sdk_audit.csv"
    data.to_csv(output_path)
    print(f"File saved to {output_path}")
def get_nonconforming_functions(data: pd.DataFrame) -> pd.DataFrame:
    """Fill in docstrings for SDK entries missed by the view/model scan.

    The first pass only inspects files whose name contains 'view' or
    'model', so helper-module functions referenced by the trail map end up
    with a null docstring. Each of those is resolved here by importing the
    module its dotted name points at.

    Parameters
    ----------
    data: pd.DataFrame
        The dataframe of all values; rows with null docstrings are searched.

    Returns
    -------
    pd.DataFrame
        The same dataframe with docstrings filled in where possible.
    """
    missing_names = data[data["docstring"].isnull()].index.tolist()
    repo_root = Path(__file__).parent.parent.parent.parent
    for name in missing_names:
        *module_parts, command = name.split(".")
        module = load_modules(repo_root / ("/".join(module_parts) + ".py"))
        function = getattr(module, command)
        if function:
            data.loc[name, "docstring"] = function.__doc__
    return data
def main():
    """Run the SDK audit and save the merged report to csv."""
    print(
        "This tool checks all functions in a file with a name including 'view' or 'model'against\n"
        "all functions in the sdk, which is gathered from 'trail_map.csv'. If the generated csv\n"
        "has an entry for 'trail' that means it is in the SDK, and if it has an entry for\n"
        "'docstring' it is in a model or view.\n"
    )
    merged = functions_df().merge(
        get_sdk(), how="outer", left_index=True, right_index=True
    )
    merged = merged.sort_values("name")
    merged = get_nonconforming_functions(merged)
    # Reset so the duplicated() check below also considers the name column.
    merged = merged.reset_index()
    # Report remaining data-quality problems before saving.
    blank_doc_count = len(merged[merged["docstring"].isnull()].index)
    if blank_doc_count > 0:
        print(f"The number of rows with blank docstrings is: {blank_doc_count}")
        print(
            "This indicates a matching function does not exist, is not in a 'model' or 'view'\n"
            "file, or that the trailmap does not import it from the place it is defined.\n"
        )
    dup_row_count = len(merged[merged.duplicated(keep=False)].index)
    if dup_row_count > 0:
        print(f"The number of duplicate functions after merge is: {dup_row_count}")
        print(
            "This most likely indicates that the same function is being used at "
            "different SDK endpoints.\n"
        )
    save_df(merged)
if __name__ == "__main__":
    main()
# BUG FIX: dataset residue was fused onto the `main()` line, breaking the
# syntax; it is preserved below as a comment.
# | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/core/scripts/sdk_audit.py | 0.707304 | 0.318459 | sdk_audit.py | pypi |
__docformat__ = "numpy"
from typing import Any, Optional
import requests
from requests.adapters import HTTPAdapter
class PaprikaSession:
    """Small CoinPaprika API client with a retrying HTTP session."""

    PAPRIKA_BASE_URL = "https://api.coinpaprika.com/v1"

    # Relative endpoint templates; "{}" slots take coin/exchange/person ids.
    ENDPOINTS = {
        "global": "/global",
        "coin": "/coins/{}",
        "coins": "/coins",
        "coin_tweeter": "/coins/{}/twitter",
        "coin_events": "/coins/{}/events",
        "coin_exchanges": "/coins/{}/exchanges",
        "coin_markets": "/coins/{}/markets",
        "ohlcv": "/coins/{}/ohlcv/latest",
        "ohlcv_hist": "/coins/{}/ohlcv/historical",
        "people": "/people/{}",
        "tickers": "/tickers",
        "ticker_info": "/tickers/{}",
        "exchanges": "/exchanges",
        "exchange_info": "/exchanges/{}",
        "exchange_markets": "/exchanges/{}/markets",
        "contract_platforms": "/contracts",
        "contract_platform_addresses": "/contracts/{}",
        "search": "/search",
    }

    def __init__(self, max_retries: int = 5):
        # Mount a retrying adapter so transient API failures are retried.
        self.session = requests.Session()
        self.session.mount(self.PAPRIKA_BASE_URL, HTTPAdapter(max_retries=max_retries))

    def make_request(
        self, endpoint: str, payload: Optional[Any] = None, **kwargs: Any
    ) -> dict:
        """Perform a GET request against *endpoint* and return the decoded json.

        Parameters
        ----------
        endpoint: str
            Endpoint to query, already formatted, e.g. "/coins/btc-bitcoin/twitter".
        payload: dict
            Query parameters for the request; extended in place with any
            extra keyword arguments.

        Returns
        -------
        dict with response data
        """
        request_url = f"{self.PAPRIKA_BASE_URL}{endpoint}/"
        if payload is None:
            payload = {}
        if kwargs:
            payload.update(kwargs)
        response = self.session.get(request_url, params=payload)
        return response.json()
# | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/cryptocurrency/coinpaprika_helpers.py | 0.839767 | 0.2258 | coinpaprika_helpers.py | pypi |
# pylint: disable=too-many-lines,too-many-return-statements
import difflib
import json
import logging
from typing import Union, Optional, List
import os
from datetime import datetime, timedelta
import ccxt
import matplotlib.pyplot as plt
import mplfinance as mpf
import numpy as np
import pandas as pd
import yfinance as yf
from matplotlib.ticker import LogLocator, ScalarFormatter
from pycoingecko import CoinGeckoAPI
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.cryptocurrency.discovery import pycoingecko_model
from openbb_terminal.cryptocurrency.due_diligence import coinpaprika_model
from openbb_terminal.cryptocurrency.due_diligence.pycoingecko_model import (
get_coin_tokenomics,
get_ohlc,
)
from openbb_terminal.helper_funcs import (
export_data,
is_valid_axes_count,
lambda_long_number_format,
lambda_long_number_format_y_axis,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)

__docformat__ = "numpy"

# Candle aggregation intervals offered for re-sampling.
INTERVALS = ["1H", "3H", "6H", "1D"]

# Minutes-per-bar (as string) -> ccxt timeframe code.
CCXT_INTERVAL_MAP = {
    "1": "1m",
    "15": "15m",
    "30": "30m",
    "60": "1h",
    "240": "4h",
    "1440": "1d",
    "10080": "1w",
    "43200": "1M",
}

# Interval strings accepted by each upstream data source.
SOURCES_INTERVALS = {
    "Binance": [
        "1day",
        "3day",
        "1hour",
        "2hour",
        "4hour",
        "6hour",
        "8hour",
        "12hour",
        "1week",
        "1min",
        "3min",
        "5min",
        "15min",
        "30min",
        "1month",
    ],
    "Coinbase": [
        "1min",
        "5min",
        "15min",
        "1hour",
        "6hour",
        "24hour",
        "1day",
    ],
    "YahooFinance": [
        "1min",
        "2min",
        "5min",
        "15min",
        "30min",
        "60min",
        "90min",
        "1hour",
        "1day",
        "5day",
        "1week",
        "1month",
        "3month",
    ],
}

# Quote currencies supported by Yahoo Finance crypto tickers.
YF_CURRENCY = [
    "CAD",
    "CNY",
    "ETH",
    "EUR",
    "GBP",
    "INR",
    "JPY",
    "KRW",
    "RUB",
    "USD",
    "AUD",
    "BTC",
]
def check_datetime(
    ck_date: Union[datetime, Union[str, None]] = None, start: bool = True
) -> datetime:
    """Coerce the given argument to a datetime, with a sensible fallback.

    Parameters
    ----------
    ck_date : Union[datetime, Union[str, None]], optional
        Date to check, by default None
    start : bool, optional
        If True and the input is missing or invalid, fall back to 1100
        days ago; if False, fall back to now. By default True.

    Returns
    -------
    datetime
        Datetime object
    """
    fallback = (datetime.now() - timedelta(days=1100)) if start else datetime.now()
    if ck_date is None:
        return fallback
    if isinstance(ck_date, datetime):
        return ck_date
    if isinstance(ck_date, str):
        try:
            return datetime.strptime(ck_date, "%Y-%m-%d")
        except Exception:
            # Unparseable string: warn and use the fallback date.
            console.print(
                f"Invalid date format (YYYY-MM-DD), "
                f"Using {fallback.strftime('%Y-%m-%d')} for {ck_date}"
            )
    return fallback
def _load_coin_map(file_name: str) -> pd.DataFrame:
if file_name.split(".")[1] != "json":
raise TypeError("Please load json file")
current_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_dir, "data", file_name)
with open(path, encoding="utf8") as f:
coins = json.load(f)
coins_df = pd.Series(coins).reset_index()
coins_df.columns = ["symbol", "id"]
return coins_df
def read_data_file(file_name: str):
    """Read a json file from the package ``data`` directory.

    Parameters
    ----------
    file_name : str
        Name of the json file inside the ``data`` directory.

    Raises
    ------
    TypeError
        If *file_name* does not end in ``.json``.
    """
    # BUG FIX: robust extension check (see _load_coin_map).
    if not file_name.endswith(".json"):
        raise TypeError("Please load json file")
    current_dir = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(current_dir, "data", file_name)
    with open(path, encoding="utf8") as f:
        return json.load(f)
def load_coins_list(file_name: str, return_raw: bool = False) -> pd.DataFrame:
    """Load a coins-list json from the package ``data`` directory.

    Parameters
    ----------
    file_name : str
        Name of the json file inside the ``data`` directory.
    return_raw : bool
        If True, return the parsed json object instead of a DataFrame.

    Returns
    -------
    pd.DataFrame
        The coins list (or the raw parsed json when *return_raw*).

    Raises
    ------
    TypeError
        If *file_name* does not end in ``.json``.
    """
    # BUG FIX: robust extension check (see _load_coin_map).
    if not file_name.endswith(".json"):
        raise TypeError("Please load json file")
    current_dir = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(current_dir, "data", file_name)
    with open(path, encoding="utf8") as f:
        coins = json.load(f)
    if return_raw:
        return coins
    return pd.DataFrame(coins)
def load_binance_map():
    """Return the Binance symbol -> CoinGecko id mapping as a DataFrame."""
    return _load_coin_map("binance_gecko_map.json")
def load_coinbase_map():
    """Return the Coinbase symbol -> CoinGecko id mapping as a DataFrame."""
    return _load_coin_map("coinbase_gecko_map.json")
def prepare_all_coins_df() -> pd.DataFrame:
    """Helper method which loads coins from all sources: CoinGecko, CoinPaprika,
    Binance, Yahoo Finance and merge those coins on keys:
    CoinGecko - > name < - CoinPaprika
    CoinGecko - > id <- Binance
    Returns
    -------
    pd.DataFrame
        CoinGecko - id for coin in CoinGecko API: uniswap
        CoinPaprika - id for coin in CoinPaprika API: uni-uniswap
        Binance - symbol (baseAsset) for coin in Binance API: UNI
        Coinbase - symbol for coin in Coinbase Pro API e.g UNI
        Yahoo Finance - symbol for coin in Yahoo Finance e.g. UNI1-USD
        Symbol: uni
    """
    gecko_coins_df = load_coins_list("coingecko_coins.json")
    paprika_coins_df = load_coins_list("coinpaprika_coins.json")
    # Keep only actively listed CoinPaprika assets and the merge columns.
    paprika_coins_df = paprika_coins_df[paprika_coins_df["is_active"]]
    paprika_coins_df = paprika_coins_df[["rank", "id", "name", "symbol", "type"]]
    yahoofinance_coins_df = load_coins_list("yahoofinance_coins.json")
    # TODO: Think about scheduled job, that once a day will update data
    binance_coins_df = load_binance_map().rename(columns={"symbol": "Binance"})
    coinbase_coins_df = load_coinbase_map().rename(columns={"symbol": "Coinbase"})
    # Upper-case the CoinGecko symbols so the symbol merge is case-insensitive.
    gecko_coins_df.symbol = gecko_coins_df.symbol.str.upper()
    gecko_paprika_coins_df = pd.merge(
        gecko_coins_df, paprika_coins_df, on="symbol", how="right"
    )
    # After this merge "id_x" is the CoinGecko id and "id_y" the CoinPaprika id.
    df_merged = pd.merge(
        left=gecko_paprika_coins_df,
        right=binance_coins_df,
        left_on="id_x",
        right_on="id",
        how="left",
    )
    df_merged.rename(
        columns={
            "id_x": "CoinGecko",
            "symbol": "Symbol",
            "id_y": "CoinPaprika",
        },
        inplace=True,
    )
    df_merged = pd.merge(
        left=df_merged,
        right=coinbase_coins_df,
        left_on="CoinGecko",
        right_on="id",
        how="left",
    )
    yahoofinance_coins_df.rename(
        columns={
            "symbol": "Symbol",
        },
        inplace=True,
    )
    yahoofinance_coins_df.Symbol = yahoofinance_coins_df.Symbol.str.upper()
    df_merged = pd.merge(
        left=df_merged,
        right=yahoofinance_coins_df[["Symbol", "id"]],
        on="Symbol",
        how="left",
    )
    df_merged.rename(
        columns={
            "id": "YahooFinance",
        },
        inplace=True,
    )
    # Return only the per-source identifier columns plus the shared symbol.
    return df_merged[
        ["CoinGecko", "CoinPaprika", "Binance", "Coinbase", "YahooFinance", "Symbol"]
    ]
def _create_closest_match_df(
coin: str, coins: pd.DataFrame, limit: int, cutoff: float
) -> pd.DataFrame:
"""Helper method. Creates a DataFrame with best matches for given coin found in given list of coins.
Based on difflib.get_close_matches func.
Parameters
----------
coin: str
coin you search for
coins: list
list of coins in which you want to find similarities
limit: int
limit of matches
cutoff: float
float between <0, 1>. Show only coins matches with score higher then cutoff.
Returns
-------
pd.DataFrame
index, id, name, symbol - > depends on source of data.
"""
coins_list = coins["id"].to_list()
sim = difflib.get_close_matches(coin, coins_list, limit, cutoff)
df = pd.Series(sim).to_frame().reset_index()
df.columns = ["index", "id"]
return df.merge(coins, on="id")
def get_coingecko_id(symbol: str):
    """Resolve a ticker symbol to its CoinGecko id, or None when unknown."""
    target = symbol.lower()
    # Linear scan of the full CoinGecko listing (fetched over the network).
    for coin in CoinGeckoAPI().get_coins_list():
        if coin["symbol"] == target:
            return coin["id"]
    return None
def load_from_ccxt(
    symbol: str,
    start_date: datetime = (datetime.now() - timedelta(days=1100)),
    interval: str = "1440",
    exchange: str = "binance",
    to_symbol: str = "usdt",
) -> pd.DataFrame:
    """Load crypto currency data [Source: https://github.com/ccxt/ccxt]

    Parameters
    ----------
    symbol: str
        Coin to get
    start_date: datetime
        The datetime to start at
    interval: str
        The interval between data points in minutes.
        Choose from: 1, 15, 30, 60, 240, 1440, 10080, 43200
    exchange: str:
        The exchange to get data from.
    to_symbol: str
        Quote Currency (Defaults to usdt)

    Returns
    -------
    pd.DataFrame
        Dataframe consisting of price and volume data; empty on failure.
    """
    pair = f"{symbol.upper()}/{to_symbol.upper()}"
    try:
        df = fetch_ccxt_ohlc(
            exchange,
            3,
            pair,
            CCXT_INTERVAL_MAP[interval],
            int(datetime.timestamp(start_date)) * 1000,
            1000,
        )
    except Exception:
        # Any ccxt/exchange failure is reported as "pair not available".
        console.print(f"\nPair {pair} not found on {exchange}\n")
        return pd.DataFrame()
    if df.empty:
        console.print(f"\nPair {pair} not found in {exchange}\n")
        return pd.DataFrame()
    return df
def load_from_coingecko(
    symbol: str,
    start_date: datetime = (datetime.now() - timedelta(days=1100)),
    to_symbol: str = "usdt",
) -> pd.DataFrame:
    """Load crypto currency data [Source: https://www.coingecko.com/]

    Parameters
    ----------
    symbol: str
        Coin to get
    start_date: datetime
        The datetime to start at
    to_symbol: str
        Quote Currency (Defaults to usdt)

    Returns
    -------
    pd.DataFrame
        Dataframe consisting of price and volume data; empty on failure.
    """
    days = (datetime.now() - start_date).days
    if days > 365:
        console.print("Coingecko free tier only allows a max of 365 days\n")
        days = 365
    coingecko_id = get_coingecko_id(symbol)
    if not coingecko_id:
        console.print(f"{symbol} not found in Coingecko\n")
        return pd.DataFrame()
    df = get_ohlc(coingecko_id, to_symbol, days)
    # CoinGecko OHLC carries no volume; backfill it from Yahoo Finance.
    df_coin = yf.download(
        f"{symbol}-{to_symbol}",
        end=datetime.now(),
        start=start_date,
        progress=False,
        interval="1d",
    ).sort_index(ascending=False)
    if not df_coin.empty:
        df = pd.merge(df, df_coin[::-1][["Volume"]], left_index=True, right_index=True)
    df.index.name = "date"
    return df
def load_from_yahoofinance(
    symbol: str,
    start_date: datetime = (datetime.now() - timedelta(days=1100)),
    interval: str = "1440",
    to_symbol: str = "usdt",
    end_date: datetime = datetime.now(),
) -> pd.DataFrame:
    """Load crypto currency data [Source: https://finance.yahoo.com/]

    Parameters
    ----------
    symbol: str
        Coin to get
    start_date: datetime
        The datetime to start at
    interval: str
        The interval between data points in minutes.
        Choose from: 1, 15, 30, 60, 240, 1440, 10080, 43200
    to_symbol: str
        Quote Currency (Defaults to usdt)
    end_date: datetime
        The datetime to end at

    Returns
    -------
    pd.DataFrame
        Dataframe consisting of price and volume data; empty on failure.
    """
    pair = f"{symbol}-{to_symbol}"
    if int(interval) >= 1440:
        YF_INTERVAL_MAP = {
            "1440": "1d",
            "10080": "1wk",
            "43200": "1mo",
        }
        df = yf.download(
            pair,
            end=end_date,
            start=start_date,
            progress=False,
            interval=YF_INTERVAL_MAP[interval],
        ).sort_index(ascending=True)
    else:
        s_int = str(interval) + "m"
        # Yahoo caps how far back each intraday granularity is available.
        d_granularity = {"1m": 6, "5m": 59, "15m": 59, "30m": 59, "60m": 729}
        s_start_dt = datetime.utcnow() - timedelta(days=d_granularity[s_int])
        s_date_start = s_start_dt.strftime("%Y-%m-%d")
        df = yf.download(
            pair,
            start=s_date_start
            if s_start_dt > start_date
            else start_date.strftime("%Y-%m-%d"),
            progress=False,
            interval=s_int,
        )
    # BUG FIX: check for an empty frame BEFORE summing the "Open" column.
    # Previously an empty download hit the zero-sum branch first (or raised
    # a KeyError), so the "not found" message was unreachable/misleading.
    if df.empty:
        console.print(f"\nPair {pair} not found in Yahoo Finance\n")
        return pd.DataFrame()
    if df["Open"].sum() == 0:
        console.print(f"\nPair {pair} has invalid data on Yahoo Finance\n")
        return pd.DataFrame()
    df.index.name = "date"
    return df
def load(
    symbol: str,
    start_date: Union[datetime, Union[str, None]] = None,
    interval: Union[str, int] = "1440",
    exchange: str = "binance",
    to_symbol: str = "usdt",
    end_date: Union[datetime, Union[str, None]] = None,
    source: str = "CCXT",
) -> pd.DataFrame:
    """Load crypto currency data from the selected source.

    Parameters
    ----------
    symbol: str
        Coin to get
    start_date: Union[datetime, Union[str, None]], optional
        Start date to get data from with. - datetime or string format (YYYY-MM-DD)
    interval: Union[str, int]
        The interval between data points in minutes.
        Choose from: 1, 15, 30, 60, 240, 1440, 10080, 43200
    exchange: str:
        The exchange to get data from.
    to_symbol: str
        Quote Currency (Defaults to usdt)
    end_date: Union[datetime, Union[str, None]], optional
        End date to get data from with. - datetime or string format (YYYY-MM-DD)
    source: str
        The source of the data
        Choose from: CCXT, CoinGecko, YahooFinance

    Returns
    -------
    pd.DataFrame
        Dataframe consisting of price and volume data

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.crypto.load(symbol="btc",to_symbol="usd",start_date="2019-01-01",source="YahooFinance")
    """
    if isinstance(interval, int):
        interval = str(interval)
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    start_date = check_datetime(start_date)
    end_date = check_datetime(end_date, start=False)
    # Dispatch table keyed by data source.
    loaders = {
        "CCXT": lambda: load_from_ccxt(
            symbol, start_date, interval, exchange, to_symbol
        ),
        "CoinGecko": lambda: load_from_coingecko(symbol, start_date, to_symbol),
        "YahooFinance": lambda: load_from_yahoofinance(
            symbol, start_date, interval, to_symbol, end_date
        ),
    }
    loader = loaders.get(source)
    if loader is None:
        console.print("[red]Invalid source sent[/red]\n")
        return pd.DataFrame()
    return loader()
def show_quick_performance(
    crypto_df: pd.DataFrame,
    symbol: str,
    current_currency: str,
    source: str,
    exchange: str,
    interval: str,
):
    """Show quick performance stats of crypto prices. Daily prices expected"""
    closes = crypto_df["Close"]
    volumes = crypto_df["Volume"] if "Volume" in crypto_df else pd.DataFrame()
    perfs = {}
    if interval == "1440":
        # NOTE(review): "1D" uses pct_change(2), i.e. a two-row change --
        # possibly to skip a partial last candle; confirm intent.
        perfs = {
            "1D": 100 * closes.pct_change(2)[-1],
            "7D": 100 * closes.pct_change(7)[-1],
            "1M": 100 * closes.pct_change(30)[-1],
            "1Y": 100 * closes.pct_change(365)[-1],
        }
        first_day_current_year = str(datetime.now().date().replace(month=1, day=1))
        if first_day_current_year in closes.index:
            closes_ytd = closes[closes.index > first_day_current_year]
            perfs["YTD"] = 100 * (closes_ytd[-1] - closes_ytd[0]) / closes_ytd[0]
    else:
        # Non-daily data: report the whole-period change only.
        perfs["Period"] = 100 * (closes[-1] - closes[0]) / closes[0]
    df = pd.DataFrame.from_dict(perfs, orient="index").dropna().T
    df = df.applymap(lambda x: str(round(x, 2)) + " %")
    # Colorize: negatives red, positives green, for the rich table below.
    df = df.applymap(lambda x: f"[red]{x}[/red]" if "-" in x else f"[green]{x}[/green]")
    if len(closes) > 365:
        # Annualized volatility from the last year of daily returns.
        df["Volatility (1Y)"] = (
            str(round(100 * np.sqrt(365) * closes[-365:].pct_change().std(), 2)) + " %"
        )
    else:
        df["Volatility (Ann)"] = (
            str(round(100 * np.sqrt(365) * closes.pct_change().std(), 2)) + " %"
        )
    if len(volumes) > 7:
        # NOTE(review): window [-9:-2] skips the two most recent points --
        # presumably to avoid incomplete candles; confirm.
        df["Volume (7D avg)"] = lambda_long_number_format(np.mean(volumes[-9:-2]), 2)
    df.insert(0, f"\nPrice ({current_currency.upper()})", closes[-1])
    try:
        # Best-effort enrichment with circulating supply from CoinGecko;
        # network/API failures are deliberately ignored.
        coingecko_id = get_coingecko_id(symbol)
        coin_data_cg = get_coin_tokenomics(coingecko_id)
        if not coin_data_cg.empty:
            df.insert(
                len(df.columns),
                "Circulating Supply",
                lambda_long_number_format(
                    int(
                        coin_data_cg.loc[
                            coin_data_cg["Metric"] == "Circulating Supply"
                        ]["Value"]
                    )
                ),
            )
    except Exception:
        pass
    exchange_str = f"in {exchange.capitalize()}" if source == "ccxt" else ""
    print_rich_table(
        df,
        show_index=False,
        headers=df.columns,
        title=f"{symbol.upper()}/{current_currency.upper()} Performance {exchange_str}",
    )
    console.print()
def load_yf_data(symbol: str, currency: str, interval: str, days: int):
    """Download OHLCV history for a crypto pair from Yahoo Finance.

    Returns a tuple of the dataframe (ascending date order, empty when the
    download fails) and the quote currency.
    """
    df_coin = yf.download(
        f"{symbol.upper()}-{currency.upper()}",
        end=datetime.now(),
        start=datetime.now() - timedelta(days=days),
        progress=False,
        interval=interval,
    ).sort_index(ascending=False)
    df_coin.index.names = ["date"]
    if df_coin.empty:
        console.print(
            f"Could not download data for {symbol}-{currency} from Yahoo Finance"
        )
        return pd.DataFrame(), currency
    return df_coin[::-1], currency
def display_all_coins(
    source: str, symbol: str, limit: int, skip: int, show_all: bool, export: str
) -> None:
    """Find similar coin by coin name,symbol or id.
    If you don't remember exact name or id of the Coin at CoinGecko, CoinPaprika, Coinbase, Binance
    you can use this command to display coins with similar name, symbol or id to your search query.
    Example of usage: coin name is something like "polka". So I can try: find -c polka -k name -t 25
    It will search for coin that has similar name to polka and display top 25 matches.
    -c, --coin stands for coin - you provide here your search query
    -t, --top it displays top N number of records.
    Parameters
    ----------
    limit: int
        Number of records to display
    symbol: str
        Cryptocurrency
    source: str
        Data source of coins. CoinGecko (cg) or CoinPaprika (cp) or Binance (bin), Coinbase (cb)
    skip: int
        Skip N number of records
    show_all: bool
        Flag to show all sources of data
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    sources = ["CoinGecko", "CoinPaprika", "Binance", "Coinbase"]
    # NOTE(review): the caller's `limit` parameter is overwritten here, so
    # the slice below always uses 30 -- confirm whether this is intended.
    limit, cutoff = 30, 0.75
    coins_func_map = {
        "CoinGecko": pycoingecko_model.get_coin_list,
        "CoinPaprika": coinpaprika_model.get_coin_list,
        "Binance": load_binance_map,
        "Coinbase": load_coinbase_map,
    }
    if show_all:
        coins_func = coins_func_map.get(source)
        if coins_func:
            df = coins_func()
        else:
            df = prepare_all_coins_df()
    elif not source or source not in sources:
        # No (known) source given: fuzzy-match against the merged coin table.
        df = prepare_all_coins_df()
        cg_coins_list = df["CoinGecko"].to_list()
        sim = difflib.get_close_matches(symbol.lower(), cg_coins_list, limit, cutoff)
        df_matched = pd.Series(sim).to_frame().reset_index()
        df_matched.columns = ["index", "CoinGecko"]
        df = df.merge(df_matched, on="CoinGecko")
        df.drop("index", axis=1, inplace=True)
    else:
        # Source-specific fuzzy match; output normalized to index/id/name.
        if source == "CoinGecko":
            coins_df = pycoingecko_model.get_coin_list().drop("index", axis=1)
            df = _create_closest_match_df(symbol.lower(), coins_df, limit, cutoff)
            df = df[["index", "id", "name"]]
        elif source == "CoinPaprika":
            coins_df = coinpaprika_model.get_coin_list()
            df = _create_closest_match_df(symbol.lower(), coins_df, limit, cutoff)
            df = df[["index", "id", "name"]]
        elif source == "Binance":
            coins_df_gecko = pycoingecko_model.get_coin_list()
            coins_df_bin = load_binance_map()
            coins_df_bin.columns = ["symbol", "id"]
            coins_df = pd.merge(
                coins_df_bin, coins_df_gecko[["id", "name"]], how="left", on="id"
            )
            df = _create_closest_match_df(symbol.lower(), coins_df, limit, cutoff)
            df = df[["index", "symbol", "name"]]
            df.columns = ["index", "id", "name"]
        elif source == "Coinbase":
            coins_df_gecko = pycoingecko_model.get_coin_list()
            coins_df_cb = load_coinbase_map()
            coins_df_cb.columns = ["symbol", "id"]
            coins_df = pd.merge(
                coins_df_cb, coins_df_gecko[["id", "name"]], how="left", on="id"
            )
            df = _create_closest_match_df(symbol.lower(), coins_df, limit, cutoff)
            df = df[["index", "symbol", "name"]]
            df.columns = ["index", "id", "name"]
        else:
            df = pd.DataFrame(columns=["index", "id", "symbol"])
            console.print("Couldn't find any coins")
    try:
        # Paginate the results.
        df = df[skip : skip + limit]  # noqa
    except Exception as e:
        logger.exception(str(e))
        console.print(e)
    print_rich_table(
        df.fillna("N/A"),
        headers=list(df.columns),
        show_index=False,
        title="Similar Coins",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "coins",
        df,
    )
def plot_chart(
    prices_df: pd.DataFrame,
    to_symbol: str = "",
    from_symbol: str = "",
    source: str = "",
    exchange: str = "",
    interval: str = "",
    external_axes: Union[List[plt.Axes], None] = None,
    yscale: str = "linear",
) -> None:
    """Plot a candle chart for loaded cryptocurrency price data.

    Parameters
    ----------
    prices_df: pd.DataFrame
        OHLCV price data indexed by datetime
    to_symbol: str
        Coin (only used for chart title), by default ""
    from_symbol: str
        Currency (only used for chart title), by default ""
    source: str
        Data source (only used for chart title), by default ""
    exchange: str
        Exchange name; shown in the title only for "ccxt" sources
    interval: str
        Unused; accepted for backward-compatible signature
    external_axes : Optional[List[plt.Axes]], optional
        External axes to plot on, by default None
    yscale: str
        Scale for y axis of plot Either linear or log

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> eth_df = openbb.crypto.load("ETH")
    >>> openbb.crypto.chart(prices_df=eth_df, to_symbol="usdt", from_symbol="eth", source="binance")
    """
    del interval  # kept in the signature for compatibility, not used

    if prices_df.empty:
        # Fixed grammar of the user-facing message ("is not data" -> "is no data").
        console.print("There is no data to plot the chart\n")
        return

    exchange_str = f"/{exchange}" if source == "ccxt" else ""
    title = (
        f"{source}{exchange_str} - {to_symbol.upper()}/{from_symbol.upper()}"
        f" from {prices_df.index[0].strftime('%Y/%m/%d')} "
        f"to {prices_df.index[-1].strftime('%Y/%m/%d')}"
    )

    volume_mean = prices_df["Volume"].mean()
    if volume_mean > 1_000_000:
        # Work on a copy so the caller's dataframe is not mutated in place.
        prices_df = prices_df.copy()
        prices_df["Volume"] = prices_df["Volume"] / 1_000_000

    plot_candles(
        symbol=to_symbol,
        data=prices_df,
        title=title,
        volume=True,
        ylabel="Volume [1M]" if volume_mean > 1_000_000 else "Volume",
        external_axes=external_axes,
        yscale=yscale,
    )

    console.print()
def plot_candles(  # pylint: disable=too-many-arguments
    symbol: str,
    data: pd.DataFrame = None,
    start_date: Union[datetime, Union[str, None]] = None,
    end_date: Union[datetime, Union[str, None]] = None,
    interval: Union[str, int] = "1440",
    exchange: str = "binance",
    to_symbol: str = "usdt",
    source: str = "CCXT",
    volume: bool = True,
    ylabel: str = "",
    title: str = "",
    external_axes: Union[List[plt.Axes], None] = None,
    yscale: str = "linear",
    raw: bool = False,
) -> Optional[pd.DataFrame]:
    """Plot candle chart from dataframe. [Source: Binance]

    When ``data`` is None the OHLCV dataframe is downloaded first via the
    module-level ``load`` using the remaining loader parameters.

    Parameters
    ----------
    symbol: str
        Ticker name
    data: pd.DataFrame
        Dataframe containing time and OHLCV
    start_date: Union[datetime, Union[str, None]]
        Start date for data
    end_date: Union[datetime, Union[str, None]]
        End date for data
    interval: Union[str, int]
        Interval for data
    exchange: str
        Exchange to use
    to_symbol: str
        Currency to use
    source: str
        Source to use
    volume: bool
        If volume data shall be plotted, by default True
    ylabel: str
        Y-label of the graph, by default ""
    title: str
        Title of graph, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    yscale : str
        Scaling for y axis. Either linear or log
    raw : bool
        Return the (possibly downloaded) dataframe instead of plotting

    Returns
    -------
    Optional[pd.DataFrame]
        The dataframe when ``raw`` is True, otherwise None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.crypto.candle(symbol="eth")
    >>> openbb.crypto.candle(symbol="btc", raw=True)
    """
    # Download data only when the caller did not supply a dataframe.
    if data is None:
        data = load(
            symbol=symbol,
            start_date=start_date,
            end_date=end_date,
            interval=interval,
            exchange=exchange,
            to_symbol=to_symbol,
            source=source,
        )

    # raw=True short-circuits plotting and hands the dataframe back.
    if raw:
        return data

    # Keyword arguments shared by both the internal and external-axes paths.
    candle_chart_kwargs = {
        "type": "candle",
        "style": theme.mpf_style,
        "volume": volume,
        "xrotation": theme.xticks_rotation,
        "ylabel_lower": ylabel,
        "scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
        "update_width_config": {
            "candle_linewidth": 0.6,
            "candle_width": 0.8,
            "volume_linewidth": 0.8,
            "volume_width": 0.8,
        },
        "warn_too_much_data": 10000,
        "yscale": yscale,
    }
    # This plot has 2 axes
    if external_axes is None:
        candle_chart_kwargs["returnfig"] = True
        candle_chart_kwargs["figratio"] = (10, 7)
        candle_chart_kwargs["figscale"] = 1.10
        candle_chart_kwargs["figsize"] = plot_autoscale()
        fig, ax = mpf.plot(data, **candle_chart_kwargs)
        fig.suptitle(
            f"\n{symbol if title == '' else title}",
            horizontalalignment="left",
            verticalalignment="top",
            x=0.05,
            y=1,
        )
        if volume:
            lambda_long_number_format_y_axis(data, "Volume", ax)
        if yscale == "log":
            # On a log axis force plain numeric labels at 1/2/5/10 positions.
            ax[0].yaxis.set_major_formatter(ScalarFormatter())
            ax[0].yaxis.set_major_locator(
                LogLocator(base=100, subs=[1.0, 2.0, 5.0, 10.0])
            )
            ax[0].ticklabel_format(style="plain", axis="y")
        theme.visualize_output(force_tight_layout=False)
    else:
        nr_external_axes = 2 if volume else 1
        if not is_valid_axes_count(external_axes, nr_external_axes):
            return None
        if volume:
            # NOTE: `volume` is rebound here from bool to the volume Axes,
            # which is what mplfinance expects for external plotting.
            (ax, volume) = external_axes
            candle_chart_kwargs["volume"] = volume
        else:
            ax = external_axes[0]
        candle_chart_kwargs["ax"] = ax
        mpf.plot(data, **candle_chart_kwargs)
    return None
def plot_order_book(
    bids: np.ndarray,
    asks: np.ndarray,
    coin: str,
    external_axes: Union[List[plt.Axes], None] = None,
) -> None:
    """Plot cumulative bid/ask depth for a coin. Works for Coinbase and Binance.

    Parameters
    ----------
    bids : np.array
        array of bids with columns: price, size, cumulative size
    asks : np.array
        array of asks with columns: price, size, cumulative size
    coin : str
        Coin being plotted
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Resolve the axis: create one unless a single valid external axis was given.
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    # Draw each side of the book: cumulative size over price with shaded area.
    for side, label, color in (
        (bids, "bids", theme.up_color),
        (asks, "asks", theme.down_color),
    ):
        ax.plot(side[:, 0], side[:, 2], color=color, label=label)
        ax.fill_between(side[:, 0], side[:, 2], color=color, alpha=0.4)

    ax.legend()
    ax.set_xlabel("Price")
    ax.set_ylabel("Size (Coins)")
    ax.set_title(f"Order Book for {coin}")

    theme.style_primary_axis(ax)

    if external_axes is None:
        theme.visualize_output(force_tight_layout=False)
def check_cg_id(symbol: str) -> str:
    """Return *symbol* if it exists on CoinGecko, otherwise an empty string.

    Parameters
    ----------
    symbol: str
        Coin symbol to validate against CoinGecko

    Returns
    -------
    str
        The original symbol when found, "" when not found
    """
    cg_id = get_coingecko_id(symbol)
    if not cg_id:
        # Consistency fix: use console.print like the rest of this module
        # instead of a bare print.
        console.print(f"\n{symbol} not found on CoinGecko")
        return ""
    return symbol
def fetch_ccxt_ohlc(exchange_id, max_retries, symbol, timeframe, since, limit):
    """Fetch OHLCV candles through ccxt and return a datetime-indexed DataFrame."""
    exchange = getattr(ccxt, exchange_id)(
        {
            "enableRateLimit": True,  # required by the Manual
        }
    )
    # ccxt expects `since` in epoch milliseconds; parse ISO strings first.
    if isinstance(since, str):
        since = exchange.parse8601(since)
    candles = get_ohlcv(exchange, max_retries, symbol, timeframe, since, limit)
    frame = pd.DataFrame(
        candles, columns=["date", "Open", "High", "Low", "Close", "Volume"]
    )
    frame["date"] = pd.to_datetime(frame.date, unit="ms")
    return frame.set_index("date")
def retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
    """Fetch OHLCV candles from a ccxt exchange, retrying on failures.

    Parameters
    ----------
    exchange
        ccxt exchange instance exposing ``fetch_ohlcv``
    max_retries : int
        Number of failures tolerated before the last error is re-raised
    symbol, timeframe, since, limit
        Passed straight through to ``exchange.fetch_ohlcv``

    Returns
    -------
    list
        Raw OHLCV rows as returned by ccxt

    Raises
    ------
    Exception
        Whatever ``fetch_ohlcv`` raised, once retries are exhausted
    """
    # Bug fix: the counter used to be reset on every call and there was no
    # loop, so the function never retried - it swallowed the first error and
    # returned []. Now it actually retries before re-raising.
    num_retries = 0
    while True:
        try:
            num_retries += 1
            return exchange.fetch_ohlcv(symbol, timeframe, since, limit)
        except Exception:
            if num_retries > max_retries:
                raise
def get_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
    """Page through ccxt OHLCV history from ``since`` until now and return all rows."""
    ms_per_candle = exchange.parse_timeframe(timeframe) * 1000
    page_span_ms = limit * ms_per_candle
    now = exchange.milliseconds()
    collected = []
    cursor = since
    while cursor < now:
        page = retry_fetch_ohlcv(
            exchange, max_retries, symbol, timeframe, cursor, limit
        )
        # Continue right after the newest candle received; when a page comes
        # back empty, skip ahead by a full page worth of time instead.
        cursor = (page[-1][0] + 1) if len(page) else (cursor + page_span_ms)
        collected = collected + page
    return exchange.filter_by_since_limit(collected, since, None, key=0)
def get_exchanges_ohlc():
    """Return the ids of all ccxt exchanges that support OHLCV downloads."""
    return [
        exchange_id
        for exchange_id in ccxt.exchanges
        if getattr(ccxt, exchange_id)({"enableRateLimit": True}).has["fetchOHLCV"]
    ]
__docformat__ = "numpy"
import math
import re
from typing import Union, Any, Optional
import textwrap
import pandas as pd
from openbb_terminal.helper_funcs import lambda_long_number_format
def wrap_text_in_df(df: pd.DataFrame, w: int = 55) -> pd.DataFrame:  # pragma: no cover
    """Wrap long string cells of a dataframe onto multiple lines.

    Parameters
    ----------
    df: pd.DataFrame
        Data Frame with some data
    w: int
        length of text in column after which text is wrapped into new line

    Returns
    -------
    pd.DataFrame
        Frame in which every string cell is wrapped at *w* characters
    """

    def _wrap_cell(cell):
        # Non-string cells (numbers, None, ...) pass through untouched.
        if not isinstance(cell, str):
            return cell
        return "\n".join(textwrap.wrap(cell, width=w))

    return df.applymap(_wrap_cell)
def percent_to_float(s: str) -> float:
    """Helper method to replace string pct like "123.56%" to float 1.2356

    Parameters
    ----------
    s: string
        string to replace

    Returns
    -------
    float
    """
    # Canonicalise "12.5%" -> "12.5" by round-tripping through float.
    text = str(float(s.rstrip("%")))
    dot = text.find(".")
    if dot == -1:
        return int(text) / 100
    if text.startswith("-"):
        # Handle the sign separately and recurse on the magnitude.
        return -percent_to_float(text.lstrip("-"))
    # Shift the decimal point two places left by string surgery so the
    # division by 100 never introduces extra float error.
    digits = text.replace(".", "")
    dot -= 2
    if dot < 0:
        return float("." + "0" * abs(dot) + digits)
    return float(digits[:dot] + "." + digits[dot:])
def create_df_index(df: pd.DataFrame, name: str = "rank") -> None:
    """Helper method that creates new index for given data frame, with provided index name

    Works in place: the existing index becomes a 1-based column called *name*.

    Parameters
    ----------
    df:
        pd.DataFrame
    name: str
        index name
    """
    # Shift to 1-based ranks, then surface the index as a regular column.
    df.index += 1
    df.reset_index(inplace=True)
    df.rename(columns={"index": name}, inplace=True)
def lambda_long_number_format_with_type_check(x: Union[int, float]) -> Union[str, Any]:
    """Helper which checks if type of x is int or float and it's smaller then 10^18.
    If yes it apply long_num_format

    Parameters
    ----------
    x: int/float
        number to apply long_number_format method

    Returns
    -------
    Union[str, Any]
        Formatted value for in-range numbers, the original value otherwise
    """
    # Guard clause: anything non-numeric or astronomically large is returned as-is.
    if not isinstance(x, (int, float)) or x >= 10**18:
        return x
    return lambda_long_number_format(x)
def lambda_replace_underscores_in_column_names(string: str) -> str:
    """Turn a snake_case column name into a space-separated Title Case string."""
    return string.replace("_", " ").title()
def lambda_very_long_number_formatter(num: Union[str, int, float]) -> str:
    """Apply nice string format for very big numbers like Trillions, Quadrillions, Billions etc.

    Parameters
    ----------
    num: Union[str, int, float]
        number to format

    Returns
    -------
    str:
        formatted number
    """
    suffixes = ["", "K", "M", "B", "T"]
    if isinstance(num, str):
        try:
            num = float(num)
        except (TypeError, ValueError):
            return str(num)

    if not isinstance(num, (int, float)):
        # Unknown types are handed back unchanged.
        return num

    # NaN collapses to zero; fractional parts are truncated before scaling.
    value = 0 if math.isnan(num) else int(num)
    magnitude = 0
    while abs(value) >= 1000 and magnitude <= 3:
        magnitude += 1
        value /= 1000.0
    # Drop trailing zeros (and a dangling dot) from the fixed-point render.
    formatted = f"{round(value, 1):f}".rstrip("0").rstrip(".")
    return f"{formatted}{suffixes[magnitude]}"
def prettify_paragraph(text):
    """Indent each paragraph with a tab and join lines within a paragraph."""
    # Matches a lone newline: not preceded or followed by another newline.
    single_newline = "(?<!\n)\n(?!\n)"
    # Indent the first paragraph, then every paragraph after a blank line.
    indented = re.sub("\n\n", "\n\n\t", "\t" + text)
    # Collapse intra-paragraph line breaks.
    return re.sub(single_newline, "", indented)
def prettify_column_names(columns: list) -> list:
    """Helper method that change column names into more human readable format. E.g.
        - tradeAmount => Trade amount,
        - tokenValue => Token value
        - mediumGasPrice => Medium gas price

    Note: ``str.capitalize`` lower-cases everything after the first character,
    so only the first word keeps an upper-case initial (the old docstring
    example "Medium Gas Price" did not match the actual output).

    Parameters
    ----------
    columns: list
        list of column names

    Returns
    -------
    list with reformatted columns
    """
    # ".[^A-Z]*" captures each camelCase word: one char plus following non-capitals.
    return [" ".join(re.findall(".[^A-Z]*", val)).capitalize() for val in columns]
def denominate_number(
    number: Any, divider: int = 1000000, round_digits: Optional[int] = 4
) -> float:
    """Denominate numbers base on provided divider and round number by provided digit

    Parameters
    ----------
    number: Any
        value to round
    divider: int
        divide by value
    round_digits:
        round number to n digits

    Returns
    -------
    float:
        denominated number
    """
    scaled = float(number) / divider
    # NOTE(review): a falsy round_digits (0 or None) takes the no-digits
    # branch, so round_digits=0 behaves like None - confirm this is intended.
    if round_digits:
        return round(scaled, round_digits)
    return round(scaled)
def lambda_replace_unicode(x: Any) -> Any:
    """Replace unicode characters to ?

    Parameters
    ----------
    x: Any
        value to replace unicode chars

    Returns
    -------
    Any
        replaced value
    """
    if not isinstance(x, str):
        return x
    # Round-trip through ASCII; every non-ASCII character becomes "?".
    return x.encode("ascii", "replace").decode()
__docformat__ = "numpy"
import json
import math
import datetime as dt
from datetime import timezone
from typing import Sequence, Optional, Any, Dict, Tuple, Union, List
import textwrap
import logging
import requests
from bs4 import BeautifulSoup
import pandas as pd
from dateutil import parser
from requests.adapters import HTTPAdapter, RetryError
from urllib3.util.retry import Retry
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Base URL used when building CoinGecko page links for scraping.
GECKO_BASE_URL = "https://www.coingecko.com"
# Currencies a CoinGecko metric may be denominated in.
DENOMINATION = ("usd", "btc", "eth")
def millify(n: Union[float, int]) -> str:
    """Format a number with a K/M/B/T suffix, e.g. 1200 -> '1K'."""
    suffixes = ["", "K", "M", "B", "T"]
    value = float(n)
    # Pick the thousands-group of the number, clamped to the suffix table.
    group = 0 if value == 0 else int(math.floor(math.log10(abs(value)) / 3))
    group = max(0, min(len(suffixes) - 1, group))
    return f"{value / 10 ** (3 * group):.0f}{suffixes[group]}"
def calc_change(current: Union[float, int], previous: Union[float, int]):
    """Calculates change between two different values

    Equal inputs give 0; growth from zero gives +infinity.
    """
    if current == previous:
        return 0
    try:
        delta = current - previous
        return delta / previous * 100.0
    except ZeroDivisionError:
        return float("inf")
def get_btc_price() -> float:
    """Get BTC/USD price from CoinGecko API

    Returns
    -------
    float
        latest bitcoin price in usd.
    """
    # A timeout keeps the terminal from hanging forever if CoinGecko stalls;
    # without one, requests waits indefinitely.
    req = requests.get(
        "https://api.coingecko.com/api/v3/simple/"
        "price?ids=bitcoin&vs_currencies=usd&include_market_cap"
        "=false&include_24hr_vol"
        "=false&include_24hr_change=false&include_last_updated_at=false",
        timeout=10,
    )
    return req.json()["bitcoin"]["usd"]
def _retry_session(
    url: str, retries: int = 3, backoff_factor: float = 1.0
) -> requests.Session:
    """Build a requests session that retries failed calls to CoinGecko.

    Parameters
    ----------
    url: str
        Url to mount a session
    retries: int
        How many retries
    backoff_factor: float
        Backoff schema - time periods between retry

    Returns
    -------
    requests.Session
        Mounted session
    """
    session = requests.Session()
    # Retry transient server errors on connect, read and status level alike.
    retry_strategy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        status_forcelist=[500, 502, 503, 504],
        backoff_factor=backoff_factor,
    )
    session.mount(url, HTTPAdapter(max_retries=retry_strategy))
    return session
def scrape_gecko_data(url: str) -> BeautifulSoup:
    """Helper method that scrape Coin Gecko site.

    Parameters
    ----------
    url : str
        coin gecko url to scrape e.g: "https://www.coingecko.com/en/discover"

    Returns
    -------
    BeautifulSoup object

    Raises
    ------
    RetryError
        when the site could not be reached at all (original error chained)
    Exception
        when the site answered with an HTTP error status (>= 400)
    """
    headers = {"User-Agent": get_user_agent()}
    # Mount a retrying session so transient 5xx responses are retried automatically.
    session = _retry_session("https://www.coingecko.com")
    try:
        req = session.get(url, headers=headers, timeout=5)
    except Exception as error:
        logger.exception(error)
        console.print(error)
        # Chain the original error so the root cause stays visible in tracebacks.
        raise RetryError(
            "Connection error. Couldn't connect to CoinGecko and scrape the data. "
            "Please visit CoinGecko site, and check if it's not under maintenance"
        ) from error

    if req.status_code >= 400:
        raise Exception(
            f"Couldn't connect to {url}. Status code: {req.status_code}. Reason: {req.reason}"
        )

    return BeautifulSoup(req.text, features="lxml")
def replace_underscores_to_newlines(cols: list, line: int = 13) -> list:
    """Helper method that replace underscores to white space and breaks it to new line

    Parameters
    ----------
    cols
        - list of columns names
    line
        - line length

    Returns
    -------
    list of column names with replaced underscores
    """
    wrapped = []
    for col in cols:
        readable = col.replace("_", " ")
        # Long words are kept intact; only whitespace is used as break points.
        wrapped.append(textwrap.fill(readable, line, break_long_words=False))
    return wrapped
def find_discord(item: Optional[List[Any]]) -> Union[str, Any]:
    """Return the first link containing 'discord' from a list of links, or None."""
    if not isinstance(item, list) or not item:
        return None
    matches = [link for link in item if "discord" in link]
    return matches[0] if matches else None
def join_list_elements(elem):
    """Join dict keys or list items into a comma-separated string; None otherwise."""
    # Iterating a dict yields its keys, so one join covers both cases.
    if isinstance(elem, (dict, list)):
        return ", ".join(elem)
    return None
def filter_list(lst: Optional[List[Any]]) -> Optional[List[Any]]:
    """Drop empty-string entries from a non-empty list; pass anything else through."""
    if not isinstance(lst, list) or not lst:
        return lst
    return [item for item in lst if item != ""]
def calculate_time_delta(date: dt.datetime) -> int:
    """Return the whole number of days between *date* and now (UTC)."""
    # Strings (or anything that is not already a datetime) are parsed first.
    if not isinstance(date, dt.datetime):
        date = parser.parse(date)
    # NOTE(review): assumes *date* is timezone-aware; a naive datetime would
    # raise TypeError on subtraction - confirm against callers.
    return (dt.datetime.now(timezone.utc) - date).days
def get_eth_addresses_for_cg_coins(file) -> pd.DataFrame:  # pragma: no cover
    """Load a CoinGecko coin-list JSON file and add an 'ethereum' address column."""
    with open(file, encoding="utf8") as handle:
        coins = json.load(handle)
    frame = pd.DataFrame(coins)
    # Pull the ethereum contract address out of each coin's platforms mapping.
    frame["ethereum"] = frame["platforms"].apply(
        lambda platforms: platforms.get("ethereum") if "ethereum" in platforms else None
    )
    return frame
def clean_question_marks(dct: dict) -> None:
    """Replace every '?' value in *dct* with None, in place; non-dicts are ignored."""
    if not isinstance(dct, dict):
        return
    for key, value in dct.items():
        if value == "?":
            dct[key] = None
def replace_qm(df: pd.DataFrame) -> pd.DataFrame:
    """Replace '?' and ' ?' placeholder cells with None, in place, and return *df*."""
    df.replace(to_replace={"?": None, " ?": None}, inplace=True)
    return df
def get_url(url: str, elem: BeautifulSoup):  # pragma: no cover
    """Prefix the first anchor's href inside *elem* with *url*."""
    href = elem.find("a")["href"]
    return url + href
def clean_row(row: BeautifulSoup) -> list:
    """Helper method that cleans whitespaces and newlines in text returned from BeautifulSoup

    Parameters
    ----------
    row
        text returned from BeautifulSoup find method

    Returns
    -------
    list of elements
    """
    lines = row.text.strip().split("\n")
    # Discard blank entries produced by consecutive newlines.
    return [line for line in lines if line not in ("", " ")]
def convert(word: str) -> str:
    """Capitalize *word* when it is purely alphabetic; return "" otherwise."""
    # NOTE(review): the original filtered its generator on ``word.isalpha()``
    # (the whole input), so any value containing "_" or digits yields "" -
    # possibly per-segment ``x.isalpha()`` was intended. Behavior preserved.
    return word.capitalize() if word.isalpha() else ""
def collateral_auditors_parse(
    args: Any,
) -> Tuple[Any, Any]:  # pragma: no cover
    """Split a scraped token list into (auditors, collateral).

    The first element is either "N/A" (no auditors) or the count of auditor
    entries that follow; everything after those entries is collateral.

    Parameters
    ----------
    args: Any
        sequence of scraped strings

    Returns
    -------
    Tuple[Any, Any]
        (auditors, collateral); ([], []) when the input cannot be parsed
    """
    try:
        if args and args[0] == "N/A":
            collateral = args[1:]
            auditors = []
        else:
            n_elem = int(args[0])
            auditors = args[1 : n_elem + 1]  # noqa: E203
            collateral = args[n_elem + 1 :]  # noqa: E203

        return auditors, collateral
    # Robustness fix: also catch IndexError so an empty ``args`` returns
    # ([], []) instead of raising on ``args[0]``.
    except (ValueError, IndexError):
        return [], []
def swap_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Return *df* with its last column moved to the front."""
    columns = list(df.columns)
    reordered = columns[-1:] + columns[:-1]
    return df[reordered]
def changes_parser(changes: list) -> list:
    """Normalise a scraped price-changes list to at least three entries.

    Lists shorter than three entries are padded with None (in place); any
    non-list input is replaced with [None, None, None].

    Parameters
    ----------
    changes: list
        scraped change values (possibly incomplete or malformed)

    Returns
    -------
    list
        a list with at least three entries
    """
    if isinstance(changes, list):
        # Bug fix: the old else-branch also wiped valid lists that already
        # had three or more entries, replacing them with [None, None, None].
        while len(changes) < 3:
            changes.append(None)
    else:
        changes = [None for _ in range(3)]
    return changes
def remove_keys(entries: tuple, the_dict: Dict[Any, Any]) -> None:
    """Delete every key in *entries* from *the_dict* in place; missing keys are ignored."""
    for key in entries:
        the_dict.pop(key, None)
def rename_columns_in_dct(dct: dict, mapper: dict) -> dict:
    """Return *dct* with its keys renamed according to *mapper*.

    Bug fix: the fallback for keys missing from *mapper* used to be the
    VALUE (``mapper.get(k, v)``), silently turning values into keys; keys
    absent from the mapper are now kept unchanged.
    """
    return {mapper.get(key, key): value for key, value in dct.items()}
def create_dictionary_with_prefixes(
    columns: Sequence[Any], dct: Dict[Any, Any], constrains: Optional[Tuple] = None
):  # type: ignore
    """Flatten nested sub-dicts of *dct* into '<column>_<key>' entries.

    When *constrains* is a non-empty tuple, only keys listed there are kept;
    otherwise every key of each sub-dict is included.
    """
    flattened = {}
    for column in columns:
        nested = dct.get(column, {})
        for key in nested:
            # Falsy constrains (None or empty) means "keep everything".
            if not constrains or key in constrains:
                flattened[f"{column}_{key}"] = nested.get(key)
    return flattened
__docformat__ = "numpy"
# pylint: disable=R0904, C0302, R1710, W0622, C0201, C0301
import argparse
import logging
import os
from typing import List
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.cryptocurrency import cryptocurrency_helpers, pyth_model, pyth_view
from openbb_terminal import feature_flags as obbff
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import (
display_all_coins,
plot_chart,
)
from openbb_terminal.cryptocurrency.crypto_views import find
from openbb_terminal.cryptocurrency.due_diligence import (
binance_view,
coinpaprika_view,
finbrain_crypto_view,
pycoingecko_view,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
export_data,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import CryptoBaseController
from openbb_terminal.rich_config import console, MenuText
# pylint: disable=import-outside-toplevel
logger = logging.getLogger(__name__)
# Columns accepted by `find --key`.
FIND_KEYS = ["id", "symbol", "name"]

# Maps CLI source names to display names.
# NOTE(review): the "CoingGecko" key looks misspelled ("CoinGecko") - other
# code matches these keys at runtime, so confirm all lookups before renaming.
CRYPTO_SOURCES = {
    "Binance": "Binance",
    "CoingGecko": "CoinGecko",
    "CoinPaprika": "CoinPaprika",
    "Coinbase": "Coinbase",
    "YahooFinance": "YahooFinance",
}
class CryptoController(CryptoBaseController):
    """Crypto Controller"""

    # Commands handled directly by this controller.
    CHOICES_COMMANDS = [
        "headlines",
        "candle",
        "load",
        "find",
        "prt",
        "resources",
        "price",
    ]
    # Sub-menus reachable from this controller.
    CHOICES_MENUS = [
        "ta",
        "dd",
        "ov",
        "disc",
        "onchain",
        "defi",
        "tools",
        "nft",
        "qa",
        "forecast",
    ]
    # Maps a data-source name to the due-diligence view module for it.
    # NOTE(review): the "CoingGecko" key looks misspelled ("CoinGecko") -
    # keys are matched at runtime, so confirm lookups before renaming.
    DD_VIEWS_MAPPING = {
        "CoingGecko": pycoingecko_view,
        "CoinPaprika": coinpaprika_view,
        "Binance": binance_view,
    }
    PATH = "/crypto/"
    FILE_PATH = os.path.join(os.path.dirname(__file__), "README.md")
    CHOICES_GENERATION = True
def __init__(self, queue: List[str] = None):
    """Constructor

    Parameters
    ----------
    queue: List[str]
        Commands queued for execution, passed through to the base controller
    """
    super().__init__(queue)

    if session and obbff.USE_PROMPT_TOOLKIT:
        # Build tab-completion choices; `load` gets explicit flag/value maps.
        choices: dict = self.choices_default
        choices["load"] = {
            "--interval": {
                c: {}
                for c in [
                    "1",
                    "5",
                    "15",
                    "30",
                    "60",
                    "240",
                    "1440",
                    "10080",
                    "43200",
                ]
            },
            "-i": "--interval",
            "--exchange": {c: {} for c in self.exchanges},
            "--source": {c: {} for c in ["CCXT", "YahooFinance", "CoinGecko"]},
            "--vs": {c: {} for c in ["usd", "eur"]},
            "--start": None,
            "-s": "--start",
            "--end": None,
            "-e": "--end",
        }

        self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
    """Print help"""
    mt = MenuText("crypto/")
    mt.add_cmd("load")
    mt.add_cmd("find")
    mt.add_cmd("price", "Pyth")
    mt.add_raw("\n")
    # Show the currently loaded pair/source/interval, blank when nothing loaded.
    mt.add_param(
        "_symbol", f"{self.symbol.upper()}/{self.vs.upper()}" if self.symbol else ""
    )
    if self.source == "CCXT":
        # Exchange is only meaningful for ccxt-backed data.
        mt.add_param(
            "_exchange", self.exchange if self.symbol and self.exchange else ""
        )
    mt.add_param("_source", self.source if self.symbol and self.source else "")
    mt.add_param("_interval", self.current_interval)
    mt.add_raw("\n")
    mt.add_cmd("headlines")
    # Commands/menus below are greyed out until a coin is loaded.
    mt.add_cmd("candle", self.symbol)
    mt.add_cmd("prt", self.symbol)
    mt.add_raw("\n")
    mt.add_menu("disc")
    mt.add_menu("ov")
    mt.add_menu("onchain")
    mt.add_menu("defi")
    mt.add_menu("tools")
    mt.add_menu("nft")
    mt.add_menu("dd", self.symbol)
    mt.add_menu("ta", self.symbol)
    mt.add_menu("qa", self.symbol)
    mt.add_menu("forecast", self.symbol)
    console.print(text=mt.menu_text, menu="Cryptocurrency")
@log_start_end(log=logger)
def call_prt(self, other_args):
    """Process prt command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="prt",
        description="Potential Returns Tool"
        "Tool to check returns if loaded coin reaches provided price or other crypto market cap"
        "Uses CoinGecko to grab coin data (price and market cap).",
    )
    parser.add_argument(
        "--vs",
        help="Coin to compare with",
        dest="vs",
        type=str,
        # required="-h" not in other_args,
        default=None,
    )
    parser.add_argument(
        "-p",
        "--price",
        help="Desired price",
        dest="price",
        type=int,
        default=None,
    )
    parser.add_argument(
        "-t",
        "--top",
        help="Compare with top N coins",
        dest="top",
        type=int,
        default=None,
    )
    # A leading bare value is treated as the --vs coin.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "--vs")

    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )

    if ns_parser:
        if self.symbol:
            # Count truthy parsed attributes; more than one set flag is rejected.
            num_args = 0
            for arg in vars(ns_parser):
                if getattr(ns_parser, arg):
                    num_args = num_args + 1
            if num_args > 1:
                console.print("[red]Please chose only one flag[/red]\n")
                return

            current_coin_id = cryptocurrency_helpers.get_coingecko_id(self.symbol)

            if ns_parser.vs is not None:
                # Validate the comparison coin against CoinGecko before use.
                coin_found = cryptocurrency_helpers.get_coingecko_id(ns_parser.vs)
                if not coin_found:
                    console.print(
                        f"VS Coin '{ns_parser.vs}' not found in CoinGecko\n"
                    )
                    return
            else:
                coin_found = None

            if (
                ns_parser.vs is None
                and ns_parser.top is None
                and ns_parser.price is None
            ):
                console.print(
                    "[red]Please chose a flag: --top, --vs, or --price [/red]\n"
                )
                return

            pycoingecko_view.display_coin_potential_returns(
                current_coin_id,
                coin_found,
                ns_parser.top,
                ns_parser.price,
            )

        else:
            console.print("[red]Please load a coin first![/red]\n")
@log_start_end(log=logger)
def call_price(self, other_args):
    """Process price command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="price",
        description="""Display price and interval of confidence in real-time. [Source: Pyth]""",
    )
    parser.add_argument(
        "-s",
        "--symbol",
        required="-h" not in other_args,
        type=str,
        dest="symbol",
        help="Symbol of coin to load data for, ~100 symbols are available",
        choices=pyth_model.ASSETS.keys(),
        metavar="SYMBOL",
    )
    # A leading bare value is treated as the -s symbol.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-s")

    ns_parser = self.parse_known_args_and_warn(parser, other_args)

    if ns_parser:
        upper_symbol = ns_parser.symbol.upper()
        # Pyth feed ids are keyed as "<COIN>-USD"; append the suffix if missing.
        if "-USD" not in ns_parser.symbol:
            upper_symbol += "-USD"
        if upper_symbol in pyth_model.ASSETS.keys():
            console.print(
                "[param]If it takes too long, you can use 'Ctrl + C' to cancel.\n[/param]"
            )
            pyth_view.display_price(upper_symbol)
        else:
            console.print("[red]The symbol selected does not exist.[/red]\n")
@log_start_end(log=logger)
def call_candle(self, other_args):
    """Process candle command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="candle",
        description="""Display chart for loaded coin. You can specify currency vs which you want
        to show chart and also number of days to get data for.""",
    )

    parser.add_argument(
        "--log",
        help="Plot with y axis on log scale",
        action="store_true",
        default=False,
        dest="logy",
    )

    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )

    if ns_parser:
        if not self.symbol:
            console.print("No coin loaded. First use `load {symbol}`\n")
            return

        # Export the raw dataframe first, then draw the candle chart.
        export_data(
            ns_parser.export,
            os.path.join(os.path.dirname(os.path.abspath(__file__))),
            f"{self.symbol}",
            self.current_df,
        )

        plot_chart(
            exchange=self.exchange,
            source=self.source,
            to_symbol=self.symbol,
            from_symbol=self.current_currency,
            prices_df=self.current_df,
            interval=self.current_interval,
            yscale="log" if ns_parser.logy else "linear",
        )
@log_start_end(log=logger)
def call_ta(self, _):
    """Process ta command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.cryptocurrency.technical_analysis.ta_controller import (
        TechnicalAnalysisController,
    )

    # TODO: Play with this to get correct usage
    if self.symbol:
        if self.current_currency != "" and not self.current_df.empty:
            self.queue = self.load_class(
                TechnicalAnalysisController,
                stock=self.current_df,
                coin=self.symbol,
                start=self.current_df.index[0],
                interval="",
                queue=self.queue,
            )

    else:
        console.print("No coin selected. Use 'load' to load a coin.\n")
@log_start_end(log=logger)
def call_tools(self, _):
    """Process tools command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.cryptocurrency.tools.tools_controller import (
        ToolsController,
    )

    self.queue = self.load_class(ToolsController, self.queue)
@log_start_end(log=logger)
def call_disc(self, _):
    """Process disc command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.cryptocurrency.discovery.discovery_controller import (
        DiscoveryController,
    )

    self.queue = self.load_class(DiscoveryController, self.queue)
@log_start_end(log=logger)
def call_ov(self, _):
    """Process ov command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.cryptocurrency.overview.overview_controller import (
        OverviewController,
    )

    self.queue = self.load_class(OverviewController, self.queue)
@log_start_end(log=logger)
def call_defi(self, _):
    """Process defi command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.cryptocurrency.defi.defi_controller import DefiController

    self.queue = self.load_class(DefiController, self.queue)
@log_start_end(log=logger)
def call_headlines(self, other_args):
    """Process sentiment command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="headlines",
        description="""Display sentiment analysis from FinBrain for chosen Cryptocurrencies""",
    )

    parser.add_argument(
        "-c",
        "--coin",
        default="BTC",
        type=str,
        dest="coin",
        help="Symbol of coin to load data for, ~100 symbols are available",
        choices=finbrain_crypto_view.COINS,
    )

    # A leading bare value is treated as the -c coin.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-c")

    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
    )

    if ns_parser:
        finbrain_crypto_view.display_crypto_sentiment_analysis(
            symbol=ns_parser.coin, export=ns_parser.export
        )
@log_start_end(log=logger)
def call_dd(self, _):
    """Process dd command"""
    if self.symbol:
        # Imported lazily (see the module-level pylint import-outside-toplevel disable).
        from openbb_terminal.cryptocurrency.due_diligence.dd_controller import (
            DueDiligenceController,
        )

        self.queue = self.load_class(
            DueDiligenceController,
            self.symbol,
            self.source,
            queue=self.queue,
        )
    else:
        console.print("No coin selected. Use 'load' to load a coin.\n")
@log_start_end(log=logger)
def call_qa(self, _):
    """Process qa command"""
    if self.symbol:
        # Imported lazily (see the module-level pylint import-outside-toplevel disable).
        from openbb_terminal.cryptocurrency.quantitative_analysis import (
            qa_controller,
        )

        self.queue = self.load_class(
            qa_controller.QaController,
            self.symbol,
            self.current_df,
            self.queue,
        )
@log_start_end(log=logger)
def call_onchain(self, _):
    """Process onchain command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.cryptocurrency.onchain.onchain_controller import (
        OnchainController,
    )

    self.queue = self.load_class(OnchainController, self.queue)
@log_start_end(log=logger)
def call_nft(self, _):
    """Process nft command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.cryptocurrency.nft.nft_controller import NFTController

    self.queue = self.load_class(NFTController, self.queue)
# TODO: merge the two views that this command calls. (find + previously called coins)
@log_start_end(log=logger)
def call_find(self, other_args):
    """Process find command"""
    parser = argparse.ArgumentParser(
        prog="find",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
        Find similar coin by name, symbol, or id. If you don't remember exact name or id of the Coin at CoinGecko,
        Binance, Coinbase or CoinPaprika you can use this command to display coins with similar name, symbol or id
        to your search query.
        Example of usage: coin name is something like "polka". So I can try: find -c polka -k name -t 25
        It will search for coin that has similar name to polka and display top 25 matches.
        -c, --coin stands for coin - you provide here your search query
        -k, --key it's a searching key. You can search by symbol, id or name of coin
        -l, --limit it displays top N number of records.
        coins: Shows list of coins available on CoinGecko, CoinPaprika and Binance.If you provide name of
        coin then in result you will see ids of coins with best match for all mentioned services.
        If you provide "ALL" in your coin search query, then all coins will be displayed. To move over coins you
        can use pagination mechanism with skip, top params. E.g. coins ALL --skip 100 --limit 30 then all coins
        from 100 to 130 will be displayed. By default skip = 0, limit = 10.
        If you won't provide source of the data everything will be displayed (CoinGecko, CoinPaprika, Binance).
        If you want to search only in given source then use --source flag. E.g. if you want to find coin with name
        uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10
        """,
    )
    parser.add_argument(
        "-c",
        "--coin",
        help="Symbol Name or Id of Coin",
        dest="coin",
        required="-h" not in other_args,
        type=str,
    )
    parser.add_argument(
        "-k",
        "--key",
        dest="key",
        help="Specify by which column you would like to search: symbol, name, id",
        type=str,
        choices=FIND_KEYS,
        default="symbol",
    )
    parser.add_argument(
        "-l",
        "--limit",
        default=10,
        dest="limit",
        help="Number of records to display",
        type=check_positive,
    )
    parser.add_argument(
        "-s",
        "--skip",
        default=0,
        dest="skip",
        help="Skip n of records",
        type=check_positive,
        choices=range(1, 300),
        metavar="SKIP",
    )
    # A leading bare value is treated as the -c search query.
    if other_args and not other_args[0][0] == "-":
        other_args.insert(0, "-c")

    ns_parser = self.parse_known_args_and_warn(
        parser,
        other_args,
        EXPORT_ONLY_RAW_DATA_ALLOWED,
    )
    # TODO: merge find + display_all_coins
    if ns_parser:
        # "ALL" lists every coin (paginated); anything else runs a fuzzy search.
        if ns_parser.coin == "ALL":
            display_all_coins(
                symbol=ns_parser.coin,
                source=ns_parser.source,
                limit=ns_parser.limit,
                skip=ns_parser.skip,
                show_all=True,
                export=ns_parser.export,
            )
        else:
            find(
                query=ns_parser.coin,
                source=ns_parser.source,
                key=ns_parser.key,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_forecast(self, _):
    """Process forecast command"""
    # Imported lazily (see the module-level pylint import-outside-toplevel disable).
    from openbb_terminal.forecast import forecast_controller

    console.print(self.symbol)
    self.queue = self.load_class(
        forecast_controller.ForecastController,
        self.symbol,
        self.current_df,
        self.queue,
    )
import logging
from typing import Tuple
from pythclient.pythaccounts import PythPriceAccount, PythPriceStatus
from pythclient.solana import (
SolanaClient,
SolanaPublicKey,
SOLANA_DEVNET_HTTP_ENDPOINT,
SOLANA_DEVNET_WS_ENDPOINT,
)
logger = logging.getLogger(__name__)
# Mapping of asset pair symbol -> Pyth price-feed account on Solana.
# Each "feedID" is the base58 public key of the PythPriceAccount that
# get_price() queries.  NOTE(review): the client below connects to the
# Solana *devnet* endpoints, so these look like devnet feed IDs — confirm
# before relying on them for mainnet data.
ASSETS = {
    "AAVE-USD": {"feedID": "FT7Cup6ZiFDF14uhFD3kYS3URMCf2RZ4iwfNEVUgndHW"},
    "ADA-USD": {"feedID": "8oGTURNmSQkrBS1AQ5NjB2p8qY34UVmMA9ojrw8vnHus"},
    "ALGO-USD": {"feedID": "c1A946dY5NHuVda77C8XXtXytyR3wK1SCP3eA9VRfC3"},
    "ANC-USD": {"feedID": "5Si2Pdm7B87ojYvkegg7ct8Y446RHJyEjeREAyZEZcAV"},
    "APE-USD": {"feedID": "EfnLcrwxCgwALc5vXr4cwPZMVcmotZAuqmHa8afG8zJe"},
    "ATLAS-USD": {"feedID": "Dzs6SE1cssUqBpWCKzE4jeS5PrmRK1Fp2Kw1WMaDCiVR"},
    "ATOM-USD": {"feedID": "7YAze8qFUMkBnyLVdKT4TFUUFui99EwS5gfRArMcrvFk"},
    "AVAX-USD": {"feedID": "FVb5h1VmHPfVb1RfqZckchq18GxRv4iKt8T4eVTQAqdz"},
    "BCH-USD": {"feedID": "4EQrNZYk5KR1RnjyzbaaRbHsv8VqZWzSUtvx58wLsZbj"},
    "BETH-USD": {"feedID": "HyShqBUTtwAaCas9Dnib3ut6GmEDk9hTdKsrNfRffX8E"},
    "BNB-USD": {"feedID": "GwzBgrXb4PG59zjce24SF2b9JXbLEjJJTBkmytuEZj1b"},
    "BRZ-USD": {"feedID": "5g4XtpqLynP6YUSQwncw6CrdAEoy5a7QNDevgAgLsfyC"},
    "BTC-USD": {"feedID": "HovQMDrbAgAYPCmHVSrezcSmkMtXSSUsLDFANExrZh2J"},
    "BUSD-USD": {"feedID": "TRrB75VTpiojCy99S5BHmYkjARgtfBqZKk5JbeouUkV"},
    "C98-USD": {"feedID": "Dxp7vob2NTGhmodyWyeEkqtNEpSfvSMoGKMYjmaY6pg1"},
    "COPE-USD": {"feedID": "BAXDJUXtz6P5ARhHH1aPwgv4WENzHwzyhmLYK4daFwiM"},
    "CUSD-USD": {"feedID": "DDwzo3aAjgYk8Vn8D3Zbxo62rTmBVdJv1WjaKQseiHKk"},
    "DOGE-USD": {"feedID": "4L6YhY8VvUgmqG5MvJkUJATtzB2rFqdrJwQCmFLv4Jzy"},
    "DOT-USD": {"feedID": "4dqq5VBpN4EwYb7wyywjjfknvMKu7m78j9mKZRXTj462"},
    "ETH-USD": {"feedID": "EdVCmQ9FSPcVe5YySXDPCRmc8aDQLKJ9xvYBMZPie1Vw"},
    "FIDA-USD": {"feedID": "7teETxN9Y8VK6uJxsctHEwST75mKLLwPH1jaFdvTQCpD"},
    "FTM-USD": {"feedID": "BTwrLU4so1oJMViWA3BTzh8YmFwiLZ6CL4U3JryG7Q5S"},
    "FTT-USD": {"feedID": "6vivTRs5ZPeeXbjo7dfburfaYDWoXjBtdtuYgQRuGfu"},
    "GMT-USD": {"feedID": "EZy99wkoqohyyNxT1QCwW3epQtMQ1Dfqx4sXKqkHiSox"},
    "GOFX-USD": {"feedID": "A9r7BHsXJQ2w9B7cdJV8BkfRoBWkxRichVGm72vVS1s5"},
    "HXRO-USD": {"feedID": "6VrSw4Vxg5zs9shfdCxLqfUy2qSD3NCS9AsdBQUgbjnt"},
    "INJ-USD": {"feedID": "44uRsNnT35kjkscSu59MxRr9CfkLZWf6gny8bWqUbVxE"},
    "JET-USD": {"feedID": "3JnVPNY878pRH6TQ9f4wuwfNqGh6okyshmqmKsyvewMs"},
    "LTC-USD": {"feedID": "BLArYBCUYhdWiY8PCUTpvFE21iaJq85dvxLk9bYMobcU"},
    "LUNA-USD": {"feedID": "7xzCBiE2d9UwV9CYLV9vrbJPipJzMEaycPBoZg2LjhUf"},
    "LUNC-USD": {"feedID": "8PugCXTAHLM9kfLSQWe2njE5pzAgUdpPk3Nx5zSm7BD3"},
    "MATIC-USD": {"feedID": "FBirwuDFuRAu4iSGc7RGxN5koHB7EJM1wbCmyPuQoGur"},
    "MER-USD": {"feedID": "6Z3ejn8DCWQFBuAcw29d3A5jgahEpmycn7YDMX7yRNrn"},
    "MIR-USD": {"feedID": "4BDvhA5emySfqyyTHPHofTJqRw1cwDabK1yiEshetPv9"},
    "MNGO-USD": {"feedID": "DCNw5mwZgjfTcoNsSZWUiXqU61ushNvr3JRQJRi1Nf95"},
    "MSOL-USD": {"feedID": "9a6RNx3tCu1TSs6TBSfV2XRXEPEZXQ6WB7jRojZRvyeZ"},
    "NEAR-USD": {"feedID": "3gnSbT7bhoTdGkFVZc1dW1PvjreWzpUNUD5ppXwv1N59"},
    "ONE-USD": {"feedID": "BScN1mER6QJ2nFKpnP4PcqffQp97NXAvzAbVPjLKyRaF"},
    "ORCA-USD": {"feedID": "A1WttWF7X3Rg6ZRpB2YQUFHCRh1kiXV8sKKLV3S9neJV"},
    "PAI-USD": {"feedID": "8EjmYPrH9oHxLqk2oFG1qwY6ert7M9cv5WpXyWHxKiMb"},
    "PORT-USD": {"feedID": "33ugpDWbC2mLrYSQvu1BHfykR8bt3MVc4S3YuuXMVRH3"},
    "RAY-USD": {"feedID": "EhgAdTrgxi4ZoVZLQx1n93vULucPpiFi2BQtz9RJr1y6"},
    "SBR-USD": {"feedID": "4WSN3XDSTfBX9A1YXGg8HJ7n2GtWMDNbtz1ab6aGGXfG"},
    "SCNSOL-USD": {"feedID": "HoDAYYYhFvCNQNFPui51H8qvpcdz6KuVtq77ZGtHND2T"},
    "SLND-USD": {"feedID": "FtwKARNAnZK2Nx1W4KVXzbyDzuRJqmApHRBtQpZ49HDv"},
    "SNY-USD": {"feedID": "DEmEX28EgrdQEBwNXdfMsDoJWZXCHRS5pbgmJiTkjCRH"},
    "SOL-USD": {"feedID": "J83w4HKfqxwcq3BEMMkPFSppX3gqekLyLJBexebFVkix"},
    "SRM-USD": {"feedID": "992moaMQKs32GKZ9dxi8keyM2bUmbrwBZpK4p2K6X5Vs"},
    "STEP-USD": {"feedID": "DKjdYzkPEZLBsfRzUaCjze5jjgCYu5kFCB19wVa9sy6j"},
    "STSOL-USD": {"feedID": "2LwhbcswZekofMNRtDRMukZJNSRUiKYMFbqtBwqjDfke"},
    "TUSD-USD": {"feedID": "2sbXow64dSbktGM6gG9FpszwVu7GNhr6Qi2WHRCP9ULn"},
    "USDC-USD": {"feedID": "5SSkXsEKQepHHAewytPVwdej4epN1nxgLVM84L4KXgy7"},
    "USDT-USD": {"feedID": "38xoQ4oeJCBrcVvca2cGk7iV1dAfrmTR1kmhSCJQ8Jto"},
    "USTC-USD": {"feedID": "AUKjh1oVPZyudi3nzYSsdZxSjq42afUCvsdbKFc5CbD"},
    "VAI-USD": {"feedID": "Gvm85Pbjq4Tv7qyaS4y9ZMqCdY3nynGDBFYAu7mjPoGM"},
    "XVS-USD": {"feedID": "8Y4jhVcQvQZWjMarM855NMkVuua78FS8Uwy58TjcnUWs"},
    "ZBC-USD": {"feedID": "7myonvBWD5zfh6qfScRP5E4anEue4Bqnu8XS8cdtJTQx"},
}
async def get_price(symbol: str) -> Tuple[float, float, float]:
    """Returns price and confidence interval from pyth live feed. [Source: Pyth]

    Parameters
    ----------
    symbol : str
        Symbol of the asset to get price and confidence interval from.
        Must be a key of ASSETS; anything else raises KeyError.

    Returns
    -------
    Tuple[float, float, float]
        Price of the asset,
        Confidence level,
        Previous price of the asset.
        All three are -1 when the feed status is not TRADING.
    """
    account_key = SolanaPublicKey(ASSETS[symbol]["feedID"])
    solana_client = SolanaClient(
        endpoint=SOLANA_DEVNET_HTTP_ENDPOINT, ws_endpoint=SOLANA_DEVNET_WS_ENDPOINT
    )
    price: PythPriceAccount = PythPriceAccount(account_key, solana_client)
    await price.update()

    price_status = price.aggregate_price_status
    # Initialize ALL outputs up front.  Bug fix: `previous_price` was only
    # assigned inside the TRADING branch, so a non-trading feed raised
    # UnboundLocalError at the return statement.
    aggregate_price = -1
    confidence = -1
    previous_price = -1
    if price_status == PythPriceStatus.TRADING:
        aggregate_price = price.aggregate_price
        previous_price = price.prev_price
        confidence = price.aggregate_price_confidence_interval

    # Always release the RPC connection, trading or not.
    await solana_client.close()
    return aggregate_price, confidence, previous_price
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
from matplotlib import ticker
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.defi import terraengineer_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_terra_asset_history(
asset: str = "",
address: str = "",
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Plots the 30-day history of specified asset in terra address
[Source: https://terra.engineer/]
Parameters
----------
asset : str
Terra asset {ust,luna,sdt}
address : str
Terra address. Valid terra addresses start with 'terra'
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = terraengineer_model.get_history_asset_from_terra_address(
address=address, asset=asset
)
if df.empty:
console.print("[red]No data in the provided dataframe[/red]\n")
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.plot(df["x"], df["y"])
ax.set_ylabel(f"{asset.upper()} Amount")
ax.set_title(f"{asset.upper()} Amount in {address}")
ax.get_yaxis().set_major_formatter(
ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
)
cfg.theme.style_primary_axis(ax)
if not external_axes:
cfg.theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"aterra",
df,
)
@log_start_end(log=logger)
def display_anchor_yield_reserve(
export: str = "", external_axes: Optional[List[plt.Axes]] = None
) -> None:
"""Plots the 30-day history of the Anchor Yield Reserve.
[Source: https://terra.engineer/]
Parameters
----------
export : str
Export dataframe data to csv,json,xlsx file, by default False
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = terraengineer_model.get_anchor_yield_reserve()
if df.empty:
console.print("[red]No data was found[/red]\n")
return
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.plot(df["x"], df["y"])
ax.set_ylabel("UST Amount")
ax.set_title("Anchor UST Yield Reserve")
ax.get_yaxis().set_major_formatter(
ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
)
cfg.theme.style_primary_axis(ax)
if not external_axes:
cfg.theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"ayr",
df,
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/cryptocurrency/defi/terraengineer_view.py | 0.871448 | 0.360545 | terraengineer_view.py | pypi |
__docformat__ = "numpy"
import logging
import textwrap
from datetime import datetime
from typing import Any, Tuple, Dict
import pandas as pd
import requests
from openbb_terminal.cryptocurrency.dataframe_helpers import (
denominate_number,
prettify_column_names,
lambda_replace_unicode,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Column order for governance-proposal tables.
# NOTE(review): not referenced within this module chunk — presumably
# consumed by the corresponding view/controller; verify before removing.
GOV_COLUMNS = [
    "submitTime",
    "id",
    "depositEndTime",
    "status",
    "type",
    "title",
    "Yes",
    "No",
]
# Proposal status filter values (lowercase; get_proposals() title-cases
# its `status` argument before comparing).
GOV_STATUSES = ["voting", "deposit", "passed", "rejected", "all"]
# Sortable columns produced by get_validators().
VALIDATORS_COLUMNS = [
    "validatorName",
    "tokensAmount",
    "votingPower",
    "commissionRate",
    "status",
    "uptime",
]
@log_start_end(log=logger)
def _make_request(endpoint: str) -> dict:
    """Helper method handles terra fcd api requests. [Source: https://fcd.terra.dev/v1]

    Parameters
    ----------
    endpoint: str
        endpoint url

    Returns
    -------
    dict:
        dictionary with response data; {} on any HTTP or decode failure
    """
    url = f"https://fcd.terra.dev/v1/{endpoint}"
    response = requests.get(
        url,
        headers={"Accept": "application/json", "User-Agent": "GST"},
        # requests has no default timeout; without one a stalled server
        # hangs the terminal indefinitely.
        timeout=30,
    )
    if not 200 <= response.status_code < 300:
        # Error pages are not guaranteed to be JSON, so fall back to the
        # raw body when the structured "type" field cannot be extracted.
        try:
            reason = response.json()["type"]
        except Exception:
            reason = response.text
        console.print(f"[red]fcd terra api exception: {reason}[/red]\n")
        return {}
    try:
        return response.json()
    except Exception as e:
        logger.exception("Invalid Response: %s", str(e))
        # Bug fix: the original called response.json() again here, which
        # re-raised the very decode error being handled.
        console.print(f"[red]fcd terra api exception: {response.text}[/red]\n")
        return {}
@log_start_end(log=logger)
def _adjust_delegation_info(delegation: dict) -> dict:
    """Strip redundant fields from a delegation record and denominate its
    value fields. [Source: https://fcd.terra.dev/v1]

    Parameters
    ----------
    delegation:
        dictionary object with delegation data

    Returns
    -------
    dict
        adjusted dictionary with delegation data
    """
    dropped_fields = {"validatorAddress", "rewards"}
    money_fields = {"amountDelegated", "totalReward"}
    return {
        field: denominate_number(content) if field in money_fields else content
        for field, content in delegation.items()
        if field not in dropped_fields
    }
@log_start_end(log=logger)
def get_staking_account_info(address: str = "") -> Tuple[pd.DataFrame, str]:
    """Get staking info for provided terra account [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    address: str
        terra blockchain address e.g. terra1jvwelvs7rdk6j3mqdztq5tya99w8lxk6l9hcqg

    Returns
    -------
    Tuple[pd.DataFrame, str]
        luna delegations and summary report for given address
    """
    response = _make_request(f"staking/{address}")
    results: Dict[str, Any] = {"myDelegations": []}
    # Headline balances; missing fields default to 0 (covers the {}
    # returned by _make_request on API failure).
    for field in ["availableLuna", "delegationTotal"]:
        results[field] = denominate_number(response.get(field, 0))
    my_delegations = response.get("myDelegations")
    if my_delegations:
        for delegation in my_delegations:
            validator = _adjust_delegation_info(delegation)
            results["myDelegations"].append(validator)
    df = pd.DataFrame(results["myDelegations"])
    try:
        df["validatorName"] = df["validatorName"].apply(
            lambda x: lambda_replace_unicode(x)
        )
        df.columns = prettify_column_names(list(df.columns))
    except KeyError:
        # No delegations -> the expected columns are absent; normalize to
        # an empty frame so callers can test df.empty.
        df = pd.DataFrame()
    results["totalRewards"] = denominate_number(
        response.get("rewards", {}).get("total", 0)
    )
    # NOTE: this literal is rendered verbatim — its internal whitespace is
    # part of the output.
    report = f"""Overview:
    Address: {address}
    Available Luna: {results['availableLuna']}
    Delegated Luna: {results['delegationTotal']}
    Total Rewards: {results['totalRewards']}\n"""
    report += "\nDelegations: " if not df.empty else "\nNo delegations found\n"
    return df, report
@log_start_end(log=logger)
def get_validators(sortby: str = "votingPower", ascend: bool = True) -> pd.DataFrame:
    """Get information about terra validators [Source: https://fcd.terra.dev/swagger]

    Parameters
    -----------
    sortby: str
        Key by which to sort data. Choose from:
        validatorName, tokensAmount, votingPower, commissionRate, status, uptime
    ascend: bool
        Flag to sort data ascending (True) or descending (False)

    Returns
    -------
    pd.DataFrame
        terra validators details (empty on API failure)
    """
    # .get guards against the {} returned by _make_request on API failure,
    # which previously raised KeyError here.
    response = _make_request("staking").get("validators", [])
    results = [
        {
            "accountAddress": validator["accountAddress"],
            "validatorName": validator["description"].get("moniker"),
            "tokensAmount": denominate_number(validator["tokens"]),
            # weight / rate / upTime come back as fractions; convert to %.
            "votingPower": round(
                (float(validator["votingPower"].get("weight")) * 100), 2
            ),
            "commissionRate": round(
                (float(validator["commissionInfo"].get("rate", 0)) * 100), 2
            ),
            "status": validator["status"],
            "uptime": round((float(validator.get("upTime", 0)) * 100), 2),
        }
        for validator in response
    ]
    df = pd.DataFrame(results)
    if not df.empty:
        df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_proposals(
    status: str = "", sortby: str = "id", ascend: bool = True, limit: int = 10
) -> pd.DataFrame:
    """Get terra blockchain governance proposals list [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    status: str
        status of proposal, one from list: ['Voting','Deposit','Passed','Rejected']
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display

    Returns
    -------
    pd.DataFrame
        Terra blockchain governance proposals list (empty on API failure)
    """
    statuses = ["Voting", "Deposit", "Passed", "Rejected"]
    # .get guards against the {} returned by _make_request on API failure,
    # which previously raised KeyError here.
    proposals = _make_request("gov/proposals").get("proposals", [])
    results = []
    votes_options = ["Yes", "Abstain", "No", "NoWithVeto"]
    for proposal in proposals:
        # Flatten the nested deposit/vote structures into flat columns.
        deposit = proposal.pop("deposit")
        proposal["depositEndTime"] = deposit.get("depositEndTime")
        vote = proposal.pop("vote")
        proposal.pop("proposer")
        for opt in votes_options:
            proposal[opt] = vote["count"].get(opt)
        results.append(proposal)
    if not results:
        # Selecting `columns` from an empty frame raises KeyError, so
        # return early when there is nothing to show.
        return pd.DataFrame()
    columns = [
        "id",
        "submitTime",
        "depositEndTime",
        "status",
        "type",
        "title",
        "Yes",
        "No",
        "Abstain",
        "NoWithVeto",
    ]
    df = pd.DataFrame(results)[columns]
    df[["id", "Yes", "No", "Abstain", "NoWithVeto"]] = df[
        ["id", "Yes", "No", "Abstain", "NoWithVeto"]
    ].astype(int, errors="ignore")
    # Wrap long titles so rendered tables stay within a reasonable width.
    df["title"] = df["title"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=40)) if isinstance(x, str) else x
    )
    for col in ["submitTime", "depositEndTime"]:
        df[col] = df[col].apply(lambda x: pd.to_datetime(x).strftime("%Y-%m-%d %H:%M"))
    if status.title() in statuses:
        df = df[df["status"] == status.title()]
    df = df.sort_values(by=sortby, ascending=ascend).head(limit)
    df.columns = prettify_column_names(df.columns)
    return df
@log_start_end(log=logger)
def get_account_growth(cumulative: bool = True) -> pd.DataFrame:
    """Get terra blockchain account growth history [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    cumulative: bool
        distinguish between periodical and cumulative account growth data

    Returns
    -------
    pd.DataFrame
        historical data of accounts growth (empty on API failure)
    """
    response = _make_request("dashboard/account_growth")
    kind = "cumulative" if cumulative else "periodic"
    # .get guards against the {} returned by _make_request on API failure,
    # which previously raised KeyError here.
    df = pd.DataFrame(response.get(kind, []))
    if df.empty:
        return df
    # Timestamps arrive as epoch milliseconds.
    df["date"] = df["datetime"].apply(lambda x: datetime.fromtimestamp(x / 1000).date())
    df = df[["date", "totalAccountCount", "activeAccountCount"]]
    df.columns = ["date", "Total accounts", "Active accounts"]
    return df
@log_start_end(log=logger)
def get_staking_ratio_history(limit: int = 200):
    """Get terra blockchain staking ratio history [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    limit: int
        The number of ratios to show

    Returns
    -------
    pd.DataFrame
        historical staking ratio, indexed by date (most recent first)
    """
    raw = _make_request("dashboard/staking_ratio")
    frame = pd.DataFrame(raw)
    # Epoch milliseconds -> calendar date; fractional ratio -> percentage.
    frame["date"] = frame["datetime"].apply(
        lambda ts: datetime.fromtimestamp(ts / 1000).date()
    )
    frame["stakingRatio"] = frame["stakingRatio"].apply(
        lambda ratio: round(float(ratio) * 100, 2)
    )
    return (
        frame[["date", "stakingRatio"]]
        .sort_values("date", ascending=False)
        .head(limit)
        .set_index("date")
    )
@log_start_end(log=logger)
def get_staking_returns_history(limit: int = 200):
    """Get terra blockchain staking returns history [Source: https://fcd.terra.dev/v1]

    Parameters
    ----------
    limit: int
        The number of returns to show

    Returns
    -------
    pd.DataFrame
        historical staking returns, indexed by date (most recent first)
    """
    raw = _make_request("dashboard/staking_return")
    frame = pd.DataFrame(raw)
    # Epoch milliseconds -> calendar date; fractional return -> percentage.
    frame["date"] = frame["datetime"].apply(
        lambda ts: datetime.fromtimestamp(ts / 1000).date()
    )
    frame["annualizedReturn"] = frame["annualizedReturn"].apply(
        lambda ret: round(float(ret) * 100, 2)
    )
    return (
        frame[["date", "annualizedReturn"]]
        .sort_values("date", ascending=False)
        .head(limit)
        .set_index("date")
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
from matplotlib import ticker
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import read_data_file
from openbb_terminal.cryptocurrency.defi import llama_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_grouped_defi_protocols(
    limit: int = 50, export: str = "", external_axes: Optional[List[plt.Axes]] = None
) -> None:
    """Plots top dApps (in terms of TVL) grouped by chain.
    [Source: https://docs.llama.fi/api]

    Parameters
    ----------
    limit: int
        Number of top dApps to display
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Full protocol table (with the Chain column kept) plus the distinct
    # chain names appearing among the top `limit` dApps.
    df = llama_model.get_defi_protocols(limit, drop_chain=False)
    chains = llama_model.get_grouped_defi_protocols(limit)
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=(14, 8), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    colors = iter(cfg.theme.get_colors(reverse=True))
    # One horizontal bar series per chain so the legend groups by chain.
    for chain in chains:
        chain_filter = df.loc[df.Chain == chain]
        ax.barh(
            y=chain_filter.index,
            width=chain_filter["TVL ($)"],
            label=chain,
            height=0.5,
            # Fallback color once the theme palette is exhausted.
            color=next(colors, "#B6A9CB"),
        )
    ax.set_xlabel("Total Value Locked ($)")
    ax.set_ylabel("Decentralized Application Name")
    ax.get_xaxis().set_major_formatter(
        ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    ax.set_title(f"Top {limit} dApp TVL grouped by chain")
    cfg.theme.style_primary_axis(ax)
    ax.tick_params(axis="y", labelsize=8)
    ax.yaxis.set_label_position("left")
    ax.yaxis.set_ticks_position("left")
    # Legend entries are shown in reverse insertion order.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[::-1], labels[::-1], loc="best")
    if not external_axes:
        cfg.theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "gdapps",
        # NOTE(review): `chains` is a list, not a DataFrame — confirm
        # export_data handles non-frame payloads.
        chains,
    )
@log_start_end(log=logger)
def display_defi_protocols(
    sortby: str,
    limit: int = 20,
    ascend: bool = False,
    description: bool = False,
    export: str = "",
) -> None:
    """Prints table showing information about listed DeFi protocols, their current TVL and changes to it in
    the last hour/day/week. [Source: https://docs.llama.fi/api]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    limit: int
        Number of records to display
    ascend: bool
        Flag to sort data descending
    description: bool
        Flag to display description of protocol
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    protocols = llama_model.get_defi_protocols(limit, sortby, ascend, description)
    print_rich_table(
        protocols.head(limit), headers=list(protocols.columns), show_index=False
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ldapps",
        protocols,
    )
@log_start_end(log=logger)
def display_historical_tvl(
    dapps: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots historical TVL of different dApps
    [Source: https://docs.llama.fi/api]

    Parameters
    ----------
    dapps: str
        dApps to search historical TVL. Should be split by , e.g.: anchor,sushiswap,pancakeswap
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Single-axis figure: create our own unless a valid one was supplied.
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    known_dapps = read_data_file("defillama_dapps.json")
    if isinstance(known_dapps, dict):
        # Plot one line per recognized dApp; warn about unknown names.
        for requested in dapps.split(","):
            if requested in known_dapps:
                history = llama_model.get_defi_protocol(requested)
                if not history.empty:
                    ax.plot(history, label=known_dapps[requested])
            else:
                print(f"{requested} not found\n")
        ax.set_ylabel("Total Value Locked ($)")
        ax.get_yaxis().set_major_formatter(
            ticker.FuncFormatter(lambda value, _: lambda_long_number_format(value))
        )
        cfg.theme.style_primary_axis(ax)
        ax.legend()
        ax.set_title("TVL in dApps")
        if not external_axes:
            cfg.theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "dtvl",
        None,
    )
@log_start_end(log=logger)
def display_defi_tvl(
    limit: int = 5,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots historical values of the total sum of TVLs from all listed protocols.
    [Source: https://docs.llama.fi/api]

    Parameters
    ----------
    limit: int
        Number of records to display, by default 5
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Single-axis figure: create our own unless a valid one was supplied.
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    total_tvl = llama_model.get_defi_tvl()
    # Export the full history; only the trailing window is plotted.
    export_df = total_tvl.copy()
    recent = total_tvl.tail(limit)

    ax.plot(recent["date"], recent["totalLiquidityUSD"], ms=2)
    ax.set_ylabel("Total Value Locked ($)")
    ax.set_title("Total Value Locked in DeFi")
    ax.get_yaxis().set_major_formatter(
        ticker.FuncFormatter(lambda value, _: lambda_long_number_format(value))
    )
    cfg.theme.style_primary_axis(ax)
    if not external_axes:
        cfg.theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "stvl",
        export_df,
    )
__docformat__ = "numpy"
import logging
import textwrap
from datetime import datetime
import numpy as np
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import lambda_long_number_format
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_replace_underscores_in_column_names,
)
logger = logging.getLogger(__name__)
API_URL = "https://api.llama.fi"
LLAMA_FILTERS = [
"tvl",
"symbol",
"category",
"chains",
"change_1h",
"change_1d",
"change_7d",
"name",
]
@log_start_end(log=logger)
def get_defi_protocols(
    limit: int = 100,
    sortby: str = "",
    ascend: bool = False,
    description: bool = False,
    drop_chain: bool = True,
) -> pd.DataFrame:
    """Returns information about listed DeFi protocols, their current TVL and changes to it in the last hour/day/week.
    [Source: https://docs.llama.fi/api]

    Parameters
    ----------
    limit: int
        The number of dApps to display
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    description: bool
        Flag to display description of protocol
    drop_chain: bool
        Whether to drop the chain column

    Returns
    -------
    pd.DataFrame
        Information about DeFi protocols

    Raises
    ------
    Exception
        On non-200 HTTP status from the API
    ValueError
        When the API payload cannot be parsed into the expected shape
    """
    # Explicit timeout: requests would otherwise wait forever on a stalled
    # server.
    response = requests.get(API_URL + "/protocols", timeout=30)
    columns = [
        "name",
        "symbol",
        "category",
        "chains",
        "change_1h",
        "change_1d",
        "change_7d",
        "tvl",
        "url",
        "description",
        "chain",
    ]
    if response.status_code != 200:
        raise Exception(f"Status code: {response.status_code}. Reason: {response.text}")
    try:
        df = pd.DataFrame(response.json())
        df.replace({float(np.nan): None}, inplace=True)
        # Wrap list/str fields so downstream tables stay readable.
        df["chains"] = df["chains"].apply(
            lambda x: "\n".join(textwrap.wrap(", ".join(x), width=50))
        )
        df["description"] = df["description"].apply(
            lambda x: "\n".join(textwrap.wrap(x, width=70)) if isinstance(x, str) else x
        )
        df = df[columns]
    except Exception as e:
        logger.exception("Wrong response type: %s", str(e))
        raise ValueError("Wrong response type\n") from e

    df = df.set_index("name")
    if sortby:
        df = df.sort_values(by=sortby, ascending=ascend)
    if drop_chain:
        df = df.drop(columns="chain")

    # NOTE: this turns the tvl column into formatted strings for display.
    df["tvl"] = df["tvl"].apply(lambda_long_number_format)

    if description:
        orig = ["name", "symbol", "category", "description", "url"]
        selection = [x for x in orig if x in df.columns]
        df = df[selection]
    else:
        df.drop(["description", "url"], axis=1, inplace=True)

    df.columns = [lambda_replace_underscores_in_column_names(val) for val in df.columns]
    df.rename(
        columns={
            "Change 1H": "Change 1H (%)",
            "Change 1D": "Change 1D (%)",
            "Change 7D": "Change 7D (%)",
            "Tvl": "TVL ($)",
        },
        inplace=True,
    )
    return df.head(limit)
@log_start_end(log=logger)
def get_defi_protocol(protocol: str) -> pd.DataFrame:
    """Returns information about historical tvl of a defi protocol.
    [Source: https://docs.llama.fi/api]

    Parameters
    ----------
    protocol: str
        Name of the protocol

    Returns
    -------
    pd.DataFrame
        Historical tvl, indexed by date

    Raises
    ------
    Exception
        On non-200 HTTP status from the API
    """
    url = f"{API_URL}/protocol/{protocol}"
    # Explicit timeout, consistent with the other endpoints in this module.
    r = requests.get(url, timeout=30)
    # Fail loudly on HTTP errors (consistency with get_defi_protocols /
    # get_defi_tvl); previously a bad response fell through to r.json()
    # and surfaced as an opaque decode/KeyError.
    if r.status_code != 200:
        raise Exception(f"Status code: {r.status_code}. Reason: {r.text}")
    data = r.json()
    df = pd.DataFrame(data["tvl"])
    # API timestamps are epoch seconds.
    df.date = pd.to_datetime(df.date, unit="s")
    df = df.set_index("date")
    return df
@log_start_end(log=logger)
def get_grouped_defi_protocols(
    limit: int = 50,
) -> list:
    """Get the distinct chains hosting the top dApps (in terms of TVL).
    [Source: https://docs.llama.fi/api]

    Parameters
    ----------
    limit: int
        Number of top dApps to consider

    Returns
    -------
    list
        Unique chain names among the top `limit` dApps.
        NOTE(review): the original annotation/docstring claimed
        pd.DataFrame, but the implementation returns `.tolist()` — a
        plain list of chain labels.
    """
    df = get_defi_protocols(limit, drop_chain=False)
    # groupby(...).size().index yields the sorted unique Chain values.
    return df.groupby("Chain").size().index.values.tolist()
@log_start_end(log=logger)
def get_defi_tvl() -> pd.DataFrame:
    """Returns historical values of the total sum of TVLs from all listed protocols.
    [Source: https://docs.llama.fi/api]

    Returns
    -------
    pd.DataFrame
        Historical values of total sum of Total Value Locked from all listed protocols.

    Raises
    ------
    Exception
        On non-200 HTTP status from the API
    ValueError
        When the API payload cannot be parsed
    """
    # Explicit timeout, consistent with the other endpoints in this module.
    response = requests.get(API_URL + "/charts", timeout=30)
    if response.status_code != 200:
        raise Exception(f"Status code: {response.status_code}. Reason: {response.text}")
    try:
        df = pd.DataFrame(response.json())
        # API timestamps are epoch seconds (as strings).
        df["date"] = df["date"].apply(lambda x: datetime.fromtimestamp(int(x)).date())
        return df
    except Exception as e:
        logger.exception("Wrong response data: %s", str(e))
        raise ValueError("Wrong response data") from e
__docformat__ = "numpy"
import logging
import os
from typing import Optional
from openbb_terminal.cryptocurrency.defi import coindix_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
print_rich_table,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_defi_vaults(
    chain: Optional[str] = None,
    protocol: Optional[str] = None,
    kind: Optional[str] = None,
    limit: int = 10,
    sortby: str = "apy",
    ascend: bool = True,
    link: bool = False,
    export: str = "",
) -> None:
    """Prints table showing Top DeFi Vaults - pools of funds with an assigned strategy which main goal is to
    maximize returns of its crypto assets. [Source: https://coindix.com/]

    Parameters
    ----------
    chain: str
        Blockchain - one from list [
        'ethereum', 'polygon', 'avalanche', 'bsc', 'terra', 'fantom',
        'moonriver', 'celo', 'heco', 'okex', 'cronos', 'arbitrum', 'eth',
        'harmony', 'fuse', 'defichain', 'solana', 'optimism'
        ]
    protocol: str
        DeFi protocol - one from list: [
        'aave', 'acryptos', 'alpaca', 'anchor', 'autofarm', 'balancer', 'bancor',
        'beefy', 'belt', 'compound', 'convex', 'cream', 'curve', 'defichain', 'geist',
        'lido', 'liquity', 'mirror', 'pancakeswap', 'raydium', 'sushi', 'tarot', 'traderjoe',
        'tulip', 'ubeswap', 'uniswap', 'venus', 'yearn'
        ]
    kind: str
        Kind/type of vault - one from list: ['lp','single','noimploss','stable']
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    link: bool
        Flag to show links
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = coindix_model.get_defi_vaults(
        chain=chain, protocol=protocol, kind=kind, sortby=sortby, ascend=ascend
    )
    if df.empty:
        print(
            f"Couldn't find any vaults for "
            f"{'' if not chain else 'chain: ' + chain}"
            f"{'' if not protocol else ', protocol: ' + protocol}"
            f"{'' if not kind else ', kind:' + kind}"
        )
        return

    # Bug fix: `link` means "show links" (see docstring), but the original
    # condition was `if link is True`, which dropped the Link column
    # exactly when the user asked to see it.  Drop only when NOT requested.
    if not link:
        df.drop("Link", axis=1, inplace=True)

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Top DeFi Vaults",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "vaults",
        df,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.defi import cryptosaurio_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_anchor_data(
    address: str = "",
    export: str = "",
    show_transactions: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots anchor protocol earnings data of a certain terra address
    [Source: https://cryptosaurio.com/]

    Parameters
    ----------
    address : str
        Terra address. Valid terra addresses start with 'terra'
    export : str
        Export dataframe data to csv,json,xlsx file
    show_transactions : bool
        Flag to show history of transactions in Anchor protocol for address. Default False
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # df: earnings time series; df_deposits: transaction history;
    # stats_str: pre-formatted summary text.
    df, df_deposits, stats_str = cryptosaurio_model.get_anchor_data(address=address)
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    console.print(f"\n{stats_str}\n")
    if show_transactions:
        print_rich_table(
            df_deposits,
            headers=list(df_deposits.columns),
            show_index=False,
            title="Transactions history in Anchor Earn",
        )
    ax.plot(df["time"], df["yield"])
    ax.set_ylabel("Earnings Value [UST]")
    ax.set_title("Earnings in Anchor Earn")
    cfg.theme.style_primary_axis(ax)
    if not external_axes:
        cfg.theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "anchor",
        df,
    )
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_very_long_number_formatter,
)
from openbb_terminal.cryptocurrency.defi import graph_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_uni_tokens(
    skip: int = 0,
    limit: int = 20,
    sortby: str = "index",
    ascend: bool = False,
    export: str = "",
) -> None:
    """Prints table showing tokens trade-able on Uniswap DEX.
    [Source: https://thegraph.com/en/]

    Parameters
    ----------
    skip: int
        Number of records to skip
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = graph_model.get_uni_tokens(skip=skip)
    # Keep the raw (unformatted) numbers for export.
    df_data = df.copy()

    # Convert to float so sorting is numeric rather than lexical.
    df["tradeVolumeUSD"] = df["tradeVolumeUSD"].astype(float)
    df["totalLiquidity"] = df["totalLiquidity"].astype(float)
    df["txCount"] = df["txCount"].astype(float)

    df = df.sort_values(by=sortby, ascending=ascend)

    # Human-readable abbreviations for display (values become strings).
    df[["totalLiquidity", "tradeVolumeUSD"]] = df[
        ["totalLiquidity", "tradeVolumeUSD"]
    ].applymap(lambda_very_long_number_formatter)

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        # Bug fix: user-facing title typo "UniSwarp" -> "Uniswap".
        title="Uniswap DEX Trade-able Tokens",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "tokens",
        df_data,
    )
@log_start_end(log=logger)
def display_uni_stats(export: str = "") -> None:
    """Prints table showing base statistics about Uniswap DEX.
    [Source: https://thegraph.com/en/]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    stats = graph_model.get_uniswap_stats()
    raw_stats = stats.copy()

    print_rich_table(
        stats,
        headers=list(stats.columns),
        show_index=False,
        title="Uniswap DEX Base Statistics",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "stats",
        raw_stats,
    )
@log_start_end(log=logger)
def display_recently_added(
    limit: int = 20,
    days: int = 7,
    min_volume: int = 20,
    min_liquidity: int = 0,
    min_tx: int = 100,
    sortby: str = "created",
    ascend: bool = False,
    export: str = "",
) -> None:
    """Prints table showing recently added pairs on Uniswap DEX.
    [Source: https://thegraph.com/en/]

    Parameters
    ----------
    limit: int
        Number of records to display
    days: int
        How many days back to look for newly added pairs
    min_volume: int
        Minimum trading volume
    min_liquidity: int
        Minimum liquidity
    min_tx: int
        Minimum number of transactions
    sortby: str
        Key by which to sort data
    ascend: bool
        Whether to sort in ascending order
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    pairs = graph_model.get_uniswap_pool_recently_added(
        last_days=days,
        min_volume=min_volume,
        min_liquidity=min_liquidity,
        min_tx=min_tx,
    )
    raw_pairs = pairs.copy()

    # Subgraph values arrive as strings; convert so the sort is numeric.
    for column in ("volumeUSD", "txCount", "totalSupply"):
        pairs[column] = pairs[column].astype(float)

    pairs = pairs.sort_values(by=sortby, ascending=ascend)

    # Shorten huge dollar/supply figures for display (raw values are exported).
    pairs[["volumeUSD", "totalSupply"]] = pairs[["volumeUSD", "totalSupply"]].applymap(
        lambda_very_long_number_formatter
    )

    print_rich_table(
        pairs.head(limit),
        headers=list(pairs.columns),
        show_index=False,
        title="Latest Added Pairs on Uniswap DEX",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "pairs",
        raw_pairs,
    )
@log_start_end(log=logger)
def display_uni_pools(
    limit: int = 20, sortby: str = "volumeUSD", ascend: bool = True, export: str = ""
) -> None:
    """Prints table showing uniswap pools by volume.
    [Source: https://thegraph.com/en/]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data. The table can be sorted by every of its columns
        (see https://bit.ly/3ORagr1 then press ctrl-enter or execute the query).
    ascend: bool
        Whether to sort in ascending order
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    pools = graph_model.get_uni_pools_by_volume()

    # Cast string values to float so sorting compares numbers, not text.
    pools["volumeUSD"] = pools["volumeUSD"].astype(float)
    pools["txCount"] = pools["txCount"].astype(float)

    pools = pools.sort_values(by=sortby, ascending=ascend)
    pools["volumeUSD"] = pools["volumeUSD"].apply(lambda_very_long_number_formatter)

    # Snapshot taken after formatting, matching what the table shows.
    raw_pools = pools.copy()

    print_rich_table(
        pools.head(limit),
        headers=list(pools.columns),
        show_index=False,
        title="Uniswap Pools",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "pools",
        raw_pools,
    )
@log_start_end(log=logger)
def display_last_uni_swaps(
    limit: int = 10, sortby: str = "timestamp", ascend: bool = False, export: str = ""
) -> None:
    """Prints table showing last swaps done on Uniswap
    [Source: https://thegraph.com/en/]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data. The table can be sorted by every of its columns
        (see https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2).
    ascend: bool
        Whether to sort in ascending order
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # Sorting is delegated to the model, which validates the column name.
    df = graph_model.get_last_uni_swaps(limit=limit, sortby=sortby, ascend=ascend)

    print_rich_table(
        df, headers=list(df.columns), show_index=False, title="Last Uniswap Swaps"
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "swaps",
        df,
    )
__docformat__ = "numpy"
import datetime
import logging
import pandas as pd
import requests
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_very_long_number_formatter,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
# pylint: disable=unsupported-assignment-operation

logger = logging.getLogger(__name__)

# Uniswap v2 subgraph endpoint on The Graph.
UNI_URL = "https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2"

# Column names users may sort the corresponding views/tables by.
SWAPS_FILTERS = ["Datetime", "USD", "From", "To"]

POOLS_FILTERS = [
    "volumeUSD",
    "token0.name",
    "token0.symbol",
    "token1.name",
    "token1.symbol",
    "txCount",
]

TOKENS_FILTERS = [
    "index",
    "symbol",
    "name",
    "tradeVolumeUSD",
    "totalLiquidity",
    "txCount",
]

PAIRS_FILTERS = [
    "created",
    "pair",
    "token0",
    "token1",
    "volumeUSD",
    "txCount",
    "totalSupply",
]

# TODO: convert USD values to int. otherwise sort by these columns won't work
@log_start_end(log=logger)
def query_graph(url: str, query: str, timeout: int = 30) -> dict:
    """Helper methods for querying graphql api. [Source: https://thegraph.com/en/]

    Parameters
    ----------
    url: str
        Endpoint url
    query: str
        Graphql query
    timeout: int
        Request timeout in seconds; prevents the call from hanging forever
        on a slow/unresponsive endpoint.

    Returns
    -------
    dict:
        Dictionary with response data; empty dict on any non-200 response.
    """
    # Without an explicit timeout, requests can block indefinitely.
    response = requests.post(url, json={"query": query}, timeout=timeout)

    if response.status_code == 200:
        return response.json()["data"]
    return {}
@log_start_end(log=logger)
def get_uni_tokens(
    skip: int = 0, limit: int = 100, sortby: str = "index", ascend: bool = False
) -> pd.DataFrame:
    """Get list of tokens trade-able on Uniswap DEX. [Source: https://thegraph.com/en/]

    Parameters
    ----------
    skip: int
        Skip n number of records.
    limit: int
        Show n number of records.
    sortby: str
        The column to sort by
    ascend: bool
        Whether to sort in ascending order

    Returns
    -------
    pd.DataFrame
        Uniswap tokens with trading volume, transaction count, liquidity.
    """
    # The subgraph caps page size at 1000 records per request.
    limit = min(limit, 1000)

    query = f"""
    {{
    tokens(first: {limit}, skip:{skip}) {{
        symbol
        name
        tradeVolumeUSD
        totalLiquidity
        txCount
    }}
    }}
    """

    response = query_graph(UNI_URL, query)
    if not response:
        return pd.DataFrame()

    tokens = pd.DataFrame(response["tokens"]).reset_index()
    return tokens.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_uniswap_stats() -> pd.DataFrame:
    """Get base statistics about Uniswap DEX. [Source: https://thegraph.com/en/]

    uniswapFactory id: 0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f - ethereum address on which Uniswap Factory
    smart contract was deployed. The factory contract is deployed once from the off-chain source code, and it contains
    functions that make it possible to create exchange contracts for any ERC20 token that does not already have one.
    It also functions as a registry of ERC20 tokens that have been added to the system, and the exchange with which they
    are associated. More: https://docs.uniswap.org/protocol/V1/guides/connect-to-uniswap
    We use 0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f address to fetch all smart contracts that were
    created with usage of this factory.

    Returns
    -------
    pd.DataFrame
        Uniswap DEX statistics like liquidity, volume, number of pairs, number of transactions.
    """
    # Note: the original query requested totalLiquidityUSD twice; the duplicate
    # field has been removed.
    query = """
    {
    uniswapFactory(id: "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f"){
      totalVolumeUSD
      totalLiquidityUSD
      pairCount
      txCount
      totalLiquidityETH
    }
    }
    """
    data = query_graph(UNI_URL, query)
    if not data:
        return pd.DataFrame()

    # Flatten the single result record into a two-column Metric/Value table.
    df = pd.Series(data["uniswapFactory"]).reset_index()
    df.columns = ["Metric", "Value"]
    df["Value"] = df["Value"].apply(lambda x: lambda_very_long_number_formatter(x))
    return df
@log_start_end(log=logger)
def get_uniswap_pool_recently_added(
    last_days: int = 14,
    min_volume: int = 100,
    min_liquidity: int = 0,
    min_tx: int = 100,
) -> pd.DataFrame:
    """Get lastly added trade-able pairs on Uniswap with parameters like:
    * number of days the pair has been active,
    * minimum trading volume,
    * minimum liquidity,
    * number of transactions.

    [Source: https://thegraph.com/en/]

    Parameters
    ----------
    last_days: int
        How many days back to look for added pairs.
    min_volume: int
        Minimum volume
    min_liquidity: int
        Minimum liquidity
    min_tx: int
        Minimum number of transactions done in given pool.

    Returns
    -------
    pd.DataFrame
        Lastly added pairs on Uniswap DEX.
    """
    # Unix timestamp for "last_days ago"; pairs created after it are returned.
    days = int(
        (datetime.datetime.now() - datetime.timedelta(days=last_days)).timestamp()
    )

    query = f"""
    {{
    pairs(first: 1000,
        where: {{createdAtTimestamp_gt: "{days}", volumeUSD_gt: "{min_volume}", reserveUSD_gt: "{min_liquidity}",
        txCount_gt: "{min_tx}" }},
        orderBy: createdAtTimestamp, orderDirection: desc) {{
      token0 {{
        symbol
        name
      }}
      token1 {{
        symbol
        name
      }}
     reserveUSD
     volumeUSD
     createdAtTimestamp
     totalSupply
     txCount
      }}
    }}
    """

    payload = query_graph(UNI_URL, query)
    if not payload:
        return pd.DataFrame()

    pairs = pd.json_normalize(payload["pairs"])
    pairs["createdAtTimestamp"] = pairs["createdAtTimestamp"].apply(
        lambda ts: datetime.datetime.fromtimestamp(int(ts))
    )
    # Human-readable pair label, e.g. "WETH/USDC".
    pairs["pair"] = pairs["token0.symbol"] + "/" + pairs["token1.symbol"]

    pairs = pairs.rename(
        columns={
            "createdAtTimestamp": "created",
            "token0.name": "token0",
            "token1.name": "token1",
        }
    )

    columns = [
        "created",
        "pair",
        "token0",
        "token1",
        "volumeUSD",
        "txCount",
        "totalSupply",
    ]
    return pairs[columns]
@log_start_end(log=logger)
def get_uni_pools_by_volume() -> pd.DataFrame:
    """Get uniswap pools by volume. [Source: https://thegraph.com/en/]

    Returns
    -------
    pd.DataFrame
        Trade-able pairs listed on Uniswap by top volume.
    """
    # Filter out dust pools: require some liquidity and trading volume.
    query = """
    {
    pairs(first: 1000, where: {reserveUSD_gt: "1000", volumeUSD_gt: "10000"},
    orderBy: volumeUSD, orderDirection: desc) {
      token0 {
        symbol
        name
      }
      token1 {
        symbol
        name
      }
     volumeUSD
     txCount
      }
    }
    """
    payload = query_graph(UNI_URL, query)
    if not payload:
        return pd.DataFrame()

    pools = pd.json_normalize(payload["pairs"])
    keep = [
        "token0.name",
        "token0.symbol",
        "token1.name",
        "token1.symbol",
        "volumeUSD",
        "txCount",
    ]
    return pools[keep]
@log_start_end(log=logger)
def get_last_uni_swaps(
    limit: int = 100, sortby: str = "timestamp", ascend: bool = False
) -> pd.DataFrame:
    """Get the last swaps done on Uniswap [Source: https://thegraph.com/en/]

    Parameters
    ----------
    limit: int
        Number of swaps to return. Maximum possible number: 1000.
    sortby: str
        Key by which to sort data. The table can be sorted by every of its columns
        (see https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2).
    ascend: bool
        Whether to sort in ascending order

    Returns
    -------
    pd.DataFrame
        Last `limit` swaps on Uniswap
    """
    # The subgraph caps page size at 1000 records per request.
    limit = min(limit, 1000)
    query = f"""
    {{
    swaps(first: {limit}, orderBy: timestamp, orderDirection: desc) {{
      timestamp
      pair {{
        token0 {{
          symbol
        }}
        token1 {{
          symbol
        }}
      }}
      amountUSD
      }}
    }}
    """
    data = query_graph(UNI_URL, query)
    if not data:
        return pd.DataFrame()

    df = pd.json_normalize(data["swaps"])
    df["timestamp"] = df["timestamp"].apply(
        lambda x: datetime.datetime.fromtimestamp(int(x))
    )
    df["amountUSD"] = df["amountUSD"].apply(lambda x: round(float(x), 2))

    # Friendly column names for display; note sortby refers to these names.
    to_rename = {
        "timestamp": "Datetime",
        "amountUSD": "USD",
        "pair.token0.symbol": "From",
        "pair.token1.symbol": "To",
    }
    df = df.rename(columns=to_rename)
    if sortby in df.columns:
        df = df.sort_values(by=sortby, ascending=ascend)
    else:
        console.print(f"[red]Dataframe does not have column {sortby}[/red]\n")
    return df
__docformat__ = "numpy"
import logging
from typing import Optional
import urllib3
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent, lambda_long_number_format
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger(__name__)
# Columns returned by the coindix API that the views expose/sort by.
VAULTS_FILTERS = ["name", "chain", "protocol", "apy", "tvl", "link"]

# Blockchains recognized by the coindix search endpoint.
CHAINS = [
    "ethereum",
    "polygon",
    "avalanche",
    "bsc",
    "terra",
    "fantom",
    "moonriver",
    "celo",
    "heco",
    "okex",
    "cronos",
    "arbitrum",
    "eth",
    "harmony",
    "fuse",
    "defichain",
    "solana",
    "optimism",
    "kusama",
    "metis",
    "osmosis",
]

# DeFi protocols recognized by the coindix search endpoint.
# NOTE: the original list contained "tulip" twice; the duplicate was removed.
PROTOCOLS = [
    "aave",
    "acryptos",
    "alpaca",
    "anchor",
    "autofarm",
    "balancer",
    "bancor",
    "beefy",
    "belt",
    "compound",
    "convex",
    "cream",
    "curve",
    "defichain",
    "geist",
    "lido",
    "liquity",
    "mirror",
    "pancakeswap",
    "raydium",
    "sushi",
    "tarot",
    "traderjoe",
    "tulip",
    "ubeswap",
    "uniswap",
    "venus",
    "yearn",
    "osmosis",
]

# Vault kinds accepted by the API's `kind` parameter.
VAULT_KINDS = [
    "lp",
    "single",
    "noimploss",
    "stable",
]
@log_start_end(log=logger)
def _prepare_params(**kwargs) -> dict:
    """Helper method, which handles preparation of parameters for requests to coindix api.

    Parameters
    ----------
    kwargs: keyword arguments: chain, kind, protocol

    Returns
    -------
    dict:
        Prepared parameters for request (all values lower-cased).
    """
    # Baseline query: top-APY vaults with at least $1m TVL, any kind.
    prepared = {"sort": "-apy", "tvl": "1m", "kind": "all"}
    valid_choices = {"chain": CHAINS, "protocol": PROTOCOLS, "kind": VAULT_KINDS}

    # Only accept keyword values that appear in the corresponding allow-list;
    # anything unrecognized is silently dropped.
    for name, choice in kwargs.items():
        if choice in valid_choices.get(name, []):
            prepared[name] = choice

    return {key: value.lower() for key, value in prepared.items()}
@log_start_end(log=logger)
def get_defi_vaults(
    chain: Optional[str] = None,
    protocol: Optional[str] = None,
    kind: Optional[str] = None,
    ascend: bool = True,
    sortby: str = "apy",
) -> pd.DataFrame:
    """Get DeFi Vaults Information. DeFi Vaults are pools of funds with an assigned strategy which main goal is to
    maximize returns of its crypto assets. [Source: https://coindix.com/]

    Parameters
    ----------
    chain: Optional[str]
        Blockchain - one of the values in CHAINS (e.g. 'ethereum', 'polygon', ...)
    protocol: Optional[str]
        DeFi protocol - one of the values in PROTOCOLS (e.g. 'aave', 'curve', ...)
    kind: Optional[str]
        Kind/type of vault - one from list: ['lp','single','noimploss','stable']
    ascend: bool
        Whether to sort in ascending order
    sortby: str
        Key by which to sort data

    Returns
    -------
    pd.DataFrame
        Top 100 DeFi Vaults for given chain/protocol sorted by APY.
    """
    headers = {"User-Agent": get_user_agent()}
    params = _prepare_params(chain=chain, protocol=protocol, kind=kind)
    # NOTE(review): verify=False deliberately disables TLS verification (the
    # matching urllib3 warning is suppressed at module import) — confirm this
    # is still required for this endpoint. A timeout is set so the call cannot
    # hang indefinitely.
    response = requests.get(
        "https://apiv2.coindix.com/search",
        headers=headers,
        params=params,
        verify=False,
        timeout=30,
    )
    if not 200 <= response.status_code < 300:
        raise Exception(f"Coindix api exception: {response.text}")

    try:
        data = response.json()["data"]
        if len(data) == 0:
            return pd.DataFrame()
        df = pd.DataFrame(data)[VAULTS_FILTERS]
    except Exception as e:
        logger.exception(e)
        raise ValueError(f"Invalid Response: {response.text}") from e

    df = df.sort_values(by=sortby, ascending=ascend).fillna("NA")
    # Human-readable formatting for the display columns.
    df["tvl"] = df["tvl"].apply(lambda x: lambda_long_number_format(x))
    df["apy"] = df["apy"].apply(
        lambda x: f"{str(round(x * 100, 2))} %" if isinstance(x, (int, float)) else x
    )
    df.columns = [x.title() for x in df.columns]
    df.rename(columns={"Apy": "APY (%)", "Tvl": "TVL ($)"}, inplace=True)
    return df
__docformat__ = "numpy"
import os
import logging
from typing import Optional, List
from matplotlib import pyplot as plt
from openbb_terminal.decorators import check_api_key
from openbb_terminal.cryptocurrency.defi import smartstake_model
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
# pylint: disable=E1101
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_SMARTSTAKE_KEY", "API_SMARTSTAKE_TOKEN"])
def display_luna_circ_supply_change(
    days: int = 30,
    export: str = "",
    supply_type: str = "lunaSupplyChallengeStats",
    limit: int = 5,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots and prints table showing Luna circulating supply stats

    Parameters
    ----------
    days: int
        Number of days
    supply_type: str
        Supply type to unpack json
    export: str
        Export type
    limit: int
        Number of results display on the terminal
        Default: 5
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = smartstake_model.get_luna_supply_stats(supply_type, days)

    if df.empty:
        return

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(
        df.index,
        df["circulatingSupplyInMil"],
        c="black",
        label="Circulating Supply",
    )
    ax.plot(
        df.index,
        df["liquidCircSupplyInMil"],
        c="red",
        label="Liquid Circulating Supply",
    )
    ax.plot(
        df.index, df["stakeFromCircSupplyInMil"], c="green", label="Stake of Supply"
    )
    ax.plot(
        df.index,
        df["recentTotalLunaBurntInMil"],
        c="blue",
        label="Supply Reduction (Luna Burnt)",
    )

    ax.grid()
    ax.set_ylabel("Millions")
    ax.set_xlabel("Time")
    ax.set_title("Luna Circulating Supply Changes (In Millions)")
    ax.set_xlim(df.index[0], df.index[-1])
    ax.legend(loc="best")

    theme.style_primary_axis(ax)

    # BUG FIX: was `if external_axes is None`, which skipped rendering the
    # newly created figure when an empty list was passed; every sibling view
    # uses the truthiness check, so do the same here.
    if not external_axes:
        theme.visualize_output()

    RAW_COLS = [
        "circulatingSupplyInMil",
        "liquidCircSupplyInMil",
        "circSupplyChangeInMil",
        "recentTotalLunaBurntInMil",
    ]

    # Export the raw numeric columns before the index is re-formatted below.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "lcsc",
        df[RAW_COLS],
    )

    df.index = df.index.strftime("%Y-%m-%d")
    df = df.sort_index(ascending=False)

    print_rich_table(
        df[RAW_COLS].head(limit),
        headers=[
            "Circ Supply",
            "Liquid Circ Supply",
            "Supply Change",
            "Supply Reduction (Luna Burnt)",
        ],
        show_index=True,
        index_name="Time",
        title="Luna Circulating Supply Changes (in Millions)",
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
from matplotlib import ticker
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.dataframe_helpers import (
prettify_column_names,
lambda_very_long_number_formatter,
)
from openbb_terminal.cryptocurrency.defi import terramoney_fcd_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_account_staking_info(
    address: str = "", limit: int = 10, export: str = ""
) -> None:
    """Prints table showing staking info for provided terra account address [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    address: str
        terra blockchain address e.g. terra1jvwelvs7rdk6j3mqdztq5tya99w8lxk6l9hcqg
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    staking_df, report = terramoney_fcd_model.get_staking_account_info(address)

    if staking_df.empty:
        console.print(f"[red]No data found for address {address}\n[/red]")
    else:
        print_rich_table(
            staking_df.head(limit),
            headers=list(staking_df.columns),
            show_index=False,
            title=report,
        )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "sinfo",
        staking_df,
    )
@log_start_end(log=logger)
def display_validators(
    limit: int = 10, sortby: str = "votingPower", ascend: bool = True, export: str = ""
) -> None:
    """Prints table showing information about terra validators [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data. Choose from:
        validatorName, tokensAmount, votingPower, commissionRate, status, uptime
    ascend: bool
        Whether to sort in ascending order
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    validators = terramoney_fcd_model.get_validators(sortby, ascend)
    raw_validators = validators.copy()

    # Abbreviate huge token counts for display (raw values are exported).
    validators["tokensAmount"] = validators["tokensAmount"].apply(
        lambda_very_long_number_formatter
    )

    # Tag percentage-valued columns with a "%" suffix after prettifying names.
    percent_columns = {"Voting power", "Commission rate", "Uptime"}
    validators.columns = [
        f"{name} %" if name in percent_columns else name
        for name in prettify_column_names(validators.columns)
    ]

    print_rich_table(
        validators.head(limit),
        headers=list(validators.columns),
        floatfmt=".2f",
        show_index=False,
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "validators",
        raw_validators,
    )
@log_start_end(log=logger)
def display_gov_proposals(
    limit: int = 10,
    status: str = "all",
    sortby: str = "id",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing terra blockchain governance proposals list [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    limit: int
        Number of records to display
    status: str
        status of proposal, one from list: ['Voting','Deposit','Passed','Rejected']
    sortby: str
        Key by which to sort data
    ascend: bool
        Whether to sort in ascending order
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # Filtering, sorting and truncation are all done by the model.
    proposals = terramoney_fcd_model.get_proposals(status, sortby, ascend, limit)

    print_rich_table(
        proposals,
        headers=list(proposals.columns),
        floatfmt=".2f",
        show_index=False,
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "govp",
        proposals,
    )
@log_start_end(log=logger)
def display_account_growth(
    kind: str = "total",
    cumulative: bool = False,
    limit: int = 90,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots terra blockchain account growth history [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    limit: int
        Number of records to display
    kind: str
        display total account count or active account count. One from list [active, total]
    cumulative: bool
        Flag to show cumulative or discrete values. For active accounts only discrete value are available.
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = terramoney_fcd_model.get_account_growth(cumulative)
    # Fall back to a safe default rather than failing on an unknown kind.
    if kind not in ["active", "total"]:
        kind = "total"
    options = {"total": "Total accounts", "active": "Active accounts"}
    opt = options[kind]
    # BUG FIX: the original compared the display label ("Total accounts")
    # against "total", so the title always read "Daily" even for cumulative
    # totals. Compare the normalized kind instead.
    label = "Cumulative" if cumulative and kind == "total" else "Daily"

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    df = df.sort_values("date", ascending=False).head(limit)
    df = df.set_index("date")

    start, end = df.index[-1], df.index[0]

    if cumulative:
        ax.plot(df[opt], label=df[opt])
    else:
        ax.bar(x=df.index, height=df[opt], label=df[opt])

    ax.set_ylabel(f"{opt}")
    ax.get_yaxis().set_major_formatter(
        ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    ax.set_title(f"{label} number of {opt.lower()} in period from {start} to {end}")
    cfg.theme.style_primary_axis(ax)

    if not external_axes:
        cfg.theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "gacc",
        df,
    )
@log_start_end(log=logger)
def display_staking_ratio_history(
    limit: int = 90,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots terra blockchain staking ratio history [Source: https://fcd.terra.dev/v1]

    Parameters
    ----------
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    ratio_df = terramoney_fcd_model.get_staking_ratio_history(limit)
    first, last = ratio_df.index[-1], ratio_df.index[0]

    # Single-axis chart: use caller-provided axes when given, else make our own.
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return
        (ax,) = external_axes
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)

    ax.plot(ratio_df, label=ratio_df["stakingRatio"])
    ax.set_ylabel("Staking ratio [%]")
    ax.set_title(f"Staking ratio from {first} to {last}")

    cfg.theme.style_primary_axis(ax)

    if not external_axes:
        cfg.theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "sratio",
        ratio_df,
    )
@log_start_end(log=logger)
def display_staking_returns_history(
    limit: int = 90,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots terra blockchain staking returns history [Source: https://fcd.terra.dev/swagger]

    Parameters
    ----------
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Fetch data before creating a figure (consistent with the sibling views,
    # and avoids leaving an empty figure behind if the request fails).
    df = terramoney_fcd_model.get_staking_returns_history(limit)

    start, end = df.index[-1], df.index[0]

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(df, label=df["annualizedReturn"])
    ax.set_ylabel("Staking returns [%]")
    ax.set_title(f"Staking returns from {start} to {end}")

    cfg.theme.style_primary_axis(ax)

    if not external_axes:
        cfg.theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "sreturn",
        df,
    )
__docformat__ = "numpy"
import concurrent.futures
import logging
import textwrap
from typing import List
import pandas as pd
import requests
from bs4 import BeautifulSoup
from dateutil import parser
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def scrape_substack(url: str) -> List[List[str]]:
    """Helper method to scrape newsletters from substack.
    [Source: substack.com]

    Parameters
    ----------
    url: str
        Url to newsletter on substack domain.

    Returns
    -------
    List[List[str]]
        List of lists containing:
            - title of newsletter
            - url to newsletter
            - str datetime of newsletter [Format: "%Y-%m-%d %H:%M:%S"]
    """
    # A request without a timeout can hang the whole thread pool used by
    # get_newsletters; cap it explicitly.
    req = requests.get(url, timeout=10)
    soup = BeautifulSoup(req.text, features="lxml")
    results: List[List[str]] = []
    posts = soup.find("div", class_="portable-archive-list").find_all(
        "div", class_="post-preview portable-archive-post has-image has-author-line"
    )
    for post in posts:
        title: str = post.a.text
        post_url: str = post.a["href"]
        time: str = post.find("time").get("datetime")
        results.append([title, post_url, time])
    return results
@log_start_end(log=logger)
def get_newsletters() -> pd.DataFrame:
    """Scrape all substack newsletters from url list.
    [Source: substack.com]

    Returns
    -------
    pd.DataFrame
        DataFrame with recent news from most popular DeFi related newsletters.
    """
    urls = [
        "https://defiweekly.substack.com/archive",
        "https://newsletter.thedefiant.io/archive",
        "https://thedailygwei.substack.com/archive",
        "https://todayindefi.substack.com/archive",
        "https://newsletter.banklesshq.com/archive",
        "https://defislate.substack.com/archive",
    ]
    threads = len(urls)
    newsletters = []
    # Scrape all archives concurrently, one worker per url.
    with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
        for newsletter in executor.map(scrape_substack, urls):
            try:
                newsletters.append(pd.DataFrame(newsletter))
            except KeyError as e:
                logger.exception(str(e))
                console.print(e, "\n")
                continue

    df = pd.concat(newsletters, ignore_index=True)
    df.columns = ["Title", "Link", "Date"]

    # Drop non-ASCII characters so titles render cleanly in the terminal.
    df["Title"] = df["Title"].apply(lambda x: "".join(i for i in x if ord(i) < 128))
    df["Date"] = df["Date"].apply(
        lambda x: parser.parse(x).strftime("%Y-%m-%d %H:%M:%S")
    )
    df["Title"] = df["Title"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x
    )
    return (
        df[["Title", "Date", "Link"]]
        .sort_values(by="Date", ascending=False)
        # BUG FIX: reset_index(drop=...) expects a bool; the original passed
        # the string "index", which only worked because it is truthy.
        .reset_index(drop=True)
    )
__docformat__ = "numpy"
import logging
from typing import Tuple
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
api_url = "https://barney.cryptosaurio.com"
@log_start_end(log=logger)
def get_anchor_data(address: str = "") -> Tuple[pd.DataFrame, pd.DataFrame, str]:
    """Returns anchor protocol earnings data of a certain terra address
    [Source: https://cryptosaurio.com/]

    Parameters
    ----------
    address : str
        Terra address. Valid terra addresses start with 'terra'

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame, str]
        - pd.DataFrame: Earnings over time in UST
        - pd.DataFrame: History of transactions
        - str: Overall statistics

    Raises
    ------
    Exception
        If the address is not a terra address or the API request fails.
    """
    if not address.startswith("terra"):
        raise Exception(
            "Select a valid address. Valid terra addresses start with 'terra'"
        )

    # Explicit timeout so the terminal never hangs on a stalled endpoint.
    response = requests.get(
        f"{api_url}/get-anchor-protocol-data-v2/{address}", timeout=30
    )
    if response.status_code != 200:
        raise Exception(f"Status code: {response.status_code}. Reason: {response.text}")

    data = response.json()
    # Historical yield is returned newest-first; reverse into chronological order.
    df = pd.DataFrame(reversed(data["historicalData"]))
    df["time"] = pd.to_datetime(df["time"])
    df["yield"] = df["yield"].astype("float64")

    df_deposits = pd.DataFrame(data["deposits"], columns=["out", "fee", "time"])
    df_deposits["out"] = df_deposits["out"].astype("float64")
    df_deposits["fee"] = df_deposits["fee"].astype("float64")
    df_deposits["time"] = pd.to_datetime(df_deposits["time"])
    # Positive "out" amounts are deposits, everything else a withdrawal.
    df_deposits["Type"] = df_deposits.apply(
        lambda row: "Deposit" if row.out > 0 else "Withdrawal", axis=1
    )
    df_deposits.columns = ["Amount [UST]", "Fee [UST]", "Date", "Type"]
    df_deposits = df_deposits[["Type", "Amount [UST]", "Fee [UST]", "Date"]]

    stats_str = f"""Current anchor APY is {data['currentRate']}%
Deposit amount in Anchor Earn of address {address} is {data["totalYield"]["ustHoldings"]} UST.
You already earned [bold]{df.iloc[-1, 1]}[/bold] UST in Anchor Earn.
Your deposit is generating approximately:
- {data["estimatedYield"]["perHour"]} UST hourly
- {data["estimatedYield"]["perDay"]} UST daily
- {data["estimatedYield"]["perWeek"]} UST weekly
- {data["estimatedYield"]["perMonth"]} UST monthly
- {data["estimatedYield"]["perYear"]} UST yearly"""

    return df, df_deposits, stats_str
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import check_api_key
from openbb_terminal import config_plot as cfgPlot
from openbb_terminal.cryptocurrency.due_diligence.santiment_model import (
get_github_activity,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_SANTIMENT_KEY"])
def display_github_activity(
    symbol: str,
    start_date: Optional[str] = None,
    dev_activity: bool = False,
    end_date: Optional[str] = None,
    interval: str = "1d",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots github activity for a given coin over a time interval.
    [Source: https://santiment.net/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check github activity
    start_date : Optional[str]
        Initial date like string (e.g., 2021-10-01)
    dev_activity: bool
        Whether to filter only for development activity
    end_date : Optional[str]
        End date like string (e.g., 2021-10-01)
    interval : str
        Interval frequency (some possible values are: 1h, 1d, 1w)
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = get_github_activity(
        symbol=symbol,
        dev_activity=dev_activity,
        start_date=start_date,
        end_date=end_date,
        interval=interval,
    )

    if df.empty:
        return

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(df.index, df["value"])
    ax.set_title(f"{symbol}'s Github activity over time")
    ax.set_ylabel(f"{symbol}'s Activity count")
    ax.set_xlim(df.index[0], df.index[-1])
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "gh",
        df,
    )
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.due_diligence.tokenterminal_model import (
get_fundamental_metric_from_project,
get_project_ids,
get_description,
METRICS,
)
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_TOKEN_TERMINAL_KEY"])
def display_fundamental_metric_from_project_over_time(
    metric: str,
    project: str,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots fundamental metric from a project over time [Source: Token Terminal]

    Parameters
    ----------
    metric : str
        The metric of interest. See `get_possible_metrics()` for available metrics.
    project : str
        The project of interest. See `get_project_ids()` for available categories.
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Validate both inputs against the upstream catalogs before fetching.
    if project not in get_project_ids():
        console.print(
            f"[red]'{project}' project selected is invalid. See available projects with def get_project_ids()[/red]\n"
        )
        return
    if metric not in METRICS:
        console.print(
            f"[red]'{metric}' metric selected is invalid. See available metrics with get_possible_metrics()[/red]\n"
        )
        return
    metric_over_time = get_fundamental_metric_from_project(metric, project)
    if metric_over_time.empty:
        console.print("[red]No data found.[/red]\n")
        return
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Series whose peak is >= 10000 are rescaled to millions so the y-axis
    # stays readable; the same threshold selects the unit label below.
    ax.plot(
        metric_over_time.index,
        metric_over_time.values
        if max(metric_over_time.values) < 10000
        else metric_over_time.values / 1e6,
    )
    ax.set_xlabel("Time")
    if max(metric_over_time.values) < 10000:
        labeltouse = "[USD]"
    else:
        labeltouse = "[1M USD]"
    ax.set_ylabel(f"{metric.replace('_', ' ').capitalize()} {labeltouse}")
    ax.set_xlim([metric_over_time.index[0], metric_over_time.index[-1]])
    ax.set_title(
        f"{project.replace('_', ' ').capitalize()} {metric.replace('_', ' ').capitalize()}"
    )
    theme.style_primary_axis(ax)
    # Only render the figure ourselves when we also created it.
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "funot",
        metric_over_time,
    )
@log_start_end(log=logger)
@check_api_key(["API_TOKEN_TERMINAL_KEY"])
def display_description(project: str, export: str = ""):
    """Prints description from a project [Source: Token Terminal]

    Parameters
    ----------
    project : str
        The project of interest. See `get_project_ids()` for available categories.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # Reject unknown projects before issuing any request.
    if project not in get_project_ids():
        console.print(
            f"[red]'{project}' project selected is invalid. See available projects with def get_project_ids()[/red]\n"
        )
        return

    description = get_description(project)
    # Print each section as "TITLE\n text".
    for section, text in description.items():
        console.print(f"{section.replace('_', ' ').upper()}\n {text}\n")

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "desc",
        pd.DataFrame(description.values(), index=description.keys()),
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
import numpy as np
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import plot_order_book
from openbb_terminal.cryptocurrency.due_diligence import ccxt_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_order_book(
    exchange: str,
    symbol: str,
    to_symbol: str,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots order book for a coin in a given exchange
    [Source: https://docs.ccxt.com/en/latest/manual.html]

    Parameters
    ----------
    exchange : str
        exchange id
    symbol : str
        coin symbol
    to_symbol : str
        currency to compare coin against
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes, forwarded to plot_order_book, by default None
    """
    market_book = ccxt_model.get_orderbook(
        exchange=exchange, symbol=symbol, to_symbol=to_symbol
    )
    bids = np.asarray(market_book["bids"], dtype=float)
    asks = np.asarray(market_book["asks"], dtype=float)
    # Insert cumulative volume as a third column for the depth chart.
    # NOTE(review): asks are flipped before cumsum but not flipped back —
    # verify the intended depth direction against plot_order_book.
    bids = np.insert(bids, 2, bids[:, 1].cumsum(), axis=1)
    asks = np.insert(asks, 2, np.flipud(asks[:, 1]).cumsum(), axis=1)
    plot_order_book(
        bids,
        asks,
        f"{exchange.upper()}:{symbol.upper()}/{to_symbol.upper()}",
        external_axes,
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ob",
        market_book,
    )
@log_start_end(log=logger)
def display_trades(
    exchange: str, symbol: str, to_symbol: str, limit: int = 10, export: str = ""
):
    """Prints table showing trades for a coin in a given exchange
    [Source: https://docs.ccxt.com/en/latest/manual.html]

    Parameters
    ----------
    exchange : str
        exchange id
    symbol : str
        coin symbol
    to_symbol : str
        currency to compare coin against
    limit : int
        number of trades to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    trades = ccxt_model.get_trades(
        exchange_id=exchange, symbol=symbol, to_symbol=to_symbol
    )

    pair = f"{exchange.upper()}:{symbol.upper()}/{to_symbol.upper()}"
    print_rich_table(
        trades.head(limit),
        headers=list(trades.columns),
        show_index=False,
        title=f"Trades for {pair}",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "trades",
        trades,
    )
__docformat__ = "numpy"
# flake8: noqa
# pylint: disable=C0301,C0302
import logging
from typing import Any, Optional, Tuple
from datetime import datetime, timedelta
import re
import pandas as pd
import requests
from openbb_terminal import config_terminal as cfg
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_replace_underscores_in_column_names,
prettify_column_names,
)
from openbb_terminal.cryptocurrency.due_diligence.pycoingecko_model import (
get_coin_tokenomics,
)
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import lambda_long_number_format
from openbb_terminal.rich_config import console
# pylint: disable=unsupported-assignment-operation
logger = logging.getLogger(__name__)
# Interval frequencies accepted by the Messari time-series endpoints.
INTERVALS_TIMESERIES = ["5m", "15m", "30m", "1h", "1d", "1w"]
# pylint: disable=unsupported-assignment-operation
@log_start_end(log=logger)
def get_available_timeseries(only_free: bool = True) -> pd.DataFrame:
    """Returns available messari timeseries
    [Source: https://messari.io/]

    Parameters
    ----------
    only_free : bool
        Display only timeseries available for free

    Returns
    -------
    pd.DataFrame
        available timeseries
    """
    response = requests.get("https://data.messari.io/api/v1/assets/metrics")
    if response.status_code != 200:
        return pd.DataFrame()

    metrics = response.json()["data"]["metrics"]
    rows = [
        {
            "id": metric["metric_id"],
            "Title": metric["name"],
            "Description": metric["description"],
            # Paywalled metrics carry a "role_restriction" field.
            "Requires Paid Key": "role_restriction" in metric,
            "Sources": ",".join(
                source["name"] for source in metric["source_attribution"]
            ),
        }
        for metric in metrics
    ]
    df = pd.DataFrame(rows).set_index("id")
    if only_free:
        df = df[~df["Requires Paid Key"]]
    return df
# Base URLs for the Messari REST API (v1 and v2 endpoints).
base_url = "https://data.messari.io/api/v1/"
base_url2 = "https://data.messari.io/api/v2/"
@log_start_end(log=logger)
def get_marketcap_dominance(
    symbol: str,
    interval: str = "1d",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Returns market dominance of a coin over time
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check market cap dominance
    interval : str
        Interval frequency (possible values are: 5m, 15m, 30m, 1h, 1d, 1w)
    start_date : Optional[str]
        Initial date like string (e.g., 2021-10-01); defaults to one year ago
    end_date : Optional[str]
        End date like string (e.g., 2021-10-01); defaults to today

    Returns
    -------
    pd.DataFrame
        market dominance percentage over time

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> mcapdom_df = openbb.crypto.dd.mcapdom(symbol="BTC")
    """
    # Default window: trailing year.
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=365)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    result = get_messari_timeseries(
        symbol=symbol,
        timeseries_id="mcap.dom",
        interval=interval,
        start_date=start_date,
        end_date=end_date,
    )
    if not result:
        return pd.DataFrame()
    df, _ = result
    return df
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_messari_timeseries(
    symbol: str,
    timeseries_id: str,
    interval: str = "1d",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> Tuple[pd.DataFrame, str]:
    """Returns messari timeseries
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check messari timeseries
    timeseries_id : str
        Messari timeseries id
    interval : str
        Interval frequency (possible values are: 5m, 15m, 30m, 1h, 1d, 1w)
    start_date : Optional[str]
        Initial date like string (e.g., 2021-10-01); defaults to one year ago
    end_date : Optional[str]
        End date like string (e.g., 2021-10-01); defaults to today

    Returns
    -------
    Tuple[pd.DataFrame, str]
        Messari timeseries over time,
        Timeseries title
    """
    # Default window: trailing year.
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=365)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    url = base_url + f"assets/{symbol}/metrics/{timeseries_id}/time-series"
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    parameters = {
        "start": start_date,
        "end": end_date,
        "interval": interval,
    }
    r = requests.get(url, params=parameters, headers=headers)
    df = pd.DataFrame()
    title = ""
    if r.status_code == 200:
        data = r.json()["data"]
        title = data["schema"]["name"]
        df = pd.DataFrame(data["values"], columns=data["parameters"]["columns"])
        if df.empty:
            console.print(f"No data found for {symbol}.\n")
        else:
            # Timestamps arrive as epoch milliseconds.
            df = df.set_index("timestamp")
            df.index = pd.to_datetime(df.index, unit="ms")
    elif r.status_code == 401:
        # Messari answers 401 both for bad keys and for paywalled metrics;
        # distinguish via the response body.
        if "requires a pro or enterprise subscription" in r.text:
            console.print("[red]API Key not authorized for Premium Feature[/red]\n")
        else:
            console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return df, title
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_links(symbol: str) -> pd.DataFrame:
    """Returns asset's links
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check links

    Returns
    -------
    pd.DataFrame
        asset links (empty frame on any error)
    """
    url = base_url2 + f"assets/{symbol}/profile"
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/general/overview/official_links"}
    r = requests.get(url, headers=headers, params=params)
    if r.status_code == 200:
        data = r.json()["data"]
        df = pd.DataFrame(data["profile"]["general"]["overview"]["official_links"])
        df.columns = map(str.capitalize, df.columns)
        return df
    # FIX: use console.print (not bare print) so the rich color markup
    # renders, consistent with every other function in this module.
    if r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return pd.DataFrame()
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_roadmap(symbol: str, ascend: bool = True) -> pd.DataFrame:
    """Returns coin roadmap
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check roadmap
    ascend: bool
        Sort by date ascending when True, descending when False

    Returns
    -------
    pd.DataFrame
        roadmap sorted by date, with missing values shown as "Unknown"
    """
    url = base_url2 + f"assets/{symbol}/profile"
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/general/roadmap"}
    r = requests.get(url, headers=headers, params=params)
    df = pd.DataFrame()
    if r.status_code == 200:
        data = r.json()["data"]
        df = pd.DataFrame(data["profile"]["general"]["roadmap"])
        df["date"] = pd.to_datetime(df["date"])
        df.columns = map(str.capitalize, df.columns)
        df = df.dropna(axis=1, how="all")
        df["Date"] = df["Date"].dt.date
        # BUG FIX: the sorted/filled frame used to be built in a separate
        # `show_df` variable that was never returned, so `ascend` had no
        # effect on the result. Sort and fill the returned frame directly.
        df = df.sort_values(by="Date", ascending=ascend)
        df.fillna("Unknown", inplace=True)
    elif r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return df
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_tokenomics(symbol: str, coingecko_id: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Returns coin tokenomics
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check tokenomics
    coingecko_id : str
        ID from coingecko

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Metric Value tokenomics,
        Circulating supply overtime
    """
    url = base_url2 + f"assets/{symbol}/profile"
    # BUG FIX: the API key header was hard-coded to an empty string, so
    # requests went out unauthenticated despite the @check_api_key decorator.
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/economics/consensus_and_emission"}
    r = requests.get(url, headers=headers, params=params)
    df = pd.DataFrame()
    circ_df = pd.DataFrame()
    if r.status_code == 200:
        data = r.json()["data"]
        tokenomics_data = data["profile"]["economics"]["consensus_and_emission"]
        df = pd.DataFrame(
            {
                "Metric": [
                    "Emission Type",
                    "Consensus Mechanism",
                    "Consensus Details",
                    "Mining Algorithm",
                    "Block Reward",
                ],
                "Value": [
                    tokenomics_data["supply"]["general_emission_type"],
                    tokenomics_data["consensus"]["general_consensus_mechanism"],
                    tokenomics_data["consensus"]["consensus_details"],
                    tokenomics_data["consensus"]["mining_algorithm"],
                    tokenomics_data["consensus"]["block_reward"],
                ],
            }
        )
        df["Value"] = df["Value"].str.replace("n/a", "-")
        # Enrich with CoinGecko tokenomics for the same asset.
        cg_df = get_coin_tokenomics(coingecko_id)
        df = pd.concat([df, cg_df], ignore_index=True, sort=False)
        df.fillna("-", inplace=True)
        # NOTE(review): empty start/end dates are passed through here —
        # verify get_messari_timeseries/str_date_to_timestamp accept them.
        circ_df, _ = get_messari_timeseries(
            symbol=symbol,
            timeseries_id="sply.circ",
            interval="1d",
            start_date="",
            end_date="",
        )
    elif r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return df, circ_df
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_project_product_info(
    symbol: str,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Returns coin product info
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check product info

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]
        Metric, Value with project and technology details,
        Coin public repos,
        Coin audits,
        Coin known exploits/vulns
    """
    url = base_url2 + f"assets/{symbol}/profile"
    # BUG FIX: the API key header was hard-coded to an empty string, so
    # requests went out unauthenticated despite the @check_api_key decorator.
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/general/overview/project_details,profile/technology"}
    r = requests.get(url, headers=headers, params=params)
    df = pd.DataFrame()
    if r.status_code == 200:
        data = r.json()["data"]
        project_details = data["profile"]["general"]["overview"]["project_details"]
        technology_data = data["profile"]["technology"]
        technology_details = technology_data["overview"]["technology_details"]
        df_info = pd.DataFrame(
            {
                "Metric": ["Project Details", "Technology Details"],
                "Value": [project_details, technology_details],
            }
        )
        df_repos = pd.DataFrame(technology_data["overview"]["client_repositories"])
        df_repos.columns = prettify_column_names(df_repos.columns)
        df_repos.fillna("-", inplace=True)
        df_audits = pd.DataFrame(technology_data["security"]["audits"])
        df_audits.columns = prettify_column_names(df_audits.columns)
        if not df_audits.empty:
            df_audits["Date"] = pd.to_datetime(df_audits["Date"])
        df_audits.fillna("-", inplace=True)
        df_vulns = pd.DataFrame(
            technology_data["security"]["known_exploits_and_vulnerabilities"]
        )
        df_vulns.columns = prettify_column_names(df_vulns.columns)
        if not df_vulns.empty:
            df_vulns["Date"] = pd.to_datetime(df_vulns["Date"])
        df_vulns.fillna("-", inplace=True)
        return df_info, df_repos, df_audits, df_vulns
    if r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return df, df, df, df
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_team(symbol: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Returns coin team
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check team

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Individuals,
        Organizations
    """
    url = base_url2 + f"assets/{symbol}/profile"
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/contributors"}
    r = requests.get(url, headers=headers, params=params)
    df = pd.DataFrame()
    if r.status_code == 200:
        data = r.json()["data"]
        df_individual_contributors = pd.DataFrame(
            data["profile"]["contributors"]["individuals"]
        )
        if not df_individual_contributors.empty:
            df_individual_contributors.fillna("-", inplace=True)
            # Merge first/last name into a single leading "Name" column,
            # then drop the raw fields and cosmetic columns.
            df_individual_contributors.insert(
                0,
                "Name",
                df_individual_contributors[["first_name", "last_name"]].apply(
                    lambda x: " ".join(x), axis=1
                ),
            )
            df_individual_contributors.drop(
                ["slug", "avatar_url", "first_name", "last_name"],
                axis=1,
                inplace=True,
                errors="ignore",
            )
            df_individual_contributors.columns = map(
                str.capitalize, df_individual_contributors.columns
            )
            # Replace literal None cells left over from the JSON payload.
            df_individual_contributors.replace(
                to_replace=[None], value="-", inplace=True
            )
        df_organizations_contributors = pd.DataFrame(
            data["profile"]["contributors"]["organizations"]
        )
        if not df_organizations_contributors.empty:
            df_organizations_contributors.drop(
                ["slug", "logo"], axis=1, inplace=True, errors="ignore"
            )
            df_organizations_contributors.columns = map(
                str.capitalize, df_organizations_contributors.columns
            )
            df_organizations_contributors.replace(
                to_replace=[None], value="-", inplace=True
            )
        return df_individual_contributors, df_organizations_contributors
    if r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return df, df
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_investors(symbol: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Returns coin investors
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check investors

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Individuals,
        Organizations
    """
    url = base_url2 + f"assets/{symbol}/profile"
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/investors"}
    r = requests.get(url, headers=headers, params=params)
    df = pd.DataFrame()
    if r.status_code == 200:
        data = r.json()["data"]
        df_individual_investors = pd.DataFrame(
            data["profile"]["investors"]["individuals"]
        )
        if not df_individual_investors.empty:
            df_individual_investors.fillna("-", inplace=True)
            # Merge first/last name into a single leading "Name" column,
            # then drop the raw fields and cosmetic columns.
            df_individual_investors.insert(
                0,
                "Name",
                df_individual_investors[["first_name", "last_name"]].apply(
                    lambda x: " ".join(x), axis=1
                ),
            )
            df_individual_investors.drop(
                ["slug", "avatar_url", "first_name", "last_name"],
                axis=1,
                inplace=True,
                errors="ignore",
            )
            df_individual_investors.columns = map(
                str.capitalize, df_individual_investors.columns
            )
            # Replace literal None cells left over from the JSON payload.
            df_individual_investors.replace(to_replace=[None], value="-", inplace=True)
        df_organizations_investors = pd.DataFrame(
            data["profile"]["investors"]["organizations"]
        )
        if not df_organizations_investors.empty:
            df_organizations_investors.drop(
                ["slug", "logo"], axis=1, inplace=True, errors="ignore"
            )
            df_organizations_investors.columns = map(
                str.capitalize, df_organizations_investors.columns
            )
            df_organizations_investors.replace(
                to_replace=[None], value="-", inplace=True
            )
        return df_individual_investors, df_organizations_investors
    if r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return df, df
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_governance(symbol: str) -> Tuple[str, pd.DataFrame]:
    """Returns coin governance
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check governance

    Returns
    -------
    Tuple[str, pd.DataFrame]
        Governance summary (HTML tags stripped),
        Metric Value with on-chain governance details (empty frame when the
        asset has no on-chain governance type/details)
    """
    url = base_url2 + f"assets/{symbol}/profile"
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/governance"}
    r = requests.get(url, headers=headers, params=params)
    df = pd.DataFrame()
    if r.status_code == 200:
        data = r.json()["data"]
        governance_data = data["profile"]["governance"]
        # Only tabulate on-chain details when both fields are populated.
        if (
            governance_data["onchain_governance"]["onchain_governance_type"] is not None
            and governance_data["onchain_governance"]["onchain_governance_details"]
            is not None
        ):
            return (
                # Strip HTML tags from the free-text summary.
                re.sub("<[^>]*>", "", governance_data["governance_details"]),
                pd.DataFrame(
                    {
                        "Metric": ["Type", "Details"],
                        "Value": [
                            governance_data["onchain_governance"][
                                "onchain_governance_type"
                            ],
                            governance_data["onchain_governance"][
                                "onchain_governance_details"
                            ],
                        ],
                    }
                ),
            )
        return (
            re.sub("<[^>]*>", "", governance_data["governance_details"]),
            df,
        )
    if r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return "", df
def format_addresses(x: Any) -> str:
    """Render a list of address dicts as a single readable string.

    Parameters
    ----------
    x : Any
        Iterable of dicts carrying "name" and "link" keys (as returned in the
        Messari treasury-accounts payload).

    Returns
    -------
    str
        Comma-separated "name: link" pairs.
    """
    # BUG FIX: entries used to be concatenated with no separator, producing
    # runs like "name1: link1name2: link2" for multi-address rows.
    return ", ".join(f"{address['name']}: {address['link']}" for address in x)
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def get_fundraising(
    symbol: str,
) -> Tuple[str, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Returns coin fundraising
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check fundraising

    Returns
    -------
    Tuple[str, pd.DataFrame, pd.DataFrame, pd.DataFrame]
        Launch summary,
        Sales rounds,
        Treasury Accounts,
        Metric Value launch details

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> fundraise = openbb.crypto.dd.fr(symbol="BTC")
    """
    url = base_url2 + f"assets/{symbol}/profile"
    headers = {"x-messari-api-key": cfg.API_MESSARI_KEY}
    params = {"fields": "profile/economics/launch"}
    r = requests.get(url, headers=headers, params=params)
    df = pd.DataFrame()
    if r.status_code == 200:
        data = r.json()["data"]
        launch_data = data["profile"]["economics"]["launch"]
        launch_details = launch_data["general"]["launch_details"]
        launch_type = launch_data["general"]["launch_style"]
        # --- Fundraising (sales) rounds table ---
        launch_fundraising_rounds = pd.DataFrame(
            launch_data["fundraising"]["sales_rounds"]
        )
        if not launch_fundraising_rounds.empty:
            launch_fundraising_rounds.fillna("-", inplace=True)
            # Drop asset-denominated and verbose fields; only USD figures shown.
            launch_fundraising_rounds.drop(
                [
                    "details",
                    "asset_collected",
                    "price_per_token_in_asset",
                    "amount_collected_in_asset",
                    "is_kyc_required",
                    "restricted_jurisdictions",
                ],
                axis=1,
                inplace=True,
                errors="ignore",
            )
            launch_fundraising_rounds.columns = [
                lambda_replace_underscores_in_column_names(val)
                for val in launch_fundraising_rounds.columns
            ]
            # Keep only the date part of the ISO "YYYY-MM-DDT..." timestamps.
            launch_fundraising_rounds["Start Date"] = launch_fundraising_rounds.apply(
                lambda x: x["Start Date"].split("T")[0], axis=1
            )
            launch_fundraising_rounds["End Date"] = launch_fundraising_rounds.apply(
                lambda x: x["End Date"].split("T")[0], axis=1
            )
            launch_fundraising_rounds.rename(
                columns={
                    "Native Tokens Allocated": "Tokens Allocated",
                    "Equivalent Price Per Token In Usd": "Price [$]",
                    "Amount Collected In Usd": "Amount Collected [$]",
                },
                inplace=True,
            )
            launch_fundraising_rounds.fillna("-", inplace=True)
        # --- Treasury accounts that held the sale proceeds ---
        launch_fundraising_accounts = pd.DataFrame(
            launch_data["fundraising"]["sales_treasury_accounts"]
        )
        if not launch_fundraising_accounts.empty:
            launch_fundraising_accounts.columns = [
                lambda_replace_underscores_in_column_names(val)
                for val in launch_fundraising_accounts.columns
            ]
            launch_fundraising_accounts.drop(
                ["Asset Held", "Security"], inplace=True, axis=1
            )
            # Flatten the list of address dicts into a display string.
            launch_fundraising_accounts["Addresses"] = launch_fundraising_accounts[
                "Addresses"
            ].map(format_addresses)
        # --- Metric/Value summary of the initial distribution ---
        launch_distribution = pd.DataFrame(
            {
                "Metric": [
                    "Genesis Date",
                    "Type",
                    "Total Supply",
                    "Investors [%]",
                    "Organization/Founders [%]",
                    "Rewards/Airdrops [%]",
                ],
                "Value": [
                    launch_data["initial_distribution"]["genesis_block_date"].split(
                        "T"
                    )[0]
                    if launch_data["initial_distribution"]["genesis_block_date"]
                    else "-",
                    launch_type,
                    lambda_long_number_format(
                        launch_data["initial_distribution"]["initial_supply"]
                    ),
                    launch_data["initial_distribution"]["initial_supply_repartition"][
                        "allocated_to_investors_percentage"
                    ],
                    launch_data["initial_distribution"]["initial_supply_repartition"][
                        "allocated_to_organization_or_founders_percentage"
                    ],
                    launch_data["initial_distribution"]["initial_supply_repartition"][
                        "allocated_to_premined_rewards_or_airdrops_percentage"
                    ],
                ],
            }
        )
        return (
            launch_details,
            launch_fundraising_rounds,
            launch_fundraising_accounts,
            launch_distribution,
        )
    if r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    return "", df, df, df
from datetime import datetime, timedelta
import logging
import json
from typing import Optional
import pandas as pd
import requests
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end, check_api_key
from openbb_terminal.rich_config import console
from openbb_terminal.helper_funcs import str_date_to_timestamp
# pylint: disable=unsupported-assignment-operation
logger = logging.getLogger(__name__)
# pylint: disable=unsupported-assignment-operation

# Base URL for all Glassnode v1 metric endpoints used in this module.
api_url = "https://api.glassnode.com/v1/metrics/"

# Assets for which Glassnode exposes hashrate data.
GLASSNODE_SUPPORTED_HASHRATE_ASSETS = ["BTC", "ETH"]

# Exchange identifiers accepted by the exchange-related endpoints.
# NOTE(review): "aggregated" presumably combines all exchanges — confirm
# against the Glassnode API documentation.
GLASSNODE_SUPPORTED_EXCHANGES = [
    "aggregated",
    "binance",
    "bittrex",
    "coinex",
    "gate.io",
    "gemini",
    "huobi",
    "kucoin",
    "poloniex",
    "bibox",
    "bigone",
    "bitfinex",
    "hitbtc",
    "kraken",
    "okex",
    "bithumb",
    "zb.com",
    "cobinhood",
    "bitmex",
    "bitstamp",
    "coinbase",
    "coincheck",
    "luno",
]

# Asset tickers accepted by the Glassnode endpoints used in this module.
GLASSNODE_SUPPORTED_ASSETS = [
    "BTC",
    "ETH",
    "LTC",
    "AAVE",
    "ABT",
    "AMPL",
    "ANT",
    "ARMOR",
    "BADGER",
    "BAL",
    "BAND",
    "BAT",
    "BIX",
    "BNT",
    "BOND",
    "BRD",
    "BUSD",
    "BZRX",
    "CELR",
    "CHSB",
    "CND",
    "COMP",
    "CREAM",
    "CRO",
    "CRV",
    "CVC",
    "CVP",
    "DAI",
    "DDX",
    "DENT",
    "DGX",
    "DHT",
    "DMG",
    "DODO",
    "DOUGH",
    "DRGN",
    "ELF",
    "ENG",
    "ENJ",
    "EURS",
    "FET",
    "FTT",
    "FUN",
    "GNO",
    "GUSD",
    "HEGIC",
    "HOT",
    "HPT",
    "HT",
    "HUSD",
    "INDEX",
    "KCS",
    "LAMB",
    "LBA",
    "LDO",
    "LEO",
    "LINK",
    "LOOM",
    "LRC",
    "MANA",
    "MATIC",
    "MCB",
    "MCO",
    "MFT",
    "MIR",
    "MKR",
    "MLN",
    "MTA",
    "MTL",
    "MX",
    "NDX",
    "NEXO",
    "NFTX",
    "NMR",
    "Nsure",
    "OCEAN",
    "OKB",
    "OMG",
    "PAX",
    "PAY",
    "PERP",
    "PICKLE",
    "PNK",
    "PNT",
    "POLY",
    "POWR",
    "PPT",
    "QASH",
    "QKC",
    "QNT",
    "RDN",
    "REN",
    "REP",
    "RLC",
    "ROOK",
    "RPL",
    "RSR",
    "SAI",
    "SAN",
    "SNT",
    "SNX",
    "STAKE",
    "STORJ",
    "sUSD",
    "SUSHI",
    "TEL",
    "TOP",
    "UBT",
    "UMA",
    "UNI",
    "USDC",
    "USDK",
    "USDT",
    "UTK",
    "VERI",
    "WaBi",
    "WAX",
    "WBTC",
    "WETH",
    "wNMX",
    "WTC",
    "YAM",
    "YFI",
    "ZRX",
]

# Interval frequencies accepted by the hashrate / active-address endpoints.
INTERVALS_HASHRATE = ["24h", "1w", "1month"]
INTERVALS_ACTIVE_ADDRESSES = ["24h", "1w", "1month"]
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def get_close_price(
    symbol: str,
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    print_errors: bool = True,
) -> pd.DataFrame:
    """Returns the price of a cryptocurrency
    [Source: https://glassnode.com]

    Parameters
    ----------
    symbol : str
        Crypto to check close price (BTC or ETH)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD; defaults to today
    print_errors: bool
        Flag to print errors. Default: True

    Returns
    -------
    pd.DataFrame
        price over time (empty frame on error or no data)
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    # Glassnode expects epoch-second bounds as strings.
    query = {
        "api_key": cfg.API_GLASSNODE_KEY,
        "a": symbol,
        "i": "24h",
        "s": str(str_date_to_timestamp(start_date)),
        "u": str(str_date_to_timestamp(end_date)),
    }
    response = requests.get(api_url + "market/price_usd_close", params=query)

    if response.status_code == 401:
        if print_errors:
            console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if response.status_code != 200:
        if print_errors:
            console.print(response.text)
        return pd.DataFrame()

    df = pd.DataFrame(json.loads(response.text))
    if df.empty:
        if print_errors:
            console.print(f"No data found for {symbol} price.\n")
        return df
    df = df.set_index("t")
    df.index = pd.to_datetime(df.index, unit="s")
    return df
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def get_non_zero_addresses(
    symbol: str,
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Returns addresses with non-zero balance of a certain symbol
    [Source: https://glassnode.com]

    Parameters
    ----------
    symbol : str
        Asset to search (e.g., BTC)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD; defaults to today

    Returns
    -------
    pd.DataFrame
        addresses with non-zero balances (empty frame on error or no data)
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    # Glassnode expects epoch-second bounds as strings.
    query = {
        "api_key": cfg.API_GLASSNODE_KEY,
        "a": symbol,
        "i": "24h",
        "s": str(str_date_to_timestamp(start_date)),
        "u": str(str_date_to_timestamp(end_date)),
    }
    response = requests.get(api_url + "addresses/non_zero_count", params=query)

    if response.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if response.status_code != 200:
        console.print(response.text)
        return pd.DataFrame()

    df = pd.DataFrame(json.loads(response.text))
    if df.empty:
        console.print(f"No data found for {symbol}'s non-zero addresses.\n")
        return df
    df["t"] = pd.to_datetime(df["t"], unit="s")
    return df.set_index("t")
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def get_active_addresses(
    symbol: str,
    interval: str = "24h",
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Returns active addresses of a certain symbol
    [Source: https://glassnode.com]

    Parameters
    ----------
    symbol : str
        Asset to search active addresses (e.g., BTC)
    interval : str
        Interval frequency (e.g., 24h)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD; defaults to today

    Returns
    -------
    pd.DataFrame
        active addresses over time (empty frame on error or no data)
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    # Glassnode expects epoch-second bounds as strings.
    query = {
        "api_key": cfg.API_GLASSNODE_KEY,
        "a": symbol,
        "i": interval,
        "s": str(str_date_to_timestamp(start_date)),
        "u": str(str_date_to_timestamp(end_date)),
    }
    response = requests.get(api_url + "addresses/active_count", params=query)

    if response.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if response.status_code != 200:
        console.print(response.text)
        return pd.DataFrame()

    df = pd.DataFrame(json.loads(response.text))
    if df.empty:
        console.print(f"No data found for {symbol}'s active addresses.\n")
        return df
    df["t"] = pd.to_datetime(df["t"], unit="s")
    return df.set_index("t")
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def get_hashrate(
    symbol: str,
    interval: str = "24h",
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Return mean hashrate of the BTC or ETH blockchain together with the asset price.
    [Source: https://glassnode.com]

    Parameters
    ----------
    symbol : str
        Blockchain to check hashrate (BTC or ETH)
    interval : str
        Interval frequency (e.g., 24h)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD; defaults to today

    Returns
    -------
    pd.DataFrame
        Mean hashrate and symbol price over time
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    params = {
        "api_key": cfg.API_GLASSNODE_KEY,
        "a": symbol,
        "i": interval,
        "s": str(str_date_to_timestamp(start_date)),
        "u": str(str_date_to_timestamp(end_date)),
    }
    hashrate_resp = requests.get(api_url + "mining/hash_rate_mean", params=params)
    price_resp = requests.get(api_url + "market/price_usd_close", params=params)

    df = pd.DataFrame()
    if hashrate_resp.status_code == 200 and price_resp.status_code == 200:
        df = pd.DataFrame(json.loads(hashrate_resp.text))
        price_df = pd.DataFrame(json.loads(price_resp.text))
        if df.empty or price_df.empty:
            console.print(f"No data found for {symbol}'s hashrate or price.\n")
        else:
            df = df.set_index("t").rename(columns={"v": "hashrate"})
            df.index = pd.to_datetime(df.index, unit="s")
            price_df = price_df.set_index("t").rename(columns={"v": "price"})
            price_df.index = pd.to_datetime(price_df.index, unit="s")
            # Outer join keeps dates where only one of the two series exists
            df = df.merge(price_df, left_index=True, right_index=True, how="outer")
    elif hashrate_resp.status_code == 401 or price_resp.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        if hashrate_resp.status_code != 200:
            console.print(f"Error getting hashrate: {hashrate_resp.text}")
        if price_resp.status_code != 200:
            console.print(f"Error getting {symbol} price: {price_resp.text}")
    return df
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def get_exchange_balances(
    symbol: str,
    exchange: str = "aggregated",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Returns the total amount of coins held on exchange addresses in units and percentage.
    [Source: https://glassnode.com]

    Parameters
    ----------
    symbol : str
        Asset to search active addresses (e.g., BTC)
    exchange : str
        Exchange to check net position change (possible values are: aggregated, binance, bittrex,
        coinex, gate.io, gemini, huobi, kucoin, poloniex, bibox, bigone, bitfinex, hitbtc, kraken,
        okex, bithumb, zb.com, cobinhood, bitmex, bitstamp, coinbase, coincheck, luno), by default "aggregated"
    start_date : Optional[str], optional
        Initial date (format YYYY-MM-DD) by default 2 years ago
    end_date : Optional[str], optional
        Final date (format YYYY-MM-DD) by default 1 year ago

    Returns
    -------
    pd.DataFrame
        total amount of coins in units/percentage and symbol price over time

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.crypto.dd.eb(symbol="BTC")
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=365 * 2)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = (datetime.now() - timedelta(days=367)).strftime("%Y-%m-%d")
    ts_start_date = str_date_to_timestamp(start_date)
    ts_end_date = str_date_to_timestamp(end_date)
    url = api_url + "distribution/balance_exchanges"
    url2 = api_url + "distribution/balance_exchanges_relative"
    url3 = api_url + "market/price_usd_close"
    parameters = {
        "api_key": cfg.API_GLASSNODE_KEY,
        "a": symbol,
        "i": "24h",
        "e": exchange,
        "s": str(ts_start_date),
        "u": str(ts_end_date),
    }
    df = pd.DataFrame()
    r1 = requests.get(url, params=parameters)  # get balances
    r2 = requests.get(url2, params=parameters)  # get relative (percentage) balances
    r3 = requests.get(
        url3, params=parameters
    )  # get price TODO: grab data from loaded symbol
    if r1.status_code == 200 and r2.status_code == 200 and r3.status_code == 200:
        df1 = pd.DataFrame(json.loads(r1.text))
        df2 = pd.DataFrame(json.loads(r2.text))
        df3 = pd.DataFrame(json.loads(r3.text))
        # Fix: check for empty responses BEFORE set_index/merge — an empty
        # frame has no "t" column, so the original order raised KeyError.
        if df1.empty or df2.empty or df3.empty:
            console.print(f"No data found for {symbol}'s exchange balance or price.\n")
        else:
            df1.set_index("t", inplace=True)
            df1.rename(columns={"v": "stacked"}, inplace=True)
            df2.set_index("t", inplace=True)
            df2.rename(columns={"v": "percentage"}, inplace=True)
            df3.set_index("t", inplace=True)
            df3.rename(columns={"v": "price"}, inplace=True)
            df = pd.merge(df1, df2, left_index=True, right_index=True)
            df = pd.merge(df, df3, left_index=True, right_index=True)
            df.index = pd.to_datetime(df.index, unit="s")
    elif r1.status_code == 401 or r2.status_code == 401 or r3.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        if r1.status_code != 200:
            console.print(f"Error getting {symbol}'s exchange balance: {r1.text}")
        if r2.status_code != 200:
            console.print(
                f"Error getting {symbol}'s exchange balance relatives: {r2.text}"
            )
        if r3.status_code != 200:
            console.print(f"Error getting {symbol} price: {r3.text}")
    return df
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def get_exchange_net_position_change(
    symbol: str,
    exchange: str = "binance",
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Returns 30d change of the supply held in exchange wallets of a certain symbol.
    [Source: https://glassnode.com]

    Parameters
    ----------
    symbol : str
        Asset symbol to search supply (e.g., BTC)
    exchange : str
        Exchange to check net position change (e.g., binance)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD; defaults to today

    Returns
    -------
    pd.DataFrame
        supply change in exchange wallets of a certain symbol over time
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    ts_start_date = str_date_to_timestamp(start_date)
    ts_end_date = str_date_to_timestamp(end_date)
    url = api_url + "distribution/exchange_net_position_change"
    parameters = {
        "api_key": cfg.API_GLASSNODE_KEY,
        "a": symbol,
        "i": "24h",
        "e": exchange,
        "s": str(ts_start_date),
        "u": str(ts_end_date),
    }
    r = requests.get(url, params=parameters)
    df = pd.DataFrame()
    if r.status_code == 200:
        df = pd.DataFrame(json.loads(r.text))
        if df.empty:
            console.print(f"No data found for {symbol}'s net position change.\n")
        else:
            # Glassnode timestamps are epoch seconds in column "t"
            df["t"] = pd.to_datetime(df["t"], unit="s")
            df = df.set_index("t")
    elif r.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        console.print(r.text)
    # Fix: dropped dataset-extraction residue that trailed the return line
    return df
__docformat__ = "numpy"
import argparse
import logging
from collections import defaultdict
from typing import List, Tuple, Union
import pandas as pd
from binance.client import Client
from binance.exceptions import BinanceAPIException
import openbb_terminal.config_terminal as cfg
from openbb_terminal.decorators import log_start_end, check_api_key
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
def _get_trading_pairs() -> List[dict]:
    """Fetch every pair currently trading on Binance. [Source: Binance]

    Other helpers build on this raw exchange-info payload, e.g. to list all
    coins or to map each base asset to its quote assets.

    Returns
    -------
    List[dict]
        One dict per pair, as returned by the Binance exchange-info endpoint,
        e.g.::

            {'symbol': 'ETHBTC', 'status': 'TRADING', 'baseAsset': 'ETH',
             'baseAssetPrecision': 8, 'quoteAsset': 'BTC', 'quotePrecision': 8,
             'orderTypes': [...], 'filters': [...], 'permissions': ['SPOT', 'MARGIN'],
             ...}

        Only entries whose status is ``TRADING`` are included.
    """
    client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
    symbols = client.get_exchange_info()["symbols"]
    # Drop halted / delisted pairs
    return [entry for entry in symbols if entry["status"] == "TRADING"]
@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
def get_all_binance_trading_pairs() -> pd.DataFrame:
    """Return every available Binance pair as a three-column DataFrame.

    Example row: ETHBTC | ETH | BTC
    [Source: Binance]

    Returns
    -------
    pd.DataFrame
        All available pairs on Binance.
        Columns: symbol, baseAsset, quoteAsset
    """
    pairs_df = pd.DataFrame(_get_trading_pairs())
    return pairs_df[["symbol", "baseAsset", "quoteAsset"]]
@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
def get_binance_available_quotes_for_each_coin() -> dict:
    """Map every Binance base asset to the list of its quote assets. [Source: Binance]

    Returns
    -------
    dict
        All quote assets for each coin, e.g.
        {'ETH': ['BTC', 'USDT', ...], 'UNI': ['ETH', 'BTC', 'BUSD', ...]}
    """
    quotes_by_base: defaultdict = defaultdict(list)
    for entry in _get_trading_pairs():
        quotes_by_base[entry["baseAsset"]].append(entry["quoteAsset"])
    return quotes_by_base
@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
def check_valid_binance_str(symbol: str) -> str:
    """Validate that a symbol exists on Binance. [Source: Binance]

    Raises argparse.ArgumentTypeError when Binance rejects the symbol, so the
    function can be used directly as an argparse ``type=`` validator.

    Returns
    -------
    str
        The symbol, upper-cased
    """
    client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
    upper_symbol = symbol.upper()
    try:
        # Cheap endpoint used purely as an existence probe
        client.get_avg_price(symbol=upper_symbol)
    except BinanceAPIException as e:
        logger.exception("%s is not a valid binance symbol", str(symbol))
        raise argparse.ArgumentTypeError(
            f"{symbol} is not a valid binance symbol"
        ) from e
    return upper_symbol
@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
def show_available_pairs_for_given_symbol(
    symbol: str = "ETH",
) -> Tuple[Union[str, None], list]:
    """Return all available quoted assets for given symbol. [Source: Binance]

    Parameters
    ----------
    symbol: str
        Uppercase symbol of coin e.g BTC, ETH, UNI, LUNA, DOT ...

    Returns
    -------
    Tuple[Union[str, None], list]
        Tuple of symbol,
        list of quoted assets for given symbol: ["BTC", "USDT", "BUSD"];
        (None, []) when the symbol is unknown.
    """
    symbol_upper = symbol.upper()
    pairs = get_binance_available_quotes_for_each_coin()
    # Fix: direct O(1) membership test instead of scanning every dict item.
    # `in` is also safe on the defaultdict returned by the helper — unlike
    # pairs[symbol_upper], it does not insert a spurious empty entry.
    if symbol_upper in pairs:
        return symbol_upper, pairs[symbol_upper]
    return None, []
@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
def get_order_book(
    from_symbol: str,
    limit: int = 100,
    to_symbol: str = "USDT",
) -> pd.DataFrame:
    """Get order book for currency. [Source: Binance]

    Parameters
    ----------
    from_symbol: str
        Cryptocurrency symbol
    limit: int
        Limit parameter. Adjusts the weight
    to_symbol: str
        Quote currency (what to view coin vs)

    Returns
    -------
    pd.DataFrame
        Dataframe containing orderbook; empty when the pair is invalid
    """
    pair = from_symbol + to_symbol
    client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
    try:
        market_book = client.get_order_book(symbol=pair, limit=limit)
    except BinanceAPIException:
        console.print(f"{to_symbol} is not a valid binance symbol")
        # Fix: the original fell through to `pd.DataFrame(market_book)` with
        # `market_book` unbound, raising UnboundLocalError after the message.
        return pd.DataFrame()
    return pd.DataFrame(market_book)
@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
def get_balance(
    from_symbol: str,
    to_symbol: str = "USDT",
) -> pd.DataFrame:
    """Get account holdings for asset. [Source: Binance]

    Parameters
    ----------
    from_symbol: str
        Cryptocurrency to fetch the account balance for
    to_symbol: str
        Quote currency used to value the holdings

    Returns
    -------
    pd.DataFrame
        Dataframe with account holdings for an asset, indexed by
        "Free"/"Locked"; empty when the asset is not in the account
    """
    client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
    pair = from_symbol + to_symbol
    current_balance = client.get_asset_balance(asset=from_symbol)
    if current_balance is None:
        console.print("Check loaded coin\n")
        return pd.DataFrame()
    last_price = client.get_ticker(symbol=pair).get("lastPrice")
    amounts = [float(current_balance["free"]), float(current_balance["locked"])]
    df = pd.DataFrame(amounts)
    df.columns = ["Amount"]
    df.index = ["Free", "Locked"]
    # Only add quote-currency valuation when the ticker returned a price
    if last_price:
        last_price = float(last_price)
        df[f"Amount [{to_symbol}]"] = df["Amount"].mul(last_price)
    df["Percent"] = df["Amount"].mul(100).div(df["Amount"].sum(axis=0)).round(3)
    # Fix: dropped dataset-extraction residue that trailed the return line
    return df
import datetime as dt
import json
import logging
import pandas as pd
import requests
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
# Module-level logger used by the log_start_end decorators below
logger = logging.getLogger(__name__)
# Base URL for all Coinglass PRO API endpoints
api_url = "https://open-api.coinglass.com/api/pro/v1/"
# Valid aggregation intervals accepted by the API: 0 = ALL, 2 = 1H, 1 = 4H, 4 = 12H
# NOTE(review): the original comment said prompt toolkit requires these to be
# strings, yet the values are ints — confirm which form the callers expect.
INTERVALS = [0, 1, 2, 4]
@log_start_end(log=logger)
@check_api_key(["API_COINGLASS_KEY"])
def get_liquidations(symbol: str) -> pd.DataFrame:
    """Returns liquidations per day for a certain symbol
    [Source: https://coinglass.github.io/API-Reference/#liquidation-chart]

    Parameters
    ----------
    symbol : str
        Crypto Symbol to search daily liquidations (e.g., BTC)

    Returns
    -------
    pd.DataFrame
        daily liquidations for loaded symbol, indexed by date
    """
    url = api_url + f"futures/liquidation_chart?symbol={symbol.upper()}"
    headers = {"coinglassSecret": cfg.API_COINGLASS_KEY}
    response = requests.request("GET", url, headers=headers)
    df = pd.DataFrame()
    if response.status_code == 200:
        res_json = json.loads(response.text)
        if res_json["success"]:
            if "data" in res_json:
                data = res_json["data"]
                # Timestamps come back as epoch milliseconds
                dates = [
                    dt.datetime.utcfromtimestamp(ts / 1000) for ts in data["dateList"]
                ]
                # NOTE(review): "buyList" is mapped to Shorts and "sellList" to
                # Longs, mirroring the original code — confirm against the
                # Coinglass docs whether the labels are intentionally swapped.
                df = pd.DataFrame(
                    data={
                        "date": dates,
                        "price": data["priceList"],
                        "Shorts": data["buyList"],
                        "Longs": data["sellList"],
                    }
                ).set_index("date")
            else:
                console.print(f"No data found for {symbol}.\n")
        else:
            if "secret invalid" in res_json["msg"]:
                console.print("[red]Invalid API Key[/red]\n")
            else:
                console.print(res_json["msg"])
    elif response.status_code == 429:
        # Fix: the original had a second `elif` on the same status code which
        # was unreachable; the rate-limit case is reported once here.
        console.print("[red]Exceeded number of calls per minute[/red]\n")
    return df
@log_start_end(log=logger)
@check_api_key(["API_COINGLASS_KEY"])
def get_funding_rate(symbol: str) -> pd.DataFrame:
    """Returns funding rate by exchange for a certain symbol
    [Source: https://coinglass.github.io/API-Reference/]

    Parameters
    ----------
    symbol : str
        Crypto Symbol to search funding rates for (e.g., BTC)

    Returns
    -------
    pd.DataFrame
        funding rate per exchange, indexed by date
    """
    url = api_url + f"futures/funding_rates_chart?symbol={symbol.upper()}&type=C"
    headers = {"coinglassSecret": cfg.API_COINGLASS_KEY}
    response = requests.request("GET", url, headers=headers)
    df = pd.DataFrame()
    if response.status_code == 200:
        res_json = json.loads(response.text)
        if res_json["success"]:
            if "data" in res_json:
                data = res_json["data"]
                # Timestamps come back as epoch milliseconds
                dates = [
                    dt.datetime.utcfromtimestamp(ts / 1000) for ts in data["dateList"]
                ]
                # "dataMap" carries one series per exchange
                df = pd.DataFrame(
                    data={
                        "date": dates,
                        "price": data["priceList"],
                        **data["dataMap"],
                    }
                ).set_index("date")
            else:
                console.print(f"No data found for {symbol}.\n")
        else:
            if "secret invalid" in res_json["msg"]:
                console.print("[red]Invalid API Key[/red]\n")
            else:
                console.print(res_json["msg"])
    elif response.status_code == 429:
        # Fix: the original had a second `elif` on the same status code which
        # was unreachable; also fixed the docstring, which wrongly described
        # this function as returning open interest.
        console.print("[red]Exceeded number of calls per minute[/red]\n")
    return df
@log_start_end(log=logger)
@check_api_key(["API_COINGLASS_KEY"])
def get_open_interest_per_exchange(symbol: str, interval: int = 0) -> pd.DataFrame:
    """Returns open interest by exchange for a certain symbol
    [Source: https://coinglass.github.io/API-Reference/]

    Parameters
    ----------
    symbol : str
        Crypto Symbol to search open interest futures (e.g., BTC)
    interval : int
        Frequency (possible values are: 0 for ALL, 2 for 1H, 1 for 4H, 4 for 12H), by default 0

    Returns
    -------
    pd.DataFrame
        open interest by exchange and price, indexed by date
    """
    url = (
        api_url
        + f"futures/openInterest/chart?symbol={symbol.upper()}&interval={interval}"
    )
    headers = {"coinglassSecret": cfg.API_COINGLASS_KEY}
    response = requests.request("GET", url, headers=headers)
    df = pd.DataFrame()
    if response.status_code == 200:
        res_json = json.loads(response.text)
        if res_json["success"]:
            if "data" in res_json:
                data = res_json["data"]
                # Timestamps come back as epoch milliseconds
                dates = [
                    dt.datetime.utcfromtimestamp(ts / 1000) for ts in data["dateList"]
                ]
                # "dataMap" carries one open-interest series per exchange
                df = pd.DataFrame(
                    data={
                        "date": dates,
                        "price": data["priceList"],
                        **data["dataMap"],
                    }
                ).set_index("date")
            else:
                console.print(f"No data found for {symbol}.\n")
        else:
            if "secret invalid" in res_json["msg"]:
                console.print("[red]Invalid API Key[/red]\n")
            else:
                console.print(res_json["msg"])
    elif response.status_code == 429:
        # Fix: removed a second, unreachable `elif` on the same status code,
        # and dropped dataset-extraction residue that trailed the return line.
        console.print("[red]Exceeded number of calls per minute[/red]\n")
    return df
__docformat__ = "numpy"
import logging
import os
from typing import Optional
from pandas.plotting import register_matplotlib_converters
import openbb_terminal.cryptocurrency.due_diligence.pycoingecko_model as gecko
from openbb_terminal.cryptocurrency import cryptocurrency_helpers
from openbb_terminal.cryptocurrency.dataframe_helpers import wrap_text_in_df
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
# Module-level logger used by the log_start_end decorators below
logger = logging.getLogger(__name__)
# Teach matplotlib how to render pandas datetime types on plot axes
register_matplotlib_converters()
@log_start_end(log=logger)
def display_coin_potential_returns(
    to_symbol: str,
    from_symbol: Optional[str] = None,
    limit: Optional[int] = None,
    price: Optional[int] = None,
    export: str = "",
) -> None:
    """Prints table showing potential returns of a certain coin. [Source: CoinGecko]

    Parameters
    ----------
    to_symbol : str
        Coin loaded to check potential returns for (e.g., algorand)
    from_symbol : Optional[str]
        Coin to compare main_coin with (e.g., bitcoin)
    limit : Optional[int]
        Number of coins with highest market cap to compare main_coin with (e.g., 5)
    price : Optional[int]
        Target price of main_coin to check potential returns (e.g., 5)
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    returns_df = gecko.get_coin_potential_returns(to_symbol, from_symbol, limit, price)
    print_rich_table(
        returns_df,
        headers=list(returns_df.columns),
        show_index=False,
        title="Potential Coin Returns",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "prt", returns_df)
@log_start_end(log=logger)
def display_info(symbol: str, export: str = "") -> None:
    """Prints table showing basic information about loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency symbol
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    cg_id = cryptocurrency_helpers.check_cg_id(symbol)
    # Bail out silently when the symbol cannot be resolved to a CoinGecko id
    if not cg_id:
        return
    info_df = wrap_text_in_df(gecko.Coin(cg_id).get_base_info(), w=80)
    print_rich_table(
        info_df,
        headers=list(info_df.columns),
        show_index=False,
        title="Basic Coin Information",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "info", info_df)
@log_start_end(log=logger)
def display_web(symbol: str, export: str = "") -> None:
    """Prints table showing found websites corresponding to loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency symbol
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    cg_id = cryptocurrency_helpers.check_cg_id(symbol)
    # Bail out silently when the symbol cannot be resolved to a CoinGecko id
    if not cg_id:
        return
    web_df = gecko.Coin(cg_id).get_websites()
    print_rich_table(
        web_df,
        headers=list(web_df.columns),
        show_index=False,
        title="Websites for Loaded Coin",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "web", web_df)
@log_start_end(log=logger)
def display_social(symbol: str, export: str = "") -> None:
    """Prints table showing social media corresponding to loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    social_df = gecko.Coin(symbol).get_social_media()
    print_rich_table(
        social_df,
        headers=list(social_df.columns),
        show_index=False,
        title="Social Media for Loaded Coin",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "social", social_df)
@log_start_end(log=logger)
def display_dev(symbol: str, export: str = "") -> None:
    """Prints table showing developers data for loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    dev_df = gecko.Coin(symbol).get_developers_data()
    print_rich_table(
        dev_df,
        headers=list(dev_df.columns),
        show_index=False,
        title="Developers Data for Loaded Coin",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "dev", dev_df)
@log_start_end(log=logger)
def display_ath(symbol: str, currency: str = "usd", export: str = "") -> None:
    """Prints table showing all time high data for loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency
    currency: str
        currency vs which coin ath will be displayed: usd or btc
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    ath_df = gecko.Coin(symbol).get_all_time_high(currency=currency)
    print_rich_table(
        ath_df, headers=list(ath_df.columns), show_index=False, title="Coin Highs"
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "ath", ath_df)
@log_start_end(log=logger)
def display_atl(symbol: str, currency: str = "usd", export: str = "") -> None:
    """Prints table showing all time low data for loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency
    currency: str
        currency vs which coin atl will be displayed: usd or btc
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    atl_df = gecko.Coin(symbol).get_all_time_low(currency=currency)
    print_rich_table(
        atl_df, headers=list(atl_df.columns), show_index=False, title="Coin Lows"
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "atl", atl_df)
@log_start_end(log=logger)
def display_score(symbol: str, export: str = "") -> None:
    """Prints table showing different kind of scores for loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    scores_df = gecko.Coin(symbol).get_scores()
    print_rich_table(
        scores_df,
        headers=list(scores_df.columns),
        show_index=False,
        title="Different Scores for Loaded Coin",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "score", scores_df)
@log_start_end(log=logger)
def display_bc(symbol: str, export: str = "") -> None:
    """Prints table showing urls to blockchain explorers. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    bc_df = gecko.Coin(symbol).get_blockchain_explorers()
    print_rich_table(
        bc_df, headers=list(bc_df.columns), show_index=False, title="Blockchain URLs"
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "bc", bc_df)
@log_start_end(log=logger)
def display_market(symbol: str, export: str = "") -> None:
    """Prints table showing market data for loaded coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol : str
        Cryptocurrency
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    coin = gecko.Coin(symbol)
    df = coin.get_market_data()
    print_rich_table(
        df, headers=list(df.columns), show_index=False, title="Market Data"
    )
    # Fix: dropped dataset-extraction residue that trailed the closing call
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "market",
        df,
    )
__docformat__ = "numpy"
import logging
import textwrap
from typing import Tuple, Optional
from datetime import datetime, timedelta
import pandas as pd
from dateutil import parser
from openbb_terminal.cryptocurrency.coinpaprika_helpers import PaprikaSession
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
# pylint: disable=unsupported-assignment-operation
logger = logging.getLogger(__name__)
# pylint: disable=unsupported-assignment-operation
@log_start_end(log=logger)
def get_coin_twitter_timeline(
    symbol: str = "BTC", sortby: str = "date", ascend: bool = True
) -> pd.DataFrame:
    """Get twitter timeline for given coin id. Not more than last 50 tweets [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    sortby: str
        Key by which to sort data. Every column name is valid
        (see for possible values:
        https://api.coinpaprika.com/docs#tag/Coins/paths/~1coins~1%7Bcoin_id%7D~1twitter/get).
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Twitter timeline for given coin; empty on API error or no tweets.
        Columns: date, user_name, status, retweet_count, like_count
    """
    # get coinpaprika id using crypto symbol
    cp_id = get_coinpaprika_id(symbol)
    session = PaprikaSession()
    res = session.make_request(session.ENDPOINTS["coin_tweeter"].format(cp_id))
    # API errors arrive as a mapping containing an "error" key
    if "error" in res:
        console.print(res)
        return pd.DataFrame()
    # A coin with no tweets yields an empty list
    if isinstance(res, list) and len(res) == 0:
        return pd.DataFrame()
    df = pd.DataFrame(res)[
        ["date", "user_name", "status", "retweet_count", "like_count"]
    ]
    # Wrap long strings so they render within an 80-column table cell
    df = df.applymap(
        lambda x: "\n".join(textwrap.wrap(x, width=80)) if isinstance(x, str) else x
    )
    # NOTE(review): this strips the literal whitespace sequence below from each
    # tweet body — confirm the intended width (single vs double space) upstream.
    df["status"] = df["status"].apply(lambda x: x.replace("  ", ""))
    # Split ISO timestamps ("2021-01-01T12:00:00Z") onto two display lines
    df["date"] = df["date"].apply(lambda x: x.replace("T", "\n"))
    df["date"] = df["date"].apply(lambda x: x.replace("Z", ""))
    df = df.sort_values(by=sortby, ascending=ascend)
    # Remove unicode chars (it breaks pretty tables)
    df["status"] = df["status"].apply(
        lambda text: "".join(i if ord(i) < 128 else "" for i in text)
    )
    return df
@log_start_end(log=logger)
def get_coin_events_by_id(
    symbol: str = "BTC", sortby: str = "date", ascend: bool = False
) -> pd.DataFrame:
    """Get all events related to given coin like conferences, start date of futures trading etc.
    [Source: CoinPaprika]

    Example of response from API:

    .. code-block:: json

        {
            "id": "17398-cme-april-first-trade",
            "date": "2018-04-02T00:00:00Z",
            "date_to": "string",
            "name": "CME: April First Trade",
            "description": "First trade of Bitcoin futures contract for April 2018.",
            "is_conference": false,
            "link": "http://www.cmegroup.com/trading/equity-index/us-index/bitcoin_product_calendar_futures.html",
            "proof_image_link": "https://static.coinpaprika.com/storage/cdn/event_images/16635.jpg"
        }

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    sortby: str
        Key by which to sort data. Every column name is valid
        (see for possible values:
        https://api.coinpaprika.com/docs#tag/Coins/paths/~1coins~1%7Bcoin_id%7D~1events/get).
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Events found for given coin.
        Columns: date, date_to, name, description, is_conference, link
    """
    # get coinpaprika id using crypto symbol
    cp_id = get_coinpaprika_id(symbol)
    session = PaprikaSession()
    res = session.make_request(session.ENDPOINTS["coin_events"].format(cp_id))
    if not res or "error" in res:
        return pd.DataFrame()
    events = pd.DataFrame(res)
    # Wrap long descriptions so they fit in a 40-column table cell
    events["description"] = events["description"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=40)) if isinstance(x, str) else x
    )
    events.drop(["id", "proof_image_link"], axis=1, inplace=True)
    # Reformat ISO timestamps ("...T...Z") for two-line table display
    for col in ("date", "date_to"):
        events[col] = events[col].apply(
            lambda x: x.replace("T", "\n").replace("Z", "") if isinstance(x, str) else x
        )
    return events.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_coin_exchanges_by_id(
    symbol: str = "BTC",
    sortby: str = "adjusted_volume_24h_share",
    ascend: bool = True,
) -> pd.DataFrame:
    """Get all exchanges for given coin id. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    sortby: str
        Key by which to sort data. Every column name is valid (see for possible values:
        https://api.coinpaprika.com/v1).
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        All exchanges for given coin.
        Columns: id, name, adjusted_volume_24h_share, fiats
    """
    # get coinpaprika id using crypto symbol
    cp_id = get_coinpaprika_id(symbol)
    session = PaprikaSession()
    raw = session.make_request(session.ENDPOINTS["coin_exchanges"].format(cp_id))
    exchanges = pd.DataFrame(raw)
    if "fiats" in exchanges.columns.tolist():
        # Collapse the list of fiat dicts into a simple count per exchange
        exchanges["fiats"] = (
            exchanges["fiats"].copy().apply(lambda x: len([i["symbol"] for i in x if x]))
        )
    return exchanges.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_coin_markets_by_id(
    symbol: str = "BTC",
    quotes: str = "USD",
    sortby: str = "pct_volume_share",
    ascend: bool = True,
) -> pd.DataFrame:
    """All markets for given coin and currency [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    quotes: str
        Comma separated list of quotes to return.
        Example: quotes=USD,BTC
        Allowed values:
        BTC, ETH, USD, EUR, PLN, KRW, GBP, CAD, JPY, RUB, TRY, NZD, AUD, CHF, UAH, HKD, SGD, NGN,
        PHP, MXN, BRL, THB, CLP, CNY, CZK, DKK, HUF, IDR, ILS, INR, MYR, NOK, PKR, SEK, TWD, ZAR,
        VND, BOB, COP, PEN, ARS, ISK
    sortby: str
        Key by which to sort data. Every column name is valid (see for possible values:
        https://api.coinpaprika.com/v1).
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        All markets for given coin and currency
    """
    # "volume"/"price" are per-quote columns, e.g. "usd_volume"
    if sortby in ["volume", "price"]:
        sortby = f"{str(symbol).lower()}_{sortby}"
    # get coinpaprika id using crypto symbol
    cp_id = get_coinpaprika_id(symbol)
    session = PaprikaSession()
    markets = session.make_request(
        session.ENDPOINTS["coin_markets"].format(cp_id), quotes=quotes
    )
    if "error" in markets:
        console.print(markets)
        return pd.DataFrame()

    rows = []
    for market in markets:
        row = {
            "exchange": market.get("exchange_name"),
            "pair": market.get("pair"),
            "trust_score": market.get("trust_score"),
            "pct_volume_share": market.get("adjusted_volume_24h_share"),
        }
        # Flatten the per-currency quote dicts into <ccy>_price / <ccy>_volume
        market_quotes: dict = market.get("quotes")
        for currency, quote in market_quotes.items():
            row[f"{currency.lower()}_price"] = quote.get("price")
            row[f"{currency.lower()}_volume"] = quote.get("volume_24h")
        row["market_url"] = market.get("market_url")
        rows.append(row)
    return pd.DataFrame(rows).sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_ohlc_historical(
    symbol: str = "eth-ethereum", quotes: str = "USD", days: int = 90
) -> pd.DataFrame:
    """
    Open/High/Low/Close values with volume and market_cap. [Source: CoinPaprika]

    Request example: https://api.coinpaprika.com/v1/coins/btc-bitcoin/ohlcv/historical?start=2019-01-01&end=2019-01-20
    if the last day is current day it can an change with every request until actual close of the day at 23:59:59

    Parameters
    ----------
    symbol: str
        Paprika coin identifier e.g. eth-ethereum
    quotes: str
        returned data quote (available values: usd btc); anything else falls
        back to USD
    days: int
        time range for chart in days. Maximum 365

    Returns
    -------
    pd.DataFrame
        Open/High/Low/Close values with volume and market_cap; empty when the
        API reports an error (e.g. a coin name was passed instead of a
        paprika id such as btc-bitcoin)
    """
    if quotes.lower() not in ["usd", "btc"]:
        quotes = "USD"
    # API caps the range at one year
    if abs(int(days)) > 365:
        days = 365
    end = datetime.now().strftime("%Y-%m-%d")
    start = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
    session = PaprikaSession()
    data = session.make_request(
        session.ENDPOINTS["ohlcv_hist"].format(symbol),
        quotes=quotes,
        start=start,
        end=end,
    )
    # Fix: removed commented-out dead code; the error path still silently
    # returns an empty frame, matching prior behavior.
    if "error" in data:
        return pd.DataFrame()
    return pd.DataFrame(data)
@log_start_end(log=logger)
def get_tickers_info_for_coin(symbol: str = "BTC", quotes: str = "USD") -> pd.DataFrame:
    """Get the most important ticker-related information for a coin. [Source: CoinPaprika]

    Fields include rank, supply figures, beta value, timestamps and the
    quote-currency block (price, volumes, percent changes, ATH data).

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    quotes: str
        Comma separated quotes to return e.g quotes = USD, BTC

    Returns
    -------
    pd.DataFrame
        Most important ticker related information
        Columns: Metric, Value
    """
    # Resolve the CoinPaprika id for the given crypto symbol.
    cp_id = get_coinpaprika_id(symbol)
    session = PaprikaSession()
    tickers = session.make_request(
        session.ENDPOINTS["ticker_info"].format(cp_id), quotes=quotes
    )
    date_fmt = "%Y-%m-%d %H:%M:%S"
    for field, raw_value in tickers.items():
        # Normalize every date-like top-level field to a uniform format.
        if "date" in field or "data" in field:
            try:
                tickers[field] = parser.parse(raw_value).strftime(date_fmt)
            except (KeyError, ValueError, TypeError) as e:
                logger.exception(str(e))
                console.print(e)
        # The nested ath_date inside the quotes block needs the same treatment.
        if field == "quotes":
            try:
                tickers[field][quotes]["ath_date"] = parser.parse(
                    tickers[field][quotes]["ath_date"]
                ).strftime(date_fmt)
            except (KeyError, ValueError, TypeError) as e:
                logger.exception(str(e))
                console.print(e)
    df = pd.json_normalize(tickers)
    try:
        # Flatten the "quotes.XXX.yyy" column names into "xxx_yyy" style.
        df.columns = [col.replace("quotes.", "") for col in df.columns]
        df.columns = [col.replace(".", "_").lower() for col in df.columns]
    except KeyError as e:
        logger.exception(str(e))
        console.print(e)
    df = df.T.reset_index()
    df.columns = ["Metric", "Value"]
    return df
@log_start_end(log=logger)
def basic_coin_info(symbol: str = "BTC") -> pd.DataFrame:
    """Basic coin information [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)

    Returns
    -------
    pd.DataFrame
        Metric, Value
    """
    # get coinpaprika id using crypto symbol
    cp_id = get_coinpaprika_id(symbol)
    coin = get_coin(cp_id)
    tags = coin.get("tags") or []
    keys = [
        "id",
        "name",
        "symbol",
        "rank",
        "type",
        "description",
        "platform",
        "proof_type",
        "contract",
    ]
    results = {key: coin.get(key) for key in keys}
    try:
        # A tag entry without a "name" yields None, which makes join raise
        # TypeError (previously uncaught) -- treat it like the other
        # malformed-payload cases and fall back to empty values.
        tags = ", ".join(t.get("name") for t in tags)
        parent = coin.get("parent") or {}
    except (KeyError, IndexError, TypeError):
        tags, parent = [], {}
    results["tags"] = tags
    results["parent"] = parent.get("id")
    df = pd.Series(results).reset_index()
    df.columns = ["Metric", "Value"]
    # Wrap long string values (e.g. the description) for table rendering.
    df["Value"] = df["Value"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=80)) if isinstance(x, str) else x
    )
    df.dropna(subset=["Value"], inplace=True)
    return df
@log_start_end(log=logger)
def get_coin(symbol: str = "eth-ethereum") -> dict:
    """Fetch a single coin's record by its CoinPaprika id. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        id of coin from coinpaprika e.g. Ethereum - > 'eth-ethereum'

    Returns
    -------
    dict
        Raw coin response from the API
    """
    session = PaprikaSession()
    endpoint = session.ENDPOINTS["coin"].format(symbol)
    return session.make_request(endpoint)
def get_coinpaprika_id(symbol: str) -> Optional[str]:
    """Map a crypto symbol (e.g. BTC) to its CoinPaprika id (e.g. btc-bitcoin).

    Returns None when the symbol is unknown to CoinPaprika.
    """
    coins = get_coin_list()
    id_by_symbol = dict(zip(coins.id, coins.symbol))
    cp_id, _ = validate_coin(symbol.upper(), id_by_symbol)
    return cp_id
def get_coin_list() -> pd.DataFrame:
    """List all active coins on CoinPaprika. [Source: CoinPaprika]

    Returns
    -------
    pandas.DataFrame
        Available coins on CoinPaprika
        rank, id, name, symbol, type
    """
    session = PaprikaSession()
    raw = session.make_request(session.ENDPOINTS["coins"])
    coins = pd.DataFrame(raw)
    # Keep only coins CoinPaprika still marks as active.
    active = coins[coins["is_active"]]
    return active[["rank", "id", "name", "symbol", "type"]]
def validate_coin(symbol: str, coins_dct: dict) -> Tuple[Optional[str], Optional[str]]:
    """Resolve a coin symbol against a {coin_id: symbol} mapping.

    [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        id or symbol of coin for CoinPaprika
    coins_dct: dict
        dictionary mapping CoinPaprika coin ids to symbols

    Returns
    -------
    Tuple[Optional[str], Optional[str]]
        (coin id, lower-cased symbol) on the first match, (None, None) otherwise
    """
    matched_id = next(
        (cp_id for cp_id, ticker in coins_dct.items() if ticker == symbol), None
    )
    if matched_id is None:
        return None, None
    return matched_id, symbol.lower()
__docformat__ = "numpy"
import logging
import os
from typing import Optional
from openbb_terminal.cryptocurrency.overview import cryptopanic_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.decorators import check_api_key
from openbb_terminal.cryptocurrency.dataframe_helpers import prettify_column_names
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_CRYPTO_PANIC_KEY"])
def display_news(
    post_kind: str = "news",
    region: str = "en",
    filter_: Optional[str] = None,
    source: Optional[str] = None,
    symbol: Optional[str] = None,
    limit: int = 25,
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing recent posts from CryptoPanic news aggregator platform.
    [Source: https://cryptopanic.com/]

    Parameters
    ----------
    limit: int
        number of news to display
    post_kind: str
        Filter by category of news. Available values: news or media.
    filter_: Optional[str]
        Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol
    region: str
        Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch),
        es (Español), fr (Français), it (Italiano), pt (Português), ru (Русский)
    source: Optional[str]
        Filter by news source, if any (passed through to the model).
    symbol: Optional[str]
        Filter news to a specific coin symbol, if any (passed through to the model).
    ascend: bool
        Sort in ascending order.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = cryptopanic_model.get_news(
        limit=limit,
        post_kind=post_kind,
        filter_=filter_,
        region=region,
        symbol=symbol,
        source=source,
        ascend=ascend,
    )
    # Nothing is printed or exported when the model returns no rows.
    if not df.empty:
        # Drop vote counts and domain before display and export.
        df.drop(["negative_votes", "positive_votes", "domain"], axis=1, inplace=True)
        df.columns = prettify_column_names(df.columns)
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
            title="Most Recent News",
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "news",
            df,
        )
import logging
from typing import List, Dict
import pandas as pd
from tokenterminal import TokenTerminal
from openbb_terminal.decorators import log_start_end
from openbb_terminal import config_terminal as cfg
logger = logging.getLogger(__name__)
# Module-level Token Terminal client, built once at import with the configured key.
token_terminal = TokenTerminal(key=cfg.API_TOKEN_TERMINAL_KEY)
# Fetch all data for projects'
try:
    PROJECTS_DATA = token_terminal.get_all_projects()
    # This is to catch the invalid header and raise Exception to load the autocomplete tickers
    # NOTE(review): if the API returns a list on success, indexing it with a
    # string raises TypeError, which is also caught below -- confirm intended.
    if PROJECTS_DATA["message"] == "Invalid authorization header":
        raise Exception
except Exception:
    # Static fallback of project ids so autocomplete still works when the API
    # call fails (e.g. missing or invalid API key).
    PROJECTS_DATA = [
        "0x",
        "1inch",
        "88mph",
        "aave",
        "abracadabra-money",
        "alchemist",
        "alchemix-finance",
        "algorand",
        "alpha-finance",
        "arweave",
        "autofarm",
        "avalanche",
        "axie-infinity",
        "balancer",
        "bancor",
        "barnbridge",
        "basket-dao",
        "benqi",
        "binance-smart-chain",
        "bitcoin",
        "cap",
        "cardano",
        "centrifuge",
        "clipper",
        "compound",
        "convex-finance",
        "cosmos",
        "cryptex",
        "curve",
        "decentral-games",
        "decred",
        "dforce",
        "dhedge",
        "dodo",
        "dogecoin",
        "dydx",
        "ellipsis-finance",
        "elrond",
        "enzyme-finance",
        "erasure-protocol",
        "ethereum",
        "ethereum-name-service",
        "euler",
        "fantom",
        "fei-protocol",
        "filecoin",
        "futureswap",
        "gmx",
        "goldfinch",
        "harvest-finance",
        "helium",
        "hurricaneswap",
        "idle-finance",
        "index-cooperative",
        "instadapp",
        "integral-protocol",
        "karura",
        "keeperdao",
        "keep-network",
        "kusama",
        "kyber",
        "lido-finance",
        "liquity",
        "litecoin",
        "livepeer",
        "looksrare",
        "loopring",
        "maiar",
        "makerdao",
        "maple-finance",
        "mcdex",
        "metamask",
        "mstable",
        "near-protocol",
        "nexus-mutual",
        "nftx",
        "notional-finance",
        "opensea",
        "optimism",
        "osmosis",
        "pancakeswap",
        "pangolin",
        "perpetual-protocol",
        "piedao",
        "pocket-network",
        "polkadot",
        "polygon",
        "polymarket",
        "pooltogether",
        "powerpool",
        "quickswap",
        "rarible",
        "rari-capital",
        "reflexer",
        "ren",
        "ribbon-finance",
        "rocket-pool",
        "saddle-finance",
        "set-protocol",
        "solana",
        "solend",
        "spookyswap",
        "stake-dao",
        "stellar",
        "sushiswap",
        "synthetix",
        "terra",
        "tezos",
        "the-graph",
        "thorchain",
        "tokemak",
        "tokenlon",
        "tornado-cash",
        "trader-joe",
        "uma",
        "uniswap",
        "unit-protocol",
        "venus",
        "vesper-finance",
        "volmex",
        "wakaswap",
        "yearn-finance",
        "yield-guild-games",
        "yield-yak",
        "zcash",
        "zora",
    ]
# Metric identifiers accepted by get_fundamental_metric_from_project().
METRICS = [
    "twitter_followers",
    "gmv_annualized",
    "market_cap",
    "take_rate",
    "revenue",
    "revenue_protocol",
    "tvl",
    "pe",
    "pe_circulating",
    "ps",
    "ps_circulating",
]
@log_start_end(log=logger)
def get_possible_metrics() -> List[str]:
    """Return the metric identifiers supported by Token Terminal.

    Returns
    -------
    List[str]
        Supported metric names (the module-level METRICS list).
    """
    return METRICS
@log_start_end(log=logger)
def get_project_ids() -> List[str]:
    """Return the available Token Terminal project ids.

    Returns
    -------
    List[str]
        All known project IDs.
    """
    # When the import-time API call failed, PROJECTS_DATA is already the
    # static fallback list of ids and can be returned directly.
    if not isinstance(PROJECTS_DATA, dict):
        return PROJECTS_DATA
    return [entry["project_id"] for entry in PROJECTS_DATA]
@log_start_end(log=logger)
def get_fundamental_metric_from_project(
    metric: str,
    project: str,
) -> pd.Series:
    """Get fundamental metrics from a single project [Source: Token Terminal]

    Parameters
    ----------
    metric : str
        The metric of interest. See `get_possible_metrics()` for available metrics.
    project : str
        The project of interest. See `get_possible_projects()` for available categories.

    Returns
    -------
    pandas.Series:
        Metric values indexed by datetime (oldest first); empty series on failure.
    """
    history = token_terminal.get_historical_metrics(project)
    dates: list = []
    values: list = []
    for entry in history:
        # Bail out entirely when any record lacks the metric key, mirroring
        # the strictness of the original implementation.
        if metric not in entry:
            return pd.Series(dtype="float64")
        value = entry[metric]
        # Only numeric observations are kept; other types are skipped.
        if isinstance(value, (float, int)):
            values.append(value)
            dates.append(entry["datetime"])
    if not values:
        return pd.Series(dtype="float64")
    # Reverse so the series runs oldest -> newest.
    return pd.Series(index=pd.to_datetime(dates), data=values)[::-1]
@log_start_end(log=logger)
def get_description(
    project: str,
) -> Dict:
    """Get description from a single project [Source: Token Terminal]

    Parameters
    ----------
    project : str
        The project of interest. See `get_possible_projects()` for available categories.

    Returns
    -------
    Dict[str, Any]
        Description of the project with fields: 'how', 'who', 'what', 'funding',
        'competition', 'business_model', 'github_contributors'. Empty dict when
        the project is unknown.
    """
    for p in PROJECTS_DATA:
        if p["project_id"] == project:
            return p["description"]
    # Bug fix: the original returned Dict(), but typing.Dict cannot be
    # instantiated and raises TypeError at runtime. Return an empty dict.
    return {}
__docformat__ = "numpy"
import logging
import os
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
from openbb_terminal.cryptocurrency.due_diligence import coinpaprika_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# pylint: disable=inconsistent-return-statements
# pylint: disable=C0302, too-many-lines
# Valid sort columns for the coin Twitter timeline table (display_twitter).
TWEETS_FILTERS = ["date", "user_name", "status", "retweet_count", "like_count"]
# Valid sort columns for the coin events table (display_events).
EVENTS_FILTERS = ["date", "date_to", "name", "description", "is_conference"]
# Valid sort columns for the coin exchanges table (display_exchanges).
EX_FILTERS = ["id", "name", "adjusted_volume_24h_share", "fiats"]
# Valid sort columns for the coin markets table (display_markets).
MARKET_FILTERS = [
    "pct_volume_share",
    "exchange",
    "pair",
    "trust_score",
    "volume",
    "price",
]
# Quote currencies accepted by the CoinPaprika endpoints used in this module.
CURRENCIES = [
    "BTC",
    "ETH",
    "USD",
    "EUR",
    "PLN",
    "KRW",
    "GBP",
    "CAD",
    "JPY",
    "RUB",
    "TRY",
    "NZD",
    "AUD",
    "CHF",
    "UAH",
    "HKD",
    "SGD",
    "NGN",
    "PHP",
    "MXN",
    "BRL",
    "THB",
    "CLP",
    "CNY",
    "CZK",
    "DKK",
    "HUF",
    "IDR",
    "ILS",
    "INR",
    "MYR",
    "NOK",
    "PKR",
    "SEK",
    "TWD",
    "ZAR",
    "VND",
    "BOB",
    "COP",
    "PEN",
    "ARS",
    "ISK",
]
@log_start_end(log=logger)
def display_twitter(
    symbol: str = "BTC",
    limit: int = 10,
    sortby: str = "date",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Show the Twitter timeline for a coin (at most the last 50 tweets).
    [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    limit: int
        Number of records to display
    sortby: str
        Column to sort by; any column name is valid
    ascend: bool
        Sort ascending when True
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = coinpaprika_model.get_coin_twitter_timeline(symbol, sortby, ascend)
    if df.empty:
        console.print(f"Couldn't find any tweets for coin {symbol}", "\n")
        return
    print_rich_table(
        df.head(limit),
        title="Twitter Timeline",
        headers=list(df.columns),
        show_index=False,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "twitter", df)
@log_start_end(log=logger)
def display_events(
    symbol: str = "BTC",
    limit: int = 10,
    sortby: str = "date",
    ascend: bool = False,
    links: bool = False,
    export: str = "",
) -> None:
    """Show a table of all events for a coin. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    limit: int
        Number of records to display
    sortby: str
        Column to sort by (see the CoinPaprika events endpoint for valid names)
    ascend: bool
        Sort ascending when True
    links: bool
        When True show only date/name/link columns; otherwise hide the link column
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = coinpaprika_model.get_coin_events_by_id(symbol, sortby, ascend)
    if df.empty:
        console.print(f"Couldn't find any events for coin {symbol}\n")
        return
    # Keep an untouched copy for export; the displayed frame is trimmed below.
    full_df = df.copy()
    display_df = df[["date", "name", "link"]] if links else df.drop("link", axis=1)
    print_rich_table(
        display_df.head(limit),
        headers=list(display_df.columns),
        show_index=False,
        title="All Events",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "events", full_df)
@log_start_end(log=logger)
def display_exchanges(
    symbol: str = "btc",
    limit: int = 10,
    sortby: str = "adjusted_volume_24h_share",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Show all exchanges that list a given coin. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    limit: int
        Number of records to display
    sortby: str
        Column to sort by (see https://api.coinpaprika.com/v1 for valid names)
    ascend: bool
        Sort ascending when True
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = coinpaprika_model.get_coin_exchanges_by_id(symbol, sortby, ascend)
    if df.empty:
        console.print("No data found", "\n")
        return
    print_rich_table(
        df.head(limit),
        title="All Exchanges",
        headers=list(df.columns),
        show_index=False,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "ex", df)
@log_start_end(log=logger)
def display_markets(
    from_symbol: str = "BTC",
    to_symbol: str = "USD",
    limit: int = 20,
    sortby: str = "pct_volume_share",
    ascend: bool = True,
    links: bool = False,
    export: str = "",
) -> None:
    """Show all markets for a given coin. [Source: CoinPaprika]

    Parameters
    ----------
    from_symbol: str
        Cryptocurrency symbol (e.g. BTC)
    to_symbol: str
        Quoted currency
    limit: int
        Number of records to display
    sortby: str
        Column to sort by (see https://api.coinpaprika.com/v1 for valid names)
    ascend: bool
        Sort ascending when True
    links: bool
        When True show the market_url column; otherwise hide it
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = coinpaprika_model.get_coin_markets_by_id(
        from_symbol, to_symbol, sortby, ascend
    )
    if df.empty:
        console.print("There is no data \n")
        return
    # Keep an untouched copy for export; the displayed frame is trimmed below.
    full_df = df.copy()
    display_df = (
        df[["exchange", "pair", "trust_score", "market_url"]]
        if links
        else df.drop("market_url", axis=1)
    )
    print_rich_table(
        display_df.head(limit),
        headers=list(display_df.columns),
        show_index=False,
        title="All Markets",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "mkt", full_df)
@log_start_end(log=logger)
def display_price_supply(
    from_symbol: str = "BTC",
    to_symbol: str = "USD",
    export: str = "",
) -> None:
    """Show ticker information (price/supply) for a single coin. [Source: CoinPaprika]

    Parameters
    ----------
    from_symbol: str
        Cryptocurrency symbol (e.g. BTC)
    to_symbol: str
        Quoted currency
    export: str
        Export dataframe data to csv,json,xlsx
    """
    df = coinpaprika_model.get_tickers_info_for_coin(from_symbol, to_symbol)
    if df.empty:
        console.print("No data found", "\n")
        return
    # Humanize every cell (e.g. 1200000 -> 1.2M) before rendering.
    df = df.applymap(lambda_long_number_format_with_type_check)
    print_rich_table(
        df, title="Coin Information", headers=list(df.columns), show_index=False
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "ps", df)
@log_start_end(log=logger)
def display_basic(
    symbol: str = "BTC",
    export: str = "",
) -> None:
    """Show basic coin information: name, symbol, rank, type, description,
    platform, proof_type, contract, tags, parent. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Cryptocurrency symbol (e.g. BTC)
    export: str
        Export dataframe data to csv,json,xlsx
    """
    df = coinpaprika_model.basic_coin_info(symbol)
    if df.empty:
        console.print("No data available\n")
        return
    print_rich_table(
        df, title="Basic Coin Information", headers=list(df.columns), show_index=False
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "basic", df)
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.common.behavioural_analysis.finbrain_model import get_sentiment
from openbb_terminal.common.behavioural_analysis.finbrain_view import (
lambda_sentiment_coloring,
)
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal import rich_config
logger = logging.getLogger(__name__)
# Parent directory of this module's package -- presumably the cryptocurrency
# package root, where the data/ folder lives. TODO confirm layout.
PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
    # Symbols FinBrain provides sentiment for, loaded from the bundled JSON.
    COINS_JSON = pd.read_json(PATH + "/data/finbrain_coins.json")
    COINS = COINS_JSON["SYMBOL"].tolist()
except ValueError:
    # Missing or malformed JSON: fall back to None so callers can detect it.
    COINS = None
@log_start_end(log=logger)
def display_crypto_sentiment_analysis(
    symbol: str,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Sentiment analysis from FinBrain for Cryptocurrencies

    FinBrain collects the news headlines from 15+ major financial news
    sources on a daily basis and analyzes them to generate sentiment scores
    for more than 4500 US stocks. FinBrain Technologies develops deep learning
    algorithms for financial analysis and prediction, which currently serves
    traders from more than 150 countries all around the world.
    [Source: https://finbrain.tech]

    Parameters
    ----------
    symbol: str
        Cryptocurrency
    raw : bool
        Display raw table data instead of the plot
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    sentiment = get_sentiment(f"{symbol}-USD")  # Currently only USD pairs are available
    if sentiment.empty:
        console.print(f"Couldn't find Sentiment Data for {symbol}\n")
        return
    if not raw:
        # This plot has 1 axis
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # One scatter marker per sentiment observation.
        for index, row in sentiment.iterrows():
            ax.scatter(
                index, float(row["Sentiment Analysis"]), s=75, color="white", zorder=3
            )
        # Zero line separates bullish from bearish sentiment.
        ax.axhline(y=0, linestyle="--")
        ax.set_xlabel("Time")
        ax.set_ylabel("Finbrain's Sentiment Score")
        start_date = sentiment.index[0].strftime("%Y/%m/%d")
        ax.set_title(
            f"FinBrain's Sentiment Analysis for {symbol}-USD since {start_date}"
        )
        # Scores are in [-1, 1]; pad slightly for the markers.
        ax.set_ylim([-1.1, 1.1])
        senValues = np.array(pd.to_numeric(sentiment["Sentiment Analysis"].values))
        # NOTE(review): 0 * len(sentiment) is the scalar 0, so senNone is a
        # 0-d array; the comparisons below rely on NumPy broadcasting.
        senNone = np.array(0 * len(sentiment))
        # Shade negative sentiment regions in the "down" theme color...
        ax.fill_between(
            sentiment.index,
            pd.to_numeric(sentiment["Sentiment Analysis"].values),
            0,
            where=(senValues < senNone),
            alpha=0.30,
            color=theme.down_color,
            interpolate=True,
        )
        # ...and non-negative regions in the "up" theme color.
        ax.fill_between(
            sentiment.index,
            pd.to_numeric(sentiment["Sentiment Analysis"].values),
            0,
            where=(senValues >= senNone),
            alpha=0.30,
            color=theme.up_color,
            interpolate=True,
        )
        theme.style_primary_axis(ax)
        if external_axes is None:
            theme.visualize_output()
    else:
        sentiment.sort_index(ascending=True, inplace=True)
        # Colorize the raw score column only when color output is enabled.
        if rich_config.USE_COLOR:
            console.print(
                sentiment["Sentiment Analysis"]
                .apply(lambda_sentiment_coloring, last_val=0)
                .to_string(),
                "\n",
            )
        else:
            console.print(sentiment.to_string(), "\n")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "finbrain",
        sentiment,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import plot_order_book
from openbb_terminal.cryptocurrency.due_diligence.binance_model import (
get_balance,
get_order_book,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_order_book(
    from_symbol: str,
    limit: int = 100,
    to_symbol: str = "USDT",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots order book for currency. [Source: Binance]

    Parameters
    ----------
    from_symbol: str
        Cryptocurrency symbol
    limit: int
        Limit parameter. Adjusts the weight
    to_symbol: str
        Quote currency (what to view coin vs)
    export: str
        Export dataframe data to csv,json,xlsx
    external_axes : Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list), by default None
    """
    market_book = get_order_book(from_symbol, limit, to_symbol)
    bids = np.asarray(market_book["bids"], dtype=float)
    asks = np.asarray(market_book["asks"], dtype=float)
    # Append a cumulative-size column (index 2) to each side of the book;
    # the ask sizes are accumulated from the far end, as in the original.
    bids = np.column_stack((bids, np.cumsum(bids[:, 1])))
    asks = np.column_stack((asks, np.cumsum(np.flipud(asks[:, 1]))))
    plot_order_book(bids, asks, to_symbol, external_axes)
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "book", market_book)
@log_start_end(log=logger)
def display_balance(
    from_symbol: str, to_symbol: str = "USDT", export: str = ""
) -> None:
    """Show account holdings for an asset. [Source: Binance]

    Parameters
    ----------
    from_symbol: str
        Cryptocurrency
    to_symbol: str
        Cryptocurrency
    export: str
        Export dataframe data to csv,json,xlsx
    """
    holdings = get_balance(from_symbol, to_symbol)
    if holdings is None or holdings.empty:
        console.print("[red]No data found[/red]\n")
        return
    total = np.sum(holdings["Amount"])
    console.print(f"\nYou currently have {total} coins and the breakdown is:")
    print_rich_table(
        holdings,
        title="Account Holdings for Assets",
        headers=holdings.columns,
        show_index=True,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "book", holdings)
import logging
from datetime import datetime, timedelta
from typing import Optional
import requests
import pandas as pd
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.cryptocurrency.discovery.pycoingecko_model import read_file_data
logger = logging.getLogger(__name__)
def get_slug(symbol: str) -> str:
    """Look up the Santiment slug for a coin ticker.

    Raises IndexError when the ticker is not in the mapping file.
    """
    mapping = pd.DataFrame(read_file_data("santiment_slugs.json"))
    matches = mapping.loc[mapping["ticker"] == symbol.upper(), "slug"]
    return matches.values[0]
@log_start_end(log=logger)
@check_api_key(["API_SANTIMENT_KEY"])
def get_github_activity(
    symbol: str,
    dev_activity: bool = False,
    interval: str = "1d",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Returns a list of developer activity for a given coin and time interval.

    [Source: https://santiment.net/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check github activity
    dev_activity: bool
        Whether to filter only for development activity
    interval : str
        Interval frequency (e.g., 1d)
    start_date : Optional[str]
        Initial date like string (e.g., 2021-10-01); defaults to one year ago
    end_date : Optional[str]
        End date like string (e.g., 2021-10-01); defaults to now

    Returns
    -------
    pd.DataFrame
        developer activity over time, indexed by datetime (empty on error)
    """
    # Default window: the last 365 days, as ISO-8601 UTC-style timestamps.
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=365)).strftime(
            "%Y-%m-%dT%H:%M:%SZ"
        )
    if end_date is None:
        end_date = (datetime.now()).strftime("%Y-%m-%dT%H:%M:%SZ")
    # Metric name sent to the API; see the dev_activity parameter above.
    activity_type = "dev_activity" if dev_activity else "github_activity"
    slug = get_slug(symbol)
    headers = {
        "Content-Type": "application/graphql",
        "Authorization": f"Apikey {cfg.API_SANTIMENT_KEY}",
    }
    # GraphQL query assembled as a raw string (posted as the request body).
    data = (
        f'\n{{ getMetric(metric: "{activity_type}"){{ timeseriesData( slug: "{slug}"'
        f'from: "{start_date}" to: "{end_date}" interval: "{interval}"){{ datetime value }} }} }}'
    )
    response = requests.post(
        "https://api.santiment.net/graphql", headers=headers, data=data
    )
    df = pd.DataFrame()
    if response.status_code == 200:
        if "getMetric" in response.json()["data"]:
            df = pd.DataFrame(response.json()["data"]["getMetric"]["timeseriesData"])
            df["datetime"] = pd.to_datetime(df["datetime"])
            df = df.set_index("datetime")
        else:
            console.print(f"Could not find github activity found for {symbol}\n")
    elif response.status_code == 400:
        # NOTE(review): assumes the 400 payload has errors.details (a string)
        # or an "error" key -- confirm against Santiment's error schema.
        if "Apikey" in response.json()["errors"]["details"]:
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(f"Error in request: {response.json()['error']}", "\n")
    return df
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import plot_order_book
from openbb_terminal.cryptocurrency.due_diligence import coinbase_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def display_order_book(
    symbol: str,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot the order book for a Coinbase trading pair. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    bids, asks, pair, market_book = coinbase_model.get_order_book(symbol)
    plot_order_book(bids, asks, pair, external_axes)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "book",
        pd.DataFrame(market_book),
    )
@log_start_end(log=logger)
def display_trades(
    symbol: str, limit: int = 20, side: Optional[str] = None, export: str = ""
) -> None:
    """Show the last N trades for a Coinbase trading pair. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH
    limit: int
        Last `limit` of trades. Maximum is 1000.
    side: Optional[str]
        Restrict to the 'sell' or 'buy' side; both sides when None.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    trades = coinbase_model.get_trades(symbol, limit, side)
    print_rich_table(trades, show_index=False, headers=list(trades.columns))
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "trades", trades)
@log_start_end(log=logger)
def display_candles(symbol: str, interval: str = "24hour", export: str = "") -> None:
    """Show candles for a Coinbase trading pair and time interval. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH
    interval: str
        Time interval. One from 1min, 5min ,15min, 1hour, 6hour, 24hour, 1day
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    candles = coinbase_model.get_candles(symbol, interval)
    print_rich_table(
        candles,
        title="Trading Pair Candles",
        headers=list(candles.columns),
        show_index=True,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "candles", candles)
@log_start_end(log=logger)
def display_stats(symbol: str, export: str = "") -> None:
    """Show 24 hr stats for a product. Volume is in base currency units;
    open, high and low are in quote currency units. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    stats = coinbase_model.get_product_stats(symbol)
    print_rich_table(
        stats,
        title=f"Coinbase:{symbol.upper()} 24 hr Product Stats",
        headers=list(stats.columns),
        show_index=False,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "stats", stats)
import logging
import os
from typing import List, Optional
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import ticker
from openbb_terminal.config_terminal import theme
from openbb_terminal import config_plot as cfgPlot
from openbb_terminal.cryptocurrency.due_diligence.coinglass_model import (
get_liquidations,
get_funding_rate,
get_open_interest_per_exchange,
)
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
plot_autoscale,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_COINGLASS_KEY"])
def display_funding_rate(symbol: str, export: str = "") -> None:
    """Plot the per-exchange funding rate for a cryptocurrency.
    [Source: https://coinglass.github.io/API-Reference/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to search funding rate (e.g., BTC)
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    rates = get_funding_rate(symbol)
    if rates.empty:
        return
    plot_data(rates, symbol, f"Exchange {symbol} Funding Rate", "Funding Rate [%]")
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "fundrate", rates)
@log_start_end(log=logger)
@check_api_key(["API_COINGLASS_KEY"])
def display_open_interest(symbol: str, interval: int = 0, export: str = "") -> None:
    """Plot per-exchange futures open interest for a cryptocurrency.
    [Source: https://coinglass.github.io/API-Reference/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to search open interest (e.g., BTC)
    interval : int
        Frequency (possible values are: 0 for ALL, 2 for 1H, 1 for 4H, 4 for 12H), by default 0
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    interest = get_open_interest_per_exchange(symbol, interval)
    if interest.empty:
        return
    plot_data(
        interest,
        symbol,
        f"Exchange {symbol} Futures Open Interest",
        "Open futures value [$B]",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "oi", interest)
@log_start_end(log=logger)
@check_api_key(["API_COINGLASS_KEY"])
def display_liquidations(symbol: str, export: str = "") -> None:
    """Plot daily long/short liquidations for a given cryptocurrency.

    [Source: https://coinglass.github.io/API-Reference/#liquidation-chart]

    Parameters
    ----------
    symbol : str
        Crypto symbol to search open interest (e.g., BTC)
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    liq_df = get_liquidations(symbol)
    if liq_df.empty:
        # API returned nothing -> nothing to plot or export.
        return

    plot_data_bar(
        liq_df,
        symbol,
        f"Total liquidations for {symbol}",
        "Liquidations value [$M]",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "liquidations",
        liq_df,
    )
@log_start_end(log=logger)
def plot_data(
    df: pd.DataFrame,
    symbol: str = "",
    title: str = "",
    ylabel: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Draw a stacked-area chart of every non-price column on a top axis
    and the coin price on a bottom axis.

    Parameters
    ----------
    df : pd.DataFrame
        Data to plot; must contain a ``price`` column — all remaining
        columns are stacked on the upper axis.
    symbol : str
        Coin ticker used for the price legend/label.
    title : str
        Title for the upper axis (skipped when empty).
    ylabel : str
        Y-label for the upper axis (skipped when empty).
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # This plot has 2 axes
    if external_axes:
        if not is_valid_axes_count(external_axes, 2):
            return
        ax1, ax2 = external_axes
    else:
        _, (ax1, ax2) = plt.subplots(
            2, 1, sharex=True, figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI
        )

    price_df = df[["price"]].copy()
    stack_df = df.drop("price", axis=1)

    ax1.stackplot(
        stack_df.index,
        stack_df.transpose().to_numpy(),
        labels=stack_df.columns.tolist(),
    )
    ax1.get_yaxis().set_major_formatter(
        ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    ax1.legend(stack_df.columns, fontsize="x-small", ncol=2)
    if title:
        ax1.set_title(title)
    if ylabel:
        ax1.set_ylabel(ylabel)

    ax2.plot(price_df.index, price_df)
    if symbol:
        ax2.legend([f"{symbol} price"])
        ax2.set_ylabel(f"{symbol} Price [$]")
    ax2.set_xlim([price_df.index[0], price_df.index[-1]])
    ax2.set_ylim(bottom=0.0)

    theme.style_primary_axis(ax1)
    theme.style_primary_axis(ax2)

    if not external_axes:
        theme.visualize_output()
@log_start_end(log=logger)
def plot_data_bar(
    df: pd.DataFrame,
    symbol: str = "",
    title: str = "",
    ylabel: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot long/short bars on a top axis and the coin price on a bottom axis.

    Shorts are negated so they render below the x-axis, mirroring the longs.

    Parameters
    ----------
    df : pd.DataFrame
        Data to plot; must contain ``price``, ``Shorts`` and ``Longs`` columns.
    symbol : str
        Coin ticker used for the price legend/label.
    title : str
        Title for the upper axis (skipped when empty).
    ylabel : str
        Y-label for the upper axis (skipped when empty).
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # This plot has 2 axes
    if not external_axes:
        _, axes = plt.subplots(
            2, 1, sharex=True, figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return

    df_price = df[["price"]].copy()
    df_without_price = df.drop("price", axis=1)
    # Flip shorts so they are drawn below the axis.
    df_without_price["Shorts"] = df_without_price["Shorts"] * -1
    ax1.bar(
        df_without_price.index,
        df_without_price["Shorts"],
        label="Shorts",
        color=theme.down_color,
    )
    ax1.bar(
        df_without_price.index,
        df_without_price["Longs"],
        label="Longs",
        color=theme.up_color,
    )
    ax1.get_yaxis().set_major_formatter(
        ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    # BUGFIX: build the legend from the bar artists' own `label=` kwargs.
    # The previous explicit label list (`df_without_price.columns`) overrode
    # those labels in artist order, so any column ordering other than
    # (Shorts, Longs) would mislabel the series.
    ax1.legend(fontsize="x-small", ncol=2)
    if title:
        ax1.set_title(title)
    if ylabel:
        ax1.set_ylabel(ylabel)
    ax2.plot(df_price.index, df_price)
    if symbol:
        ax2.legend([f"{symbol} price"])
        ax2.set_ylabel(f"{symbol} Price [$]")
    ax2.set_xlim([df_price.index[0], df_price.index[-1]])
    ax2.set_ylim(bottom=0.0)
    theme.style_primary_axis(ax1)
    theme.style_primary_axis(ax2)
    if not external_axes:
        theme.visualize_output()
import logging
import os
from datetime import datetime
from typing import List, Optional
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib.lines import Line2D
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import check_api_key
from openbb_terminal import config_plot as cfgPlot
from openbb_terminal.cryptocurrency.due_diligence.glassnode_model import (
get_active_addresses,
get_exchange_balances,
get_exchange_net_position_change,
get_hashrate,
get_non_zero_addresses,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def display_active_addresses(
    symbol: str,
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    interval: str = "24h",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot active addresses of a certain symbol over time.

    [Source: https://glassnode.org]

    Parameters
    ----------
    symbol : str
        Asset to search active addresses (e.g., BTC)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD
    interval : str
        Interval frequency (possible values are: 24h, 1w, 1month)
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df_addresses = get_active_addresses(symbol, interval, start_date, end_date)
    if df_addresses.empty:
        return

    # This plot has 1 axis
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return
        (ax,) = external_axes
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)

    # Counts are shown in thousands to keep the axis readable.
    ax.plot(df_addresses.index, df_addresses["v"] / 1_000, linewidth=1.5)
    ax.set_title(f"Active {symbol} addresses over time")
    ax.set_ylabel("Addresses [thousands]")
    ax.set_xlim(df_addresses.index[0], df_addresses.index[-1])
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "active",
        df_addresses,
    )
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def display_non_zero_addresses(
    symbol: str,
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot addresses with a non-zero balance of a certain symbol.

    [Source: https://glassnode.org]

    Parameters
    ----------
    symbol : str
        Asset to search (e.g., BTC)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df_addresses = get_non_zero_addresses(symbol, start_date, end_date)
    if df_addresses.empty:
        return

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(df_addresses.index, df_addresses["v"] / 1_000, linewidth=1.5)
    ax.set_title(f"{symbol} Addresses with non-zero balances")
    # BUGFIX: values are plotted divided by 1 000, but the label previously
    # read "Number of Addresses" (raw count). Label the scaled unit instead,
    # consistent with display_active_addresses.
    ax.set_ylabel("Addresses [thousands]")
    ax.set_xlim(df_addresses.index[0], df_addresses.index[-1])
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "nonzero",
        df_addresses,
    )
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def display_exchange_net_position_change(
    symbol: str,
    exchange: str = "binance",
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot the 30d change of the supply held in exchange wallets.

    [Source: https://glassnode.org]

    Parameters
    ----------
    symbol : str
        Asset to search active addresses (e.g., BTC)
    exchange : str
        Exchange to check net position change (possible values are: aggregated, binance,
        bittrex, coinex, gate.io, gemini, huobi, kucoin, poloniex, bibox, bigone, bitfinex,
        hitbtc, kraken, okex, bithumb, zb.com, cobinhood, bitmex, bitstamp, coinbase, coincheck, luno)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df_addresses = get_exchange_net_position_change(
        symbol, exchange, start_date, end_date
    )
    if df_addresses.empty:
        return

    # This plot has 1 axis
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return
        (ax,) = external_axes
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)

    # Outflows (negative) in the theme's down colour, inflows in the up colour.
    below = df_addresses[df_addresses["v"] < 0]
    above = df_addresses[df_addresses["v"] >= 0]
    ax.fill_between(
        below.index,
        below["v"].values / 1e3,
        np.zeros(len(below)),
        facecolor=theme.down_color,
    )
    ax.fill_between(
        above.index,
        above["v"].values / 1e3,
        np.zeros(len(above)),
        facecolor=theme.up_color,
    )

    ax.set_ylabel(f"30d change of {symbol} supply held in exchange wallets [thousands]")
    ax.set_title(
        f"{symbol}: Exchange Net Position Change - {'all exchanges' if exchange == 'aggregated' else exchange}"
    )
    ax.set_xlim(df_addresses.index[0], df_addresses.index[-1])
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "change",
        df_addresses,
    )
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def display_exchange_balances(
    symbol: str,
    exchange: str = "aggregated",
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    percentage: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot the total amount of coins held on exchange addresses in units and percentage.

    [Source: https://glassnode.org]

    Parameters
    ----------
    symbol : str
        Asset to search active addresses (e.g., BTC)
    exchange : str
        Exchange to check net position change (possible values are: aggregated, binance, bittrex,
        coinex, gate.io, gemini, huobi, kucoin, poloniex, bibox, bigone, bitfinex, hitbtc, kraken,
        okex, bithumb, zb.com, cobinhood, bitmex, bitstamp, coinbase, coincheck, luno), by default "aggregated"
    start_date : Optional[str], optional
        Initial date (format YYYY-MM-DD) by default 2 years ago
    end_date : Optional[str], optional
        Final date (format YYYY-MM-DD) by default 1 year ago
    percentage : bool
        Show percentage instead of stacked value.
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.crypto.dd.eb_chart(symbol="BTC")
    """
    df_balance = get_exchange_balances(symbol, exchange, start_date, end_date)
    if df_balance.empty:
        return

    # This plot has 2 axes
    if external_axes:
        if not is_valid_axes_count(external_axes, 2):
            return
        (ax1, ax2) = external_axes
    else:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
        ax2 = ax1.twinx()

    # Either the share of total supply (%) or the raw balance in thousands.
    if percentage:
        balance_series = df_balance["percentage"] * 100
    else:
        balance_series = df_balance["stacked"] / 1000
    ax1.plot(df_balance.index, balance_series)
    ax1.set_ylabel(f"{symbol} units [{'%' if percentage else 'thousands'}]")
    ax1.set_title(
        f"{symbol}: Total Balance in {'all exchanges' if exchange == 'aggregated' else exchange}"
    )
    ax1.tick_params(axis="x", labelrotation=10)
    ax1.legend([f"{symbol} Unit"], loc="upper right")

    ax2.grid(visible=False)
    ax2.plot(df_balance.index, df_balance["price"], color="orange")
    ax2.set_ylabel(f"{symbol} price [$]")
    ax2.legend([f"{symbol} Price"], loc="upper left")

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "eb",
        df_balance,
    )
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def display_hashrate(
    symbol: str,
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    interval: str = "24h",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot the mean hashrate of the BTC or ETH blockchain together with the price.

    [Source: https://glassnode.org]

    Parameters
    ----------
    symbol : str
        Blockchain to check mean hashrate (BTC or ETH)
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD
    interval : str
        Interval frequency (possible values are: 24, 1w, 1month)
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df = get_hashrate(symbol, interval, start_date, end_date)
    if df.empty:
        return

    # This plot has 2 axes
    if external_axes:
        if not is_valid_axes_count(external_axes, 2):
            return
        (ax1, ax2) = external_axes
    else:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
        ax2 = ax1.twinx()

    # Hashrate in terahashes/second on the primary axis.
    ax1.plot(
        df.index, df["hashrate"] / 1_000_000_000_000, color=theme.down_color, lw=0.8
    )
    ax1.yaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.0f}T"))
    ax1.set_ylabel(f"{symbol} hashrate (Terahashes/second)")
    ax1.set_title(f"{symbol}: Mean hashrate")
    ax1.tick_params(axis="x", labelrotation=10)

    # Price in thousands of dollars on the secondary axis.
    ax2.set_xlim(left=df.index[0])
    ax2.grid(visible=False)
    ax2.plot(df.index, df["price"] / 1_000, color=theme.up_color, lw=0.8)
    ax2.yaxis.set_major_formatter(ticker.StrMethodFormatter("${x:.1f}k"))
    ax2.set_ylabel(f"{symbol} price [$]")

    # Manually construct the chart legend (colours must match the plot calls).
    handles = [
        Line2D([0], [0], color=shade) for shade in (theme.up_color, theme.down_color)
    ]
    ax2.legend(handles, ["Price", "Hash Rate"])

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "hr",
        df,
    )
__docformat__ = "numpy"
# pylint:disable=unsupported-assignment-operation
import logging
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import regex as re
from pycoingecko import CoinGeckoAPI
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_replace_underscores_in_column_names,
)
from openbb_terminal.cryptocurrency.discovery.pycoingecko_model import read_file_data
from openbb_terminal.cryptocurrency.pycoingecko_helpers import (
DENOMINATION,
calc_change,
create_dictionary_with_prefixes,
filter_list,
find_discord,
remove_keys,
rename_columns_in_dct,
)
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
CHANNELS = {
"telegram_channel_identifier": "telegram",
"twitter_screen_name": "twitter",
"subreddit_url": "subreddit",
"bitcointalk_thread_identifier": "bitcointalk",
"facebook_username": "facebook",
"discord": "discord",
}
BASE_INFO = [
"id",
"name",
"symbol",
"asset_platform_id",
"description",
"contract_address",
"market_cap_rank",
"public_interest_score",
]
def format_df(df: pd.DataFrame):
    """Render both market-cap columns as locale-grouped integer strings.

    The values are truncated to ``int`` before formatting, so the columns
    become strings after this call. The frame is modified in place and
    returned for convenience.
    """
    for column in ("Potential Market Cap ($)", "Current Market Cap ($)"):
        # Bind `column` as a default to make the closure capture explicit.
        df[column] = df.apply(lambda row, col=column: f"{int(row[col]):n}", axis=1)
    return df
@log_start_end(log=logger)
def get_coin_potential_returns(
    main_coin: str,
    to_symbol: Union[str, None] = None,
    limit: Union[int, None] = None,
    price: Union[int, None] = None,
) -> pd.DataFrame:
    """Fetch data to calculate potential returns of a certain coin. [Source: CoinGecko]

    Exactly one comparison mode is used, checked in this order:
    top-`limit` coins by market cap, a single `to_symbol` coin, or a
    user-supplied target `price`.

    Parameters
    ----------
    main_coin : str
        Coin loaded to check potential returns for (e.g., algorand)
    to_symbol : str | None
        Coin to compare main_coin with (e.g., bitcoin)
    limit : int | None
        Number of coins with highest market cap to compare main_coin with (e.g., 5)
    price
        Target price of main_coin to check potential returns (e.g., 5)

    Returns
    -------
    pd.DataFrame
        Potential returns data
        Columns: Coin, Current Price, Target Coin, Potential Price, Potential Market Cap ($), Change (%)
    """
    client = CoinGeckoAPI()
    columns = [
        "Coin",
        "Current Price ($)",
        "Current Market Cap ($)",
        "Target Coin",
        "Potential Price ($)",
        "Potential Market Cap ($)",
        "Change (%)",
    ]
    # All `get_price` calls use the same flags; only `ids` varies.
    price_kwargs = dict(
        vs_currencies="usd",
        include_market_cap=True,
        include_24hr_vol=False,
        include_24hr_change=False,
        include_last_updated_at=False,
    )

    def _build_row(base, target_id, target_mcap):
        """One comparison row: scale the base price by the market-cap gap."""
        change_pct = calc_change(target_mcap, base["usd_market_cap"])
        projected_price = base["usd"] * (1 + change_pct / 100)
        return [
            main_coin,
            base["usd"],
            base["usd_market_cap"],
            target_id,
            projected_price,
            target_mcap,
            change_pct,
        ]

    if limit and limit > 0:  # user wants to compare with top coins
        data = client.get_price(ids=f"{main_coin}", **price_kwargs)
        top_coins_data = client.get_coins_markets(
            vs_currency="usd", per_page=limit, order="market_cap_desc"
        )
        base = data[main_coin]
        rows = [
            _build_row(base, coin["id"], coin["market_cap"]) for coin in top_coins_data
        ]
        return format_df(pd.DataFrame(data=rows, columns=columns))

    if to_symbol:  # user passed a coin
        data = client.get_price(ids=f"{main_coin},{to_symbol}", **price_kwargs)
        base = data[main_coin]
        target = data[to_symbol]
        # Fall through (like the original) when either payload is empty.
        if base and target:
            row = _build_row(base, to_symbol, target["usd_market_cap"])
            return format_df(pd.DataFrame(data=[row], columns=columns))

    if price and price > 0:  # user passed a price
        data = client.get_price(ids=main_coin, **price_kwargs)
        base = data[main_coin]
        if base:
            # Market cap scales linearly with price for a fixed supply.
            final_market_cap = base["usd_market_cap"] * price / base["usd"]
            row = _build_row(base, main_coin, final_market_cap)
            return format_df(pd.DataFrame(data=[row], columns=columns))

    return pd.DataFrame()
@log_start_end(log=logger)
def check_coin(symbol: str):
    """Return the CoinGecko id for `symbol`, matching by id or ticker.

    Returns None when no coin in the bundled list matches.
    """
    for entry in read_file_data("coingecko_coins.json"):
        if symbol in (entry["id"], entry["symbol"]):
            return entry["id"]
    return None
@log_start_end(log=logger)
def get_coin_market_chart(
    symbol: str = "", vs_currency: str = "usd", days: int = 30, **kwargs: Any
) -> pd.DataFrame:
    """Get prices for given coin. [Source: CoinGecko]

    Parameters
    ----------
    vs_currency: str
        currency vs which display data
    days: int
        number of days to display the data
    kwargs
        unspecified keyword arguments

    Returns
    -------
    pd.DataFrame
        Prices for given coin
        Columns: time, price, currency
    """
    raw = CoinGeckoAPI().get_coin_market_chart_by_id(
        symbol, vs_currency, days, **kwargs
    )
    df = pd.DataFrame(data=raw["prices"], columns=["time", "price"])
    # Timestamps arrive as epoch milliseconds.
    df["time"] = pd.to_datetime(df.time, unit="ms")
    df = df.set_index("time")
    df["currency"] = vs_currency
    return df
@log_start_end(log=logger)
def get_coin_tokenomics(symbol: str = "") -> pd.DataFrame:
    """Get tokenomics for given coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol: str
        coin symbol to check tokenomics

    Returns
    -------
    pd.DataFrame
        Metric, Value with tokenomics
    """
    coin_data = CoinGeckoAPI().get_coin_by_id(symbol)
    market = coin_data["market_data"]
    metrics = {
        "Block time [min]": coin_data["block_time_in_minutes"],
        "Total Supply": market["total_supply"],
        "Max Supply": market["max_supply"],
        "Circulating Supply": market["circulating_supply"],
    }
    return pd.DataFrame(
        {"Metric": list(metrics.keys()), "Value": list(metrics.values())}
    )
class Coin:
"""Coin class, it holds loaded coin"""
@log_start_end(log=logger)
def __init__(self, symbol: str, load_from_api: bool = True):
self.client = CoinGeckoAPI()
if load_from_api:
self._coin_list = self.client.get_coins_list()
else:
self._coin_list = read_file_data("coingecko_coins.json")
self.coin_symbol, self.symbol = self._validate_coin(symbol)
if self.coin_symbol:
self.coin: Dict[Any, Any] = self._get_coin_info()
else:
pass
@log_start_end(log=logger)
def __str__(self):
return f"{self.coin_symbol}"
@log_start_end(log=logger)
def _validate_coin(
self,
search_coin: str,
) -> Tuple[Optional[Any], Optional[Any]]:
"""Validate if given coin symbol or id exists in list of available coins on CoinGecko.
If yes it returns coin id. [Source: CoinGecko]
Parameters
----------
symbol: str
Either coin symbol or coin id
Returns
----------
Tuple[Optional[Any], Optional[Any]]
- str with coin
- str with symbol
"""
coin = None
symbol = None
for dct in self._coin_list:
if search_coin.lower() in [dct["symbol"], dct["id"]]:
coin = dct.get("id")
symbol = dct.get("symbol")
return coin, symbol
return None, None
@log_start_end(log=logger)
def coin_list(self) -> List[Dict[str, Any]]:
"""List all available coins [Source: CoinGecko]
Returns
----------
List[Dict[str, Any]]
list of all available coin ids
"""
return [token.get("id") for token in self._coin_list]
@log_start_end(log=logger)
def _get_coin_info(self) -> Dict[str, Any]:
"""Helper method which fetch the coin information by id from CoinGecko API like:
(name, price, market, ... including exchange tickers) [Source: CoinGecko]
Returns
----------
Dict[str, Any]
Coin information
"""
params = dict(localization="false", tickers="false", sparkline=True)
return self.client.get_coin_by_id(self.coin_symbol, **params)
@log_start_end(log=logger)
def _get_links(self) -> Dict[str, Any]:
"""Helper method that extracts links from coin [Source: CoinGecko]
Returns
----------
Dict[str, Any]
Links related to coin
"""
return self.coin.get("links", {})
@log_start_end(log=logger)
def get_repositories(self) -> Optional[Dict[str, Any]]:
"""Get list of all repositories for given coin [Source: CoinGecko]
Returns
----------
Dict[str, Any]
Repositories related to coin
"""
return self._get_links().get("repos_url")
@log_start_end(log=logger)
def get_developers_data(self) -> pd.DataFrame:
"""Get coin development data from GitHub or BitBucket like:
number of pull requests, contributor etc [Source: CoinGecko]
Returns
----------
pd.DataFrame
Developers Data
Columns: Metric, Value
"""
dev = self.coin.get("developer_data", {})
useless_keys = (
"code_additions_deletions_4_weeks",
"last_4_weeks_commit_activity_series",
)
remove_keys(useless_keys, dev)
df = pd.Series(dev).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_blockchain_explorers(self) -> Union[pd.DataFrame, Any]:
"""Get list of URLs to blockchain explorers for given coin. [Source: CoinGecko]
Returns
----------
pd.DataFrame
Blockchain Explorers
Columns: Metric, Value
"""
blockchain = self._get_links().get("blockchain_site")
if blockchain:
dct = filter_list(blockchain)
df = pd.Series(dct).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
return None
@log_start_end(log=logger)
def get_social_media(self) -> pd.DataFrame:
"""Get list of URLs to social media like twitter, facebook, reddit... [Source: CoinGecko]
Returns
----------
pd.DataFrame
Urls to social media
Columns: Metric, Value
"""
social_dct = {}
links = self._get_links()
for (
channel
) in CHANNELS.keys(): # pylint: disable=consider-iterating-dictionary)
if channel in links:
value = links.get(channel, "")
if channel == "twitter_screen_name":
value = "https://twitter.com/" + value
elif channel == "bitcointalk_thread_identifier" and value is not None:
value = f"https://bitcointalk.org/index.php?topic={value}"
social_dct[channel] = value
social_dct["discord"] = find_discord(links.get("chat_url"))
dct = rename_columns_in_dct(social_dct, CHANNELS)
df = pd.Series(dct).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_websites(self) -> pd.DataFrame:
"""Get list of URLs to websites like homepage of coin, forum. [Source: CoinGecko]
Returns
----------
pd.DataFrame
Urls to website, homepage, forum
Columns: Metric, Value
"""
websites_dct = {}
links = self._get_links()
sites = ["homepage", "official_forum_url", "announcement_url"]
for site in sites:
websites_dct[site] = filter_list(links.get(site))
df = pd.Series(websites_dct).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Value"] = df["Value"].apply(lambda x: ",".join(x))
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logging)
def get_categories(self) -> Union[Dict[Any, Any], List[Any]]:
"""Coins categories. [Source: CoinGecko]
Returns
----------
Union[Dict[Any, Any], List[Any]]
Coin categories
"""
return self.coin.get("categories", {})
@log_start_end(log=logger)
def _get_base_market_data_info(self) -> Union[Dict[str, Any], Any]:
"""Helper method that fetches all the base market/price information about given coin. [Source: CoinGecko]
Returns
----------
Dict[str, Any]
All market related information for given coin
"""
market_dct = {}
market_data = self.coin.get("market_data", {})
for stat in [
"total_supply",
"max_supply",
"circulating_supply",
"price_change_percentage_24h",
"price_change_percentage_7d",
"price_change_percentage_30d",
]:
market_dct[stat] = market_data.get(stat)
prices = create_dictionary_with_prefixes(
["current_price"], market_data, DENOMINATION
)
market_dct.update(prices)
return market_dct
@log_start_end(log=logger)
def get_base_info(self) -> pd.DataFrame:
"""Get all the base information about given coin. [Source: CoinGecko]
Returns
----------
pd.DataFrame
Base information about coin
"""
regx = r'<a href="(.+?)">|</a>'
results = {}
for attr in BASE_INFO:
info_obj = self.coin.get(attr, {})
if attr == "description":
info_obj = info_obj.get("en")
info_obj = re.sub(regx, "", info_obj)
info_obj = re.sub(r"\r\n\r\n", " ", info_obj)
results[attr] = info_obj
results.update(self._get_base_market_data_info())
df = pd.Series(results).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_market_data(self) -> pd.DataFrame:
"""Get all the base market information about given coin. [Source: CoinGecko]
Returns
----------
pd.DataFrame
Base market information about coin
Metric,Value
"""
market_data = self.coin.get("market_data", {})
market_columns_denominated = [
"market_cap",
"fully_diluted_valuation",
"total_volume",
"high_24h",
"low_24h",
]
denominated_data = create_dictionary_with_prefixes(
market_columns_denominated, market_data, DENOMINATION
)
market_single_columns = [
"market_cap_rank",
"total_supply",
"max_supply",
"circulating_supply",
"price_change_percentage_24h",
"price_change_percentage_7d",
"price_change_percentage_30d",
"price_change_percentage_60d",
"price_change_percentage_1y",
"market_cap_change_24h",
]
single_stats = {col: market_data.get(col) for col in market_single_columns}
single_stats.update(denominated_data)
if (
(single_stats["total_supply"] is not None)
and (single_stats["circulating_supply"] is not None)
and (single_stats["total_supply"] != 0)
):
single_stats["circulating_supply_to_total_supply_ratio"] = (
single_stats["circulating_supply"] / single_stats["total_supply"]
)
else:
single_stats["circulating_supply_to_total_supply_ratio"] = np.nan
df = pd.Series(single_stats).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_all_time_high(self, currency: str = "usd") -> pd.DataFrame:
"""Get all time high data for given coin. [Source: CoinGecko]
Returns
----------
pd.DataFrame
All time high price data
Metric,Value
"""
market_data = self.coin.get("market_data", {})
if market_data == {}:
return pd.DataFrame()
ath_columns = [
"current_price",
"ath",
"ath_date",
"ath_change_percentage",
]
results = {column: market_data[column].get(currency) for column in ath_columns}
df = pd.Series(results).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
df["Metric"] = df["Metric"].apply(lambda x: x.replace("Ath", "All Time High"))
df["Metric"] = df["Metric"] + f" {currency.upper()}"
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_all_time_low(self, currency: str = "usd") -> pd.DataFrame:
"""Get all time low data for given coin. [Source: CoinGecko]
Returns
----------
pd.DataFrame
All time low price data
Metric,Value
"""
market_data = self.coin.get("market_data", {})
if market_data == {}:
return pd.DataFrame()
ath_columns = [
"current_price",
"atl",
"atl_date",
"atl_change_percentage",
]
results = {column: market_data[column].get(currency) for column in ath_columns}
df = pd.Series(results).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
df["Metric"] = df["Metric"].apply(lambda x: x.replace("Atl", "All Time Low"))
df["Metric"] = df["Metric"] + f" {currency.upper()}"
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_scores(self) -> pd.DataFrame:
"""Get different kind of scores for given coin. [Source: CoinGecko]
Returns
----------
pd.DataFrame
Social, community, sentiment scores for coin
Metric,Value
"""
score_columns = [
"coingecko_rank",
"coingecko_score",
"developer_score",
"community_score",
"liquidity_score",
"sentiment_votes_up_percentage",
"sentiment_votes_down_percentage",
"public_interest_score",
"community_data",
"public_interest_stats",
]
single_stats = {col: self.coin.get(col) for col in score_columns[:-2]}
nested_stats = {}
for col in score_columns[-2:]:
_dct = self.coin.get(col, {})
for k, _ in _dct.items():
nested_stats[k] = _dct.get(k, {})
single_stats.update(nested_stats)
df = pd.Series(single_stats).reset_index()
df.replace({0: ""}, inplace=True)
df = df.fillna("")
df.columns = ["Metric", "Value"]
# pylint: disable=unsupported-assignment-operation
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_coin_market_chart(
    self, vs_currency: str = "usd", days: int = 30, **kwargs: Any
) -> pd.DataFrame:
    """Get prices for given coin. [Source: CoinGecko]

    Parameters
    ----------
    vs_currency: str
        currency vs which display data
    days: int
        number of days to display the data
    kwargs
        Extra arguments forwarded to the CoinGecko client

    Returns
    -------
    pd.DataFrame
        Prices for given coin
        Columns: time, price, currency
    """
    response = self.client.get_coin_market_chart_by_id(
        self.coin_symbol, vs_currency, days, **kwargs
    )
    # API returns [timestamp_ms, price] pairs under the "prices" key
    df = pd.DataFrame(response["prices"], columns=["time", "price"])
    df["time"] = pd.to_datetime(df["time"], unit="ms")
    df = df.set_index("time")
    df["currency"] = vs_currency
    return df
@log_start_end(log=logger)
def get_ohlc(self, vs_currency: str = "usd", days: int = 90) -> pd.DataFrame:
    """Get Open, High, Low, Close prices for given coin. [Source: CoinGecko]

    Parameters
    ----------
    vs_currency: str
        currency vs which display data
    days: int
        number of days to display the data
        on from (1/7/14/30/90/180/365, max)

    Returns
    -------
    pd.DataFrame
        OHLC data for coin
        Columns: time, open, high, low, close, currency
    """
    raw_ohlc = self.client.get_coin_ohlc_by_id(self.coin_symbol, vs_currency, days)
    df = pd.DataFrame(raw_ohlc, columns=["time", "open", "high", "low", "close"])
    # Timestamps come back in epoch milliseconds
    df["time"] = pd.to_datetime(df["time"], unit="ms")
    df = df.set_index("time")
    df["currency"] = vs_currency
    return df
@log_start_end(log=logger)
def get_ohlc(symbol: str, vs_currency: str = "usd", days: int = 90) -> pd.DataFrame:
    """Get Open, High, Low, Close prices for given coin. [Source: CoinGecko]

    Parameters
    ----------
    symbol: str
        CoinGecko coin id (e.g. bitcoin)
    vs_currency: str
        currency vs which display data
    days: int
        number of days to display the data
        on from (1/7/14/30/90/180/365, max)

    Returns
    -------
    pd.DataFrame
        OHLC data for coin, indexed by date
        Columns: Open, High, Low, Close
    """
    client = CoinGeckoAPI()
    raw_ohlc = client.get_coin_ohlc_by_id(symbol, vs_currency, days)
    df = pd.DataFrame(raw_ohlc, columns=["date", "Open", "High", "Low", "Close"])
    # Timestamps come back in epoch milliseconds
    df["date"] = pd.to_datetime(df["date"], unit="ms")
    return df.set_index("date")
__docformat__ = "numpy"
import logging
from typing import Any, Optional, Tuple
import numpy as np
import pandas as pd
from openbb_terminal.cryptocurrency.coinbase_helpers import (
check_validity_of_product,
make_coinbase_request,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def show_available_pairs_for_given_symbol(symbol: str = "ETH") -> Tuple[str, list]:
    """Return all available quoted assets for given symbol. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Uppercase symbol of coin e.g BTC, ETH, UNI, LUNA, DOT ...

    Returns
    -------
    Tuple[str, list]
        Symbol and list of available pairs (empty list when symbol is invalid)
    """
    # FIX: validate the symbol type BEFORE hitting the API; the original
    # fetched the full /products list and only then rejected bad input,
    # wasting a network round-trip.
    if not isinstance(symbol, str):
        console.print(
            f"You did not provide correct symbol {symbol}. Symbol needs to be a string.\n"
        )
        return symbol, []

    pairs = make_coinbase_request("/products")
    df = pd.DataFrame(pairs)[["base_currency", "quote_currency"]]
    coin_df = df[df["base_currency"] == symbol.upper()]
    return symbol, coin_df["quote_currency"].to_list()
@log_start_end(log=logger)
def get_trading_pair_info(symbol: str) -> pd.DataFrame:
    """Get information about chosen trading pair. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH

    Returns
    -------
    pd.DataFrame
        Basic information about given trading pair
    """
    product_id = check_validity_of_product(symbol)
    pair_info = make_coinbase_request(f"/products/{product_id}")
    # One row per product attribute, as a Metric/Value table
    df = pd.Series(pair_info).to_frame().reset_index()
    df.columns = ["Metric", "Value"]
    console.print(df)
    return df
@log_start_end(log=logger)
def get_order_book(symbol: str) -> Tuple[np.ndarray, np.ndarray, str, dict]:
    """Get orders book for chosen trading pair. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH

    Returns
    -------
    Tuple[np.array, np.array, str, dict]
        array with bid prices, order sizes and cumulative order sizes
        array with ask prices, order sizes and cumulative order sizes
        trading pair
        dict with raw data
    """
    # TODO: Order price > current price. E.g current price 200 USD, sell order with 10000 USD
    # makes chart look very ugly (bad scaling). Think about removing outliers or add log scale ?
    product_id = check_validity_of_product(symbol)
    market_book = make_coinbase_request(f"/products/{product_id}/book?level=2")
    # Truncate both sides to the shorter one so the arrays can be plotted together
    size = min(
        len(market_book["bids"]), len(market_book["asks"])
    )  # arrays needs to have equal size.
    market_book["bids"] = market_book["bids"][:size]
    market_book["asks"] = market_book["asks"][:size]
    # "sequence" is API metadata, not an order level — drop it from the raw dict
    market_book.pop("sequence")
    # NOTE(review): each level presumably is [price, size, num_orders] — confirm
    # against the Coinbase level-2 book schema. The [:size] slice is redundant
    # here (already truncated above) but harmless.
    bids = np.asarray(market_book["bids"], dtype=float)[:size]
    asks = np.asarray(market_book["asks"], dtype=float)[:size]
    # Append a cumulative (size * num_orders) column as column 3; asks are
    # reversed first so the running total accumulates away from the spread.
    bids = np.insert(bids, 3, (bids[:, 1] * bids[:, 2]).cumsum(), axis=1)
    asks = np.insert(asks, 3, np.flipud(asks[:, 1] * asks[:, 2]).cumsum(), axis=1)
    # Drop the num_orders column, leaving [price, size, cumulative]
    bids = np.delete(bids, 2, axis=1)
    asks = np.delete(asks, 2, axis=1)
    return bids, asks, product_id, market_book
@log_start_end(log=logger)
def get_trades(
    symbol: str, limit: int = 1000, side: Optional[Any] = None
) -> pd.DataFrame:
    """Get last N trades for chosen trading pair. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH
    limit: int
        Last `limit` of trades. Maximum is 1000.
    side: str
        You can chose either sell or buy side. If side is not set then all trades will be displayed.

    Returns
    -------
    pd.DataFrame
        Last N trades for chosen trading pairs.
    """
    request_params = {"limit": limit}
    # Only filter by side when a recognized value is supplied
    if side in ("buy", "sell"):
        request_params["side"] = side

    product_id = check_validity_of_product(symbol)
    trades = make_coinbase_request(
        f"/products/{product_id}/trades", params=request_params
    )
    return pd.DataFrame(trades)[["time", "price", "size", "side"]]
@log_start_end(log=logger)
def get_candles(symbol: str, interval: str = "24hour") -> pd.DataFrame:
    """Get candles for chosen trading pair and time interval. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH
    interval: str
        Time interval. One from 1min, 5min ,15min, 1hour, 6hour, 24hour, 1day

    Returns
    -------
    pd.DataFrame
        Candles for chosen trading pair.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.crypto.dd.candle(symbol="eth-usdt", interval="24hour")
    """
    # Interval label -> granularity in seconds, as required by the API
    interval_map = {
        "1min": 60,
        "5min": 300,
        "15min": 900,
        "1hour": 3600,
        "6hour": 21600,
        "24hour": 86400,
        "1day": 86400,
    }
    granularity = interval_map.get(interval)
    if granularity is None:
        console.print(
            f"Wrong interval. Please use on from {list(interval_map.keys())}\n"
        )
        return pd.DataFrame()

    product_id = check_validity_of_product(symbol)
    raw_candles = make_coinbase_request(
        f"/products/{product_id}/candles", params={"granularity": granularity}
    )
    # The API returns [time, low, high, open, close, volume]; reorder to OHLCV
    df = pd.DataFrame(raw_candles)
    df.columns = ["date", "Low", "High", "Open", "Close", "Volume"]
    return df[["date", "Open", "High", "Low", "Close", "Volume"]]
@log_start_end(log=logger)
def get_product_stats(symbol: str) -> pd.DataFrame:
    """Get 24 hr stats for the product. Volume is in base currency units.
    Open, high and low are in quote currency units. [Source: Coinbase]

    Parameters
    ----------
    symbol: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH

    Returns
    -------
    pd.DataFrame
        24h stats for chosen trading pair
    """
    product_id = check_validity_of_product(symbol)
    stats = make_coinbase_request(f"/products/{product_id}/stats")
    # One row per stat, as a Metric/Value table
    stats_df = pd.Series(stats).reset_index()
    stats_df.columns = ["Metric", "Value"]
    return stats_df
__docformat__ = "numpy"
# pylint: disable=C0201
import logging
import os
from datetime import datetime, timedelta
from typing import List, Optional
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib import dates as mdates
from openbb_terminal.config_terminal import theme
from openbb_terminal import feature_flags as obbff
from openbb_terminal.cryptocurrency import cryptocurrency_helpers
from openbb_terminal.decorators import check_api_key
from openbb_terminal import config_plot as cfgPlot
from openbb_terminal.cryptocurrency.due_diligence.messari_model import (
get_available_timeseries,
get_fundraising,
get_governance,
get_investors,
get_links,
get_marketcap_dominance,
get_messari_timeseries,
get_project_product_info,
get_roadmap,
get_team,
get_tokenomics,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.cryptocurrency.dataframe_helpers import prettify_paragraph
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_messari_timeseries_list(
    limit: int = 10,
    query: str = "",
    only_free: bool = True,
    export: str = "",
) -> None:
    """Prints table showing messari timeseries list
    [Source: https://messari.io/]

    Parameters
    ----------
    limit : int
        number to show
    query : str
        Query to search across all messari timeseries
    only_free : bool
        Display only timeseries available for free
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = get_available_timeseries(only_free)
    if df.empty:
        console.print("\nUnable to retrieve data from Messari.\n")
        return

    if query:
        # Case-insensitive literal (non-regex) match over title and description
        matches = np.column_stack(
            [
                df[column].str.contains(query, na=False, regex=False, case=False)
                for column in ["Title", "Description"]
            ]
        )
        df = df.loc[matches.any(axis=1)]

    if df.empty:
        console.print(f"\nNo timeseries found with query {query}\n")
        return

    print_rich_table(
        df.head(limit),
        index_name="ID",
        headers=list(df.columns),
        show_index=True,
        title="Messari Timeseries",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "mt",
        df,
    )
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_messari_timeseries(
    symbol: str,
    timeseries_id: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    interval: str = "1d",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots messari timeseries
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check market cap dominance
    timeseries_id: str
        Obtained by api.crypto.dd.get_mt command
    start_date : Optional[str]
        Initial date like string (e.g., 2021-10-01)
    end_date : Optional[str]
        End date like string (e.g., 2021-10-01)
    interval : str
        Interval frequency (possible values are: 5m, 15m, 30m, 1h, 1d, 1w)
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Default window: trailing year ending today
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=365)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    df, title = get_messari_timeseries(
        symbol=symbol,
        timeseries_id=timeseries_id,
        start_date=start_date,
        end_date=end_date,
        interval=interval,
    )
    if not df.empty:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Human-readable y tick labels (K/M/B style suffixes)
        ax.get_yaxis().set_major_formatter(
            ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
        )
        # The model returns a single data column; plot it whatever its name is
        ax.plot(df.index, df[df.columns[0]])
        ax.set_title(f"{symbol}'s {title}")
        ax.set_ylabel(title)
        ax.set_xlim(df.index[0], df.index[-1])
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "mt",
            df,
        )
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_marketcap_dominance(
    symbol: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    interval: str = "1d",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots market dominance of a coin over time
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check market cap dominance
    start_date : Optional[str]
        Initial date like string (e.g., 2021-10-01)
    end_date : Optional[str]
        End date like string (e.g., 2021-10-01)
    interval : str
        Interval frequency (possible values are: 5m, 15m, 30m, 1h, 1d, 1w)
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Default window: trailing year ending today
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=365)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df = get_marketcap_dominance(
        symbol=symbol, start_date=start_date, end_date=end_date, interval=interval
    )
    if df.empty:
        return

    # Use the caller-provided axis when valid, otherwise build a fresh figure
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(df.index, df["marketcap_dominance"])
    ax.set_title(f"{symbol}'s Market Cap Dominance over time")
    ax.set_ylabel(f"{symbol} Percentage share")
    ax.set_xlim(df.index[0], df.index[-1])
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "mcapdom",
        df,
    )
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_links(symbol: str, export: str = "") -> None:
    """Prints table showing coin links
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check links
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # NOTE: this function only prints/exports a table; the docstring previously
    # documented an `external_axes` parameter that does not exist here.
    df = get_links(symbol)
    if not df.empty:
        print_rich_table(
            df,
            headers=list(df.columns),
            show_index=False,
            title=f"{symbol} Links",
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "links",
            df,
        )
    else:
        console.print("\nUnable to retrieve data from Messari.\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_roadmap(
    symbol: str,
    ascend: bool = True,
    limit: int = 5,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots coin roadmap
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check roadmap
    ascend: bool
        reverse order
    limit : int
        number to show
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = get_roadmap(symbol, ascend)
    if not df.empty:
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
            title=f"{symbol} Roadmap",
        )
        # ~12 years of daily prices to overlay roadmap events on
        df_prices, _ = cryptocurrency_helpers.load_yf_data(
            symbol=symbol,
            currency="USD",
            days=4380,
            interval="1d",
        )
        if not df_prices.empty:
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                return
            # All roadmap dates (unparseable ones become NaT via errors="coerce")
            roadmap_dates = np.array(
                pd.to_datetime(df["Date"], format="%Y-%m-%d", errors="coerce")
            )
            df_copy = df
            df_copy["Date"] = pd.to_datetime(
                df_copy["Date"], format="%Y-%m-%d", errors="coerce"
            )
            df_copy = df_copy[df_copy["Date"].notnull()]
            # Titles for events that fall inside the plotted price window
            titles = list(df_copy[df_copy["Date"] > df_prices.index[0]]["Title"])
            # NOTE(review): `titles` excludes rows with unparseable dates while
            # `roadmap_dates` keeps them — if any Date fails to parse, the
            # counter/title alignment below could drift; confirm upstream data.
            roadmap_dates = mdates.date2num(roadmap_dates)
            counter = 0
            max_price = df_prices["Close"].max()
            # Label each in-window event vertically at 70% of the max price
            for x in roadmap_dates:
                if x > mdates.date2num(df_prices.index[0]):
                    ax.text(
                        x,
                        max_price * 0.7,
                        titles[counter],
                        rotation=-90,
                        verticalalignment="center",
                        size=6,
                    )
                    counter += 1
            # Vertical markers for every roadmap date
            ax.vlines(
                x=roadmap_dates,
                color="orange",
                ymin=0,
                ymax=max_price,
            )
            ax.plot(df_prices.index, df_prices["Close"].values)
            ax.get_yaxis().set_major_formatter(
                ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
            )
            ax.set_title(f"{symbol.upper()} Price and Roadmap")
            ax.set_ylabel("Price [$]")
            ax.set_xlim(df_prices.index[0], df_prices.index[-1])
            theme.style_primary_axis(ax)
            if not external_axes:
                theme.visualize_output()
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "rm",
            df,
        )
    else:
        console.print("\nUnable to retrieve data from Messari.\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_tokenomics(
    symbol: str,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plots coin tokenomics
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check tokenomics
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    coingecko_id = cryptocurrency_helpers.get_coingecko_id(symbol)
    df, circ_df = get_tokenomics(symbol, coingecko_id)
    if not df.empty and not circ_df.empty:
        # Shorten large numbers (e.g. 1200000 -> 1.20 M) for the table view
        df = df.applymap(lambda x: lambda_long_number_format(x, 2))
        print_rich_table(
            df,
            headers=list(df.columns),
            show_index=False,
            title=f"{symbol} Tokenomics",
        )
        # This plot has 2 axes: circulating supply (left) and price (right)
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
            ax2 = ax.twinx()
        elif is_valid_axes_count(external_axes, 2):
            (ax, ax2) = external_axes
        else:
            return
        df_prices, _ = cryptocurrency_helpers.load_yf_data(
            symbol=symbol,
            currency="USD",
            days=4380,
            interval="1d",
        )
        # Align supply and price series on their date index
        merged_df = pd.concat([circ_df, df_prices], axis=1)
        color_palette = theme.get_colors()
        ax.plot(
            merged_df.index,
            merged_df["circulating_supply"],
            color=color_palette[0],
            label="Circ Supply",
        )
        # Dummy series so "Price" also appears in the left-axis legend
        ax.plot(np.nan, label="Price", color=color_palette[1])
        if not df_prices.empty:
            ax2.plot(merged_df.index, merged_df["Close"], color=color_palette[1])
            ax2.get_yaxis().set_major_formatter(
                ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
            )
            ax2.set_ylabel(f"{symbol} price [$]")
            theme.style_twin_axis(ax2)
            ax2.yaxis.set_label_position("right")
            ax.legend()
        ax.get_yaxis().set_major_formatter(
            ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
        )
        ax.set_title(f"{symbol} circulating supply over time")
        ax.set_ylabel("Number of tokens")
        ax.set_xlim(merged_df.index[0], merged_df.index[-1])
        theme.style_primary_axis(ax)
        ax.yaxis.set_label_position("left")
        # NOTE(review): legend may already have been drawn above when prices
        # exist — this second call looks redundant; confirm before removing.
        ax.legend()
        if not external_axes:
            theme.visualize_output()
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "tk",
            df,
        )
    else:
        console.print("\nUnable to retrieve data from Messari.\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_project_info(
    symbol: str,
    export: str = "",
) -> None:
    """Prints table showing project info
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check project info
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df_info, df_repos, df_audits, df_vulns = get_project_product_info(symbol)

    # Print each section (or a not-found notice) in a fixed order
    sections = {
        "General Info": df_info,
        "Public Repos": df_repos,
        "Audits": df_audits,
        "Vulnerabilities": df_vulns,
    }
    for title, table in sections.items():
        if table.empty:
            console.print(f"\n{title} not found\n")
        else:
            print_rich_table(
                table,
                headers=list(table.columns),
                show_index=False,
                title=f"{symbol} {title}",
            )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "pi",
        df_info,
    )
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_investors(
    symbol: str,
    export: str = "",
) -> None:
    """Prints table showing coin investors
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check coin investors
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df_individuals, df_organizations = get_investors(symbol)

    if df_individuals.empty and df_organizations.empty:
        console.print("\nInvestors not found\n")
        return

    if df_individuals.empty:
        console.print("\nIndividual investors not found\n")
    else:
        print_rich_table(
            df_individuals,
            headers=list(df_individuals.columns),
            show_index=False,
            title=f"{symbol} Investors - Individuals",
        )

    if df_organizations.empty:
        console.print("\nInvestors - Organizations not found\n")
    else:
        print_rich_table(
            df_organizations,
            headers=list(df_organizations.columns),
            show_index=False,
            title=f"{symbol} Investors - Organizations",
        )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "inv",
        df_individuals,
    )
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_team(
    symbol: str,
    export: str = "",
) -> None:
    """Prints table showing coin team
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check coin team
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df_individuals, df_organizations = get_team(symbol)

    if df_individuals.empty and df_organizations.empty:
        console.print("\nTeam not found\n")
        return

    if df_individuals.empty:
        console.print("\nIndividual team members not found\n")
    else:
        print_rich_table(
            df_individuals,
            headers=list(df_individuals.columns),
            show_index=False,
            title=f"{symbol} Team - Individuals",
        )

    if df_organizations.empty:
        console.print("\nTeam organizations not found\n")
    else:
        print_rich_table(
            df_organizations,
            headers=list(df_organizations.columns),
            show_index=False,
            title=f"{symbol} Team - Organizations",
        )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "team",
        df_individuals,
    )
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_governance(
    symbol: str,
    export: str = "",
) -> None:
    """Prints table showing coin governance
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check coin governance
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    summary, df = get_governance(symbol)

    if summary:
        # Reflow the summary paragraph for terminal display
        console.print(prettify_paragraph(summary), "\n")

    if df.empty:
        console.print(f"\n{symbol} governance details not found\n")
        return

    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title=f"{symbol} Governance details",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "gov",
        df,
    )
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_fundraising(
    symbol: str,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Display coin fundraising
    [Source: https://messari.io/]

    Parameters
    ----------
    symbol : str
        Crypto symbol to check coin fundraising
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    (summary, df_sales_rounds, df_treasury_accs, df_details) = get_fundraising(symbol)
    if summary:
        summary = prettify_paragraph(summary)
        console.print(summary, "\n")
    if not df_sales_rounds.empty:
        df_sales_rounds = df_sales_rounds.applymap(
            lambda x: lambda_long_number_format(x, 2)
        )
        print_rich_table(
            df_sales_rounds,
            headers=list(df_sales_rounds.columns),
            show_index=False,
            title=f"{symbol} Sales Rounds",
        )
    else:
        console.print("\nSales rounds not found\n")
    if not df_treasury_accs.empty:
        print_rich_table(
            df_treasury_accs,
            headers=list(df_treasury_accs.columns),
            show_index=False,
            title=f"{symbol} Treasury Accounts",
        )
    else:
        console.print("\nTreasury accounts not found\n")
    if not df_details.empty:
        # Build the pie-chart slices from the three distribution percentages
        values = []
        labels = []
        investors = df_details.loc[df_details["Metric"] == "Investors [%]"][
            "Value"
        ].item()
        founders = df_details.loc[df_details["Metric"] == "Organization/Founders [%]"][
            "Value"
        ].item()
        # FIX: this value was previously wrapped in a 1-tuple by a stray
        # trailing comma and accessed via airdrops[0]; use the scalar directly.
        airdrops = df_details.loc[df_details["Metric"] == "Rewards/Airdrops [%]"][
            "Value"
        ].item()
        if isinstance(investors, (int, float)) and investors > 0:
            values.append(investors)
            labels.append("Investors")
        if isinstance(founders, (int, float)) and founders > 0:
            values.append(founders)
            labels.append("Organization/Founders")
        if isinstance(airdrops, (int, float)) and airdrops > 0:
            values.append(airdrops)
            labels.append("Rewards/Airdrops")
        if len(values) > 0 and sum(values) > 0:
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                # FIX: previously fell through with ``ax`` unbound, raising
                # UnboundLocalError at ax.pie when external_axes was invalid.
                return
            ax.pie(
                [s / 100 for s in values],
                normalize=False,
                labels=labels,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                autopct="%1.0f%%",
                startangle=90,
                colors=theme.get_colors()[1:4],
            )
            ax.set_title(f"{symbol} Fundraising Distribution")
            if obbff.USE_ION:
                plt.ion()
            plt.show()
        df_details.fillna("-", inplace=True)
        print_rich_table(
            df_details,
            headers=list(df_details.columns),
            show_index=False,
            title=f"{symbol} Fundraising Details",
        )
    else:
        console.print("\nFundraising details not found\n")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "fr",
        df_details,
    )
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.overview.tokenterminal_model import (
get_fundamental_metrics,
METRICS,
TIMELINES,
CATEGORIES,
)
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_TOKEN_TERMINAL_KEY"])
def display_fundamental_metrics(
    metric: str,
    category: str = "",
    timeline: str = "24h",
    ascend: bool = False,
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display fundamental metrics [Source: Token Terminal]

    Parameters
    ----------
    metric : str
        The metric of interest. See `get_possible_metrics()` for available metrics.
    category : str
        The category of interest. See `get_possible_categories()` for available categories.
        The default value is an empty string which means that all categories are considered.
    timeline : str
        The timeline of interest. See `get_possible_timelines()` for available timelines.
    ascend : bool
        Direction of the sort. If True, the data is sorted in ascending order.
    limit : int
        The number of rows to display.
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if metric not in METRICS:
        console.print(
            "[red]Invalid metric selected. See available metrics with get_possible_metrics()[/red]\n"
        )
        return
    if category not in CATEGORIES and category != "":
        console.print(
            "[red]Invalid category selected. See available categories with get_possible_categories()[/red]\n"
        )
        return
    if timeline not in TIMELINES:
        console.print(
            "[red]Invalid timeline selected. See available timelines with get_possible_timelines()[/red]\n"
        )
        return

    metric_series = get_fundamental_metrics(
        metric=metric, category=category, timeline=timeline, ascend=ascend
    )
    if metric_series.empty:
        # FIX: the opening rich tag was "[/red]" (a closing tag), so the
        # message rendered with literal markup instead of red text.
        console.print("\n[red]No data found[/red]\n")
        return

    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    # Work out how many factors of 1000 to divide out so bars stay readable;
    # `magnitude` later selects the K/M/B/T/P suffix for the axis label.
    num = max(metric_series[:limit].values)
    magnitude = 0
    while abs(num) >= 1000:
        magnitude += 1
        num /= 1000.0

    ax.bar(
        metric_series[:limit].index,
        metric_series[:limit].values
        if magnitude == 0
        else metric_series[:limit].values / (1000.0**magnitude),
        color=cfg.theme.get_colors(reverse=True)[:limit],
    )
    ax.set_xlabel(category if category else "Dapps and Blockchains")

    # Axis label: follower counts have no currency unit, everything else is USD
    if metric == "twitter_followers":
        if max(metric_series[:limit].values) < 10_000:
            labeltouse = "Followers"
        else:
            labeltouse = f"[1{' KMBTP'[magnitude]}] Followers"
    else:
        if max(metric_series[:limit].values) < 10_000:
            labeltouse = "[USD]"
        else:
            labeltouse = f"[1{' KMBTP'[magnitude]} USD]"
    ax.set_ylabel(f"{metric.replace('_', ' ').capitalize()} {labeltouse}")
    ax.set_title(f"{metric.replace('_', ' ').capitalize()} from past {timeline}")
    plt.xticks(rotation=45)
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "fun",
        metric_series,
    )
import logging
import math
from typing import Any, List
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
# Coin slugs accepted by withdrawalfees.com URL paths (e.g. /coins/bitcoin).
POSSIBLE_CRYPTOS = [
    "bitcoin",
    "ethereum",
    "binance-coin",
    "tether",
    "solana",
    "cardano",
    "usd-coin",
    "xrp",
    "polkadot",
    "terra",
    "dogecoin",
    "avalanche",
    "shiba-inu",
    "polygon",
    "crypto-com-coin",
    "binance-usd",
    "wrapped-bitcoin",
    "litecoin",
    "algorand",
    "chainlink",
    "tron",
    "dai",
    "bitcoin-cash",
    "terrausd",
    "uniswap",
    "stellar",
    "axie-infinity",
    "okb",
    "cosmos",
    "lido-staked-ether",
    "vechain",
    "ftx-token",
    "elrond",
    "internet-computer",
    "filecoin",
    "decentraland",
    "ethereum-classic",
    "hedera",
    "the-sandbox",
    "theta-network",
    "fantom",
    "near",
    "magic-internet-money",
    "gala",
    "bittorrent",
    "monero",
    "tezos",
    "klaytn",
    "the-graph",
    "leo-token",
    "iota",
    "helium",
    "flow",
    "eos",
    "radix",
    "loopring",
    "bitcoin-sv",
    "pancakeswap",
    "olympus",
    "enjin-coin",
    "kusama",
    "amp",
    "aave",
    "stacks",
    "ecash",
    "maker",
    "arweave",
    "quant",
    "thorchain",
    "harmony",
    "zcash",
    "neo",
    "bitcoin-cash-abc",
    "basic-attention-token",
    "waves",
    "kadena",
    "theta-fuel",
    "holo",
    "chiliz",
    "kucoin-token",
    "celsius-network",
    "curve-dao-token",
    "dash",
    "marinade-staked-sol",
    "nexo",
    "compound",
    "celo",
    "huobi-token",
    "wonderland",
    "frax",
    "decred",
    "trueusd",
    "ecomi",
    "e-radix",
    "spell-token",
    "mina-protocol",
    "nem",
    "qtum",
    "sushi",
    "synthetix-network-token",
]
@log_start_end(log=logger)
def get_overall_withdrawal_fees(limit: int = 100) -> pd.DataFrame:
    """Scrapes top coins withdrawal fees
    [Source: https://withdrawalfees.com/]

    Parameters
    ----------
    limit: int
        Number of coins to search, by default n=100, one page has 100 coins, so 1 page is scraped.

    Returns
    -------
    pd.DataFrame
        Coin, Lowest, Average, Median, Highest, Exchanges Compared
    """
    COINS_PER_PAGE = 100

    def _annotate(value):
        # Split "0.0005BTC"-style cells into "0.0005 (BTC)".
        # FIX: check isinstance FIRST — the original evaluated `"." in x`
        # before the type check, which raises TypeError on non-string cells
        # (the sibling get_crypto_withdrawal_fees already used this order).
        if isinstance(value, str) and "." in value:
            dot = value.index(".")
            return f"{value[:dot + 3]} ({value[dot + 3:]})"
        return value

    withdrawal_fees_homepage = BeautifulSoup(
        requests.get(
            "https://withdrawalfees.com/",
            headers={"User-Agent": get_user_agent()},
        ).text,
        "lxml",
    )
    table = withdrawal_fees_homepage.find_all("table")
    tickers_html = withdrawal_fees_homepage.find_all("div", {"class": "name"})
    if table is None or tickers_html is None:
        return pd.DataFrame()

    df = pd.read_html(str(table))[0]
    df["Coin"] = [ticker.text for ticker in tickers_html]
    df["Lowest"] = df["Lowest"].apply(_annotate)

    num_pages = int(math.ceil(limit / COINS_PER_PAGE))
    if num_pages > 1:
        for idx in range(2, num_pages + 1):
            withdrawal_fees_homepage = BeautifulSoup(
                requests.get(
                    f"https://withdrawalfees.com/coins/page/{idx}",
                    headers={"User-Agent": get_user_agent()},
                ).text,
                "lxml",
            )
            table = withdrawal_fees_homepage.find_all("table")
            tickers_html = withdrawal_fees_homepage.find_all("div", {"class": "name"})
            if table is not None and tickers_html is not None:
                new_df = pd.read_html(str(table))[0]
                new_df["Highest"] = new_df["Highest"].apply(_annotate)
                new_df["Coin"] = [ticker.text for ticker in tickers_html]
                # FIX: DataFrame.append was deprecated and removed in pandas 2.0
                df = pd.concat([df, new_df])
    df = df.fillna("")
    return df
@log_start_end(log=logger)
def get_overall_exchange_withdrawal_fees() -> pd.DataFrame:
    """Scrapes exchange withdrawal fees
    [Source: https://withdrawalfees.com/]

    Returns
    -------
    pd.DataFrame
        Exchange, Coins, Lowest, Average, Median, Highest
    """
    response = requests.get(
        "https://withdrawalfees.com/exchanges",
        headers={"User-Agent": get_user_agent()},
    )
    soup = BeautifulSoup(response.text, "lxml")
    table = soup.find_all("table")
    if table is None:
        return pd.DataFrame()
    # First (and only) table on the page holds the per-exchange summary
    return pd.read_html(str(table))[0].fillna("")
@log_start_end(log=logger)
def get_crypto_withdrawal_fees(
    symbol: str,
) -> List[Any]:
    """Scrape one coin's withdrawal fees per exchange.

    [Source: https://withdrawalfees.com/]

    Parameters
    ----------
    symbol: str
        Coin to check withdrawal fees for (slug used in the site URL),
        e.g. "bitcoin".

    Returns
    -------
    List
        - str: Overall statistics (exchanges, lowest, average and median);
          empty string when the page cannot be parsed.
        - pd.DataFrame: Exchange, Withdrawal Fee, Minimum Withdrawal Amount
    """

    def _annotate_fee(value):
        """Turn e.g. '0.0005$2.15' into '0.0005 ($2.15)'.

        isinstance is checked *before* the `in` test so non-string cells
        (e.g. NaN) pass through instead of raising TypeError.
        """
        if isinstance(value, str) and "." in value:
            cut = value.index(".") + 3
            return f"{value[:cut]} ({value[cut:]})"
        return value

    crypto_withdrawal_fees = BeautifulSoup(
        requests.get(
            f"https://withdrawalfees.com/coins/{symbol}",
            headers={"User-Agent": get_user_agent()},
        ).text,
        "lxml",
    )
    table = crypto_withdrawal_fees.find_all("table")
    html_stats = crypto_withdrawal_fees.find("div", {"class": "details"})
    if len(table) == 0 or html_stats is None:
        return ["", pd.DataFrame()]
    df = pd.read_html(str(table))[0]
    df["Withdrawal Fee"] = df["Withdrawal Fee"].apply(_annotate_fee)
    df["Minimum Withdrawal Amount"] = df["Minimum Withdrawal Amount"].apply(
        _annotate_fee
    )
    df = df.fillna("")

    stats = html_stats.find_all("div", recursive=False)
    # guard against a site layout change producing fewer stat boxes
    if len(stats) < 4:
        return ["", df]
    exchanges = stats[0].find("div", {"class": "value"}).text
    lowest = stats[1].find("div", {"class": "value"}).text
    average = stats[2].find("div", {"class": "value"}).text
    median = stats[3].find("div", {"class": "value"}).text
    # typo fixed in the user-facing message ("alowest" -> "a lowest")
    stats_string = (
        f"{symbol} is available on {exchanges} exchanges with a lowest fee of "
    )
    stats_string += f"{lowest}, average of {average} and median of {median}"
    return [stats_string, df]
import logging
import os
from typing import List, Optional
from datetime import datetime
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.overview.blockchaincenter_model import (
DAYS,
get_altcoin_index,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_altcoin_index(
    period: int = 365,
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Display the altcoin index over time as a line chart.

    [Source: https://blockchaincenter.net]

    Parameters
    ----------
    period: int
        Number of days used to measure coin performance when computing the
        altcoin index. E.g., 365 checks yearly performance, 90 seasonal
        (90 days), 30 monthly (30 days). Must be one of the values in
        ``DAYS``; otherwise the function silently does nothing.
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD; defaults to today's date when None
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if end_date is None:
        # default the range to end at today's date
        end_date = datetime.now().strftime("%Y-%m-%d")
    # unsupported periods fall through without any output (see DAYS)
    if period in DAYS:
        df = get_altcoin_index(period, start_date, end_date)
        if df.empty:
            console.print("\nError scraping blockchain central\n")
        else:
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                return
            ax.set_ylabel("Altcoin Index")
            # reference lines: >=75 altcoin season, <=25 bitcoin season
            ax.axhline(y=75, color=theme.up_color, label="Altcoin Season (75)")
            ax.axhline(y=25, color=theme.down_color, label="Bitcoin Season (25)")
            ax.set_title(f"Altcoin Index (Performance based on {period} days)")
            ax.plot(df.index, df["Value"], label="Altcoin Index")
            ax.legend(loc="best")
            theme.style_primary_axis(ax)
            if not external_axes:
                # only show the figure when we created it ourselves
                theme.visualize_output()
            export_data(
                export,
                os.path.dirname(os.path.abspath(__file__)),
                "altindex",
                df,
            )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import squarify
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib import cm
from pandas.plotting import register_matplotlib_converters
from openbb_terminal import config_terminal as cfg
import openbb_terminal.cryptocurrency.overview.pycoingecko_model as gecko
from openbb_terminal import feature_flags as obbff
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# pylint: disable=R0904, C0302
@log_start_end(log=logger)
def display_crypto_heatmap(
    category: str = "",
    limit: int = 15,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Show a treemap of top cryptocurrencies, sized by market cap and
    colored by 24h price change. [Source: CoinGecko]

    Parameters
    ----------
    category: str
        Category (e.g., stablecoins). Empty for no category (default: )
    limit: int
        Number of top cryptocurrencies to display
    export: str
        Export dataframe data to csv,json,xlsx
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = gecko.get_coins(limit, category)
    if df.empty:
        console.print("\nNo cryptocurrencies found\n")
    else:
        df = df.fillna(
            0
        )  # to prevent errors with rounding when values aren't available
        # scale 24h changes by the largest absolute move so values map to [-1, 1]
        max_abs = max(
            -df.price_change_percentage_24h_in_currency.min(),
            df.price_change_percentage_24h_in_currency.max(),
        )
        cmapred = cm.get_cmap("Reds", 100)
        cmapgreen = cm.get_cmap("Greens", 100)
        colors = list()
        # green shades for gainers, red shades otherwise (0 maps to red's lowest)
        for val in df.price_change_percentage_24h_in_currency / max_abs:
            if val > 0:
                colors.append(cmapgreen(round(val * 100)))
            else:
                colors.append(cmapred(-round(val * 100)))
        # This plot has 1 axis
        if external_axes is None:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        category_str = f"[{category}]" if category else ""
        # NOTE: df_copy is an alias, not a copy - the "symbol" relabelling
        # below also mutates df
        df_copy = df
        the_row = "price_change_percentage_24h_in_currency"
        df_copy["symbol"] = df_copy.apply(
            lambda row: f"{row['symbol'].upper()}\n{round(row[the_row], 2)}%",
            axis=1,
        )
        # index needs to get sorted - was matching with different values
        df.sort_index(inplace=True)
        df_copy.sort_index(inplace=True)
        # NOTE(review): `colors` was built before the sort above and is not
        # reordered, and squarify.plot draws on the *current* axes rather
        # than explicitly on `ax` - confirm both are intended
        squarify.plot(
            df["market_cap"],
            alpha=0.5,
            color=colors,
        )
        text_sizes = squarify.normalize_sizes(df["market_cap"], 100, 100)
        rects = squarify.squarify(text_sizes, 0, 0, 100, 100)
        # label each tile at its center, scaling font size with tile area
        for la, r in zip(df_copy["symbol"], rects):
            x, y, dx, dy = r["x"], r["y"], r["dx"], r["dy"]
            ax.text(
                x + dx / 2,
                y + dy / 2,
                la,
                va="center",
                ha="center",
                color="black",
                size=(
                    text_sizes[df_copy.index[df_copy["symbol"] == la].tolist()[0]]
                    ** 0.5
                    * 0.8
                ),
            )
        ax.set_title(f"Top {limit} Cryptocurrencies {category_str}")
        ax.set_axis_off()
        cfg.theme.style_primary_axis(ax)
        if not external_axes:
            cfg.theme.visualize_output()
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "hm",
            df,
        )
@log_start_end(log=logger)
def display_holdings_overview(
    symbol: str, show_bar: bool = False, export: str = "", limit: int = 15
) -> None:
    """Show an overview of public companies that hold ethereum or bitcoin.
    [Source: CoinGecko]

    Parameters
    ----------
    symbol: str
        Cryptocurrency: ethereum or bitcoin
    show_bar : bool
        Whether to show a bar graph for the data
    export: str
        Export dataframe data to csv,json,xlsx
    limit: int
        The number of rows to show
    """
    res = gecko.get_holdings_overview(symbol)
    # res is (overall stats string, holdings DataFrame)
    stats_string = res[0]
    df = res[1]
    df = df.head(limit)
    if df.empty:
        console.print("\nZero companies holding this crypto\n")
    else:
        if show_bar:
            fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            # one bar per company, labelled by its stock ticker
            for _, row in df.iterrows():
                ax.bar(x=row["Symbol"], height=row["Total Holdings"])
            if symbol == "bitcoin":
                ax.set_ylabel("BTC Number")
            else:
                ax.set_ylabel("ETH Number")
            # abbreviate large holdings (e.g. 1.2M) on the y axis
            ax.get_yaxis().set_major_formatter(
                ticker.FuncFormatter(
                    lambda x, _: lambda_long_number_format_with_type_check(x)
                )
            )
            ax.set_xlabel("Company Symbol")
            fig.tight_layout(pad=8)
            if symbol == "bitcoin":
                ax.set_title("Total BTC Holdings per company")
            else:
                ax.set_title("Total ETH Holdings per company")
            ax.tick_params(axis="x", labelrotation=90)
            # NOTE(review): the figure is built but never shown or saved in
            # this function - confirm a plt.show()/visualize_output call
            # isn't missing here
        console.print(f"\n{stats_string}\n")
        # abbreviate numbers before tabular display
        df = df.applymap(lambda x: lambda_long_number_format_with_type_check(x))
        print_rich_table(
            df,
            headers=list(df.columns),
            show_index=False,
            title="Public Companies Holding BTC or ETH",
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "cghold",
            df,
        )
@log_start_end(log=logger)
def display_exchange_rates(
    sortby: str = "Name", ascend: bool = False, limit: int = 15, export: str = ""
) -> None:
    """Print a table of crypto, fiat and commodity exchange rates.
    [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_exchange_rates(sortby, ascend)
    if df.empty:
        console.print("Unable to retrieve data from CoinGecko.")
        return

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Exchange Rates",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "exrates",
        df,
    )
@log_start_end(log=logger)
def display_global_market_info(pie: bool = False, export: str = "") -> None:
    """Show global statistics about crypto. [Source: CoinGecko]

    Displayed statistics include:
    - market cap change
    - number of markets
    - icos
    - number of active crypto
    - market_cap_pct

    Parameters
    ----------
    pie: bool
        Whether to show a pie chart of BTC/ETH/altcoin market-cap shares
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_global_info()
    if not df.empty:
        if pie:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            # slices: BTC / ETH / altcoin market-cap percentages looked up
            # by their "Metric" labels in the stats frame
            ax.pie(
                [
                    round(
                        df.loc[df["Metric"] == "Btc Market Cap In Pct"]["Value"].item(),
                        2,
                    ),
                    round(
                        df.loc[df["Metric"] == "Eth Market Cap In Pct"]["Value"].item(),
                        2,
                    ),
                    round(
                        df.loc[df["Metric"] == "Altcoin Market Cap In Pct"][
                            "Value"
                        ].item(),
                        2,
                    ),
                ],
                labels=["BTC", "ETH", "Altcoins"],
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                autopct="%1.0f%%",
                startangle=90,
            )
            ax.set_title("Market cap distribution")
            if obbff.USE_ION:
                plt.ion()
            plt.show()
        print_rich_table(
            df, headers=list(df.columns), show_index=False, title="Global Statistics"
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "cgglobal",
            df,
        )
    else:
        console.print("Unable to retrieve data from CoinGecko.")
@log_start_end(log=logger)
def display_global_defi_info(export: str = "") -> None:
    """Print global Decentralized Finance statistics. [Source: CoinGecko]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_global_defi_info()
    if df.empty:
        console.print("Unable to retrieve data from CoinGecko.")
        return

    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title="Global DEFI Statistics",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "defi",
        df,
    )
@log_start_end(log=logger)
def display_stablecoins(
    limit: int = 15,
    export: str = "",
    sortby: str = "Market_Cap_[$]",
    ascend: bool = False,
    pie: bool = True,
) -> None:
    """Show stablecoins data. [Source: CoinGecko]

    Parameters
    ----------
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    sortby: str
        Key by which to sort data, default is Market_Cap_[$]
    ascend: bool
        Flag to sort data ascending
    pie: bool
        Whether to show a pie chart, default is True

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.crypto.ov.stables_chart(sortby="Volume_[$]", ascend=True, limit=10)
    """
    df = gecko.get_stable_coins(limit, sortby=sortby, ascend=ascend)
    if not df.empty:
        total_market_cap = int(df["Market_Cap_[$]"].sum())
        # prettify column names for display ("Market_Cap_[$]" -> "Market Cap [$]")
        df.columns = df.columns.str.replace("_", " ")
        if pie:
            # coins below a 1% share are merged into an "Others" slice
            stables_to_display = df[df[f"Percentage [%] of top {limit}"] >= 1]
            other_stables = df[df[f"Percentage [%] of top {limit}"] < 1]
            values_list = list(
                stables_to_display[f"Percentage [%] of top {limit}"].values
            )
            values_list.append(other_stables[f"Percentage [%] of top {limit}"].sum())
            labels_list = list(stables_to_display["Name"].values)
            labels_list.append("Others")
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            ax.pie(
                values_list,
                labels=labels_list,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                labeldistance=1.05,
                autopct="%1.0f%%",
                startangle=90,
            )
            ax.set_title(f"Market cap distribution of top {limit} Stablecoins")
            if obbff.USE_ION:
                plt.ion()
            plt.show()
        console.print(
            f"First {limit} stablecoins have a total "
            f"{lambda_long_number_format_with_type_check(total_market_cap)}"
            # fixed: message previously rendered e.g. "152.1Bdollars"
            # (missing space before "dollars")
            " dollars of market cap.\n"
        )
        # abbreviate large numbers before tabular display
        df = df.applymap(lambda x: lambda_long_number_format_with_type_check(x))
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
            title="Stablecoin Data",
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "cgstables",
            df,
        )
    else:
        console.print("\nUnable to retrieve data from CoinGecko.\n")
@log_start_end(log=logger)
def display_categories(
    sortby: str = "market_cap_desc",
    limit: int = 15,
    export: str = "",
    pie: bool = False,
) -> None:
    """Show top cryptocurrency categories by market capitalization.

    The cryptocurrency category ranking is based on market capitalization.
    [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    limit: int
        Number of records to display
    export: str
        Export dataframe data to csv,json,xlsx file
    pie: bool
        Whether to show the pie chart
    """
    df = gecko.get_top_crypto_categories(sortby)
    # NOTE: df_data is an alias of df (not a copy) - the percentage column
    # added for the pie chart below also appears in the printed table
    df_data = df
    if not df.empty:
        if pie:
            df_data[f"% relative to top {limit}"] = (
                df_data["Market Cap"] / df_data["Market Cap"].sum()
            ) * 100
            # categories below a 1% share are merged into an "Others" slice
            stables_to_display = df_data[df_data[f"% relative to top {limit}"] >= 1]
            other_stables = df_data[df_data[f"% relative to top {limit}"] < 1]
            values_list = list(stables_to_display[f"% relative to top {limit}"].values)
            values_list.append(other_stables[f"% relative to top {limit}"].sum())
            labels_list = list(stables_to_display["Name"].values)
            labels_list.append("Others")
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            ax.pie(
                values_list,
                labels=labels_list,
                wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
                autopct="%1.0f%%",
                startangle=90,
            )
            ax.set_title(f"Market Cap distribution of top {limit} crypto categories")
            if obbff.USE_ION:
                plt.ion()
            plt.show()
        # abbreviate large numbers before tabular display
        df = df.applymap(lambda x: lambda_long_number_format_with_type_check(x))
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            floatfmt=".2f",
            show_index=False,
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "cgcategories",
            df_data,
        )
    else:
        console.print("\nUnable to retrieve data from CoinGecko.\n")
@log_start_end(log=logger)
def display_exchanges(
    sortby: str = "Rank",
    ascend: bool = False,
    limit: int = 15,
    links: bool = False,
    export: str = "",
) -> None:
    """Print the list of top exchanges from CoinGecko. [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    limit: int
        Number of records to display
    links: bool
        Flag to display urls
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_exchanges(sortby, ascend)
    if df.empty:
        console.print("Unable to retrieve data from CoinGecko.")
        return

    # either reduce to rank/name/url, or drop the url column entirely
    if links is True:
        df = df[["Rank", "Name", "Url"]]
    else:
        df.drop("Url", axis=1, inplace=True)

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Top CoinGecko Exchanges",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "exchanges",
        df,
    )
@log_start_end(log=logger)
def display_platforms(
    sortby: str = "Name", ascend: bool = True, limit: int = 15, export: str = ""
) -> None:
    """Print the list of financial platforms. [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_financial_platforms(sortby, ascend)
    if df.empty:
        console.print("\nUnable to retrieve data from CoinGecko.\n")
        return

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Financial Platforms",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "platforms",
        df,
    )
@log_start_end(log=logger)
def display_products(
    sortby: str = "Platform", ascend: bool = False, limit: int = 15, export: str = ""
) -> None:
    """Print the list of financial products. [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_finance_products(sortby=sortby, ascend=ascend)
    if df.empty:
        console.print("Unable to retrieve data from CoinGecko.")
        return

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Financial Products",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "products",
        df,
    )
@log_start_end(log=logger)
def display_indexes(
    sortby: str = "Name", ascend: bool = True, limit: int = 15, export: str = ""
) -> None:
    """Print the list of crypto indexes. [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_indexes(sortby=sortby, ascend=ascend)
    if df.empty:
        console.print("Unable to retrieve data from CoinGecko.")
        return

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Crypto Indexes",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "indexes",
        df,
    )
@log_start_end(log=logger)
def display_derivatives(
    sortby: str = "Rank", ascend: bool = False, limit: int = 15, export: str = ""
) -> None:
    """Print the list of crypto derivatives. [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = gecko.get_derivatives(sortby=sortby, ascend=ascend)
    if df.empty:
        console.print("Unable to retrieve data from CoinGecko.")
        return

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Crypto Derivatives",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "derivatives",
        df,
    )
__docformat__ = "numpy"
import logging
import textwrap
from datetime import datetime
import pandas as pd
from dateutil import parser
from openbb_terminal.cryptocurrency.coinpaprika_helpers import PaprikaSession
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Accepted --sortby keys for the coins-market table (see get_coins_market_info)
MARKETS_FILTERS = [
    "rank",
    "name",
    "symbol",
    "price",
    "volume_24h",
    "mcap_change_24h",
    "pct_change_1h",
    "pct_change_24h",
    "ath_price",
    "pct_from_ath",
]

# Accepted --sortby keys for exchange-market tables (see get_exchanges_market)
EXMARKETS_FILTERS = [
    "pair",
    "base_currency_name",
    "quote_currency_name",
    "category",
    "reported_volume_24h_share",
    "trust_score",
    "market_url",
]

# Accepted --sortby keys for the coin-info table (see get_coins_info)
INFO_FILTERS = [
    "rank",
    "name",
    "symbol",
    "price",
    "volume_24h",
    "circulating_supply",
    "total_supply",
    "max_supply",
    "ath_price",
    "market_cap",
    "beta_value",
]

# Accepted --sortby keys for the exchanges table (see get_list_of_exchanges)
EXCHANGES_FILTERS = [
    "rank",
    "name",
    "currencies",
    "markets",
    "fiats",
    "confidence",
    "volume_24h",
    "volume_7d",
    "volume_30d",
    "sessions_per_month",
]

# Accepted --sortby keys for contract listings (see get_contract_platform)
CONTRACTS_FILTERS = ["id", "type", "active"]
@log_start_end(log=logger)
def get_global_info() -> pd.DataFrame:
    """Fetch global crypto market statistics from CoinPaprika.

    Includes market_cap_usd, volume_24h_usd, bitcoin_dominance_percentage,
    cryptocurrencies_number, market_cap/volume ATH values and dates, 24h
    changes and last_updated. [Source: CoinPaprika]

    Returns
    -------
    pd.DataFrame
        Two columns: Metric, Value
    """
    session = PaprikaSession()
    stats = session.make_request(session.ENDPOINTS["global"])
    # the API reports last_updated as a unix timestamp
    stats["last_updated"] = datetime.fromtimestamp(stats["last_updated"])
    # normalise every *date* field to "YYYY-MM-DD HH:MM:SS"
    for key, raw_value in stats.items():
        if "date" not in key:
            continue
        try:
            parsed = (
                raw_value
                if isinstance(raw_value, datetime)
                else parser.parse(raw_value)
            )
            stats[key] = parsed.strftime("%Y-%m-%d %H:%M:%S")
        except (KeyError, ValueError, TypeError) as e:
            logger.exception(str(e))
            console.print(e)
    df = pd.Series(stats).to_frame().reset_index()
    df.columns = ["Metric", "Value"]
    return df
@log_start_end(log=logger)
def _get_coins_info_helper(symbols: str = "USD") -> pd.DataFrame:
    """Call the CoinPaprika /tickers endpoint and flatten the response.

    The endpoint returns, per coin, identity/supply fields plus a nested
    "quotes.<SYMBOL>" object with price, volume, market cap, percentage
    changes and ATH data. [Source: CoinPaprika]

    Parameters
    ----------
    symbols: str
        Comma separated quotes to return e.g quotes=USD,BTC

    Returns
    -------
    pd.DataFrame
        One row per coin: id, name, symbol, rank, circulating_supply,
        total_supply, max_supply, beta_value, first_data_at, last_updated,
        price, volume_24h, volume_24h_change_24h, market_cap,
        mcap_change_24h, pct_change_{15m,30m,1h,6h,12h,24h,7d,30d,1y},
        ath_price, ath_date, pct_from_ath
    """
    session = PaprikaSession()
    raw = session.make_request(session.ENDPOINTS["tickers"], quotes=symbols)
    data = pd.json_normalize(raw)
    try:
        # strip the "quotes.<SYMBOL>." prefix json_normalize adds, then
        # shorten "percent" to "pct" across all column names
        data.columns = [
            column.replace(f"quotes.{symbols}.", "")
            for column in data.columns.tolist()
        ]
        data.columns = [column.replace("percent", "pct") for column in list(data.columns)]
    except KeyError as e:
        logger.exception(str(e))
        console.print(e)
    data.rename(
        columns={
            "market_cap_change_24h": "mcap_change_24h",
            "pct_from_price_ath": "pct_from_ath",
        },
        inplace=True,
    )
    return data
@log_start_end(log=logger)
def get_coins_info(
    symbols: str = "USD", sortby: str = "rank", ascend: bool = True
) -> pd.DataFrame:  # > format big numbers fix
    """Basic coin information for all coins from CoinPaprika. [Source: CoinPaprika]

    Parameters
    ----------
    symbols: str
        Comma separated quotes to return e.g quotes=USD,BTC
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        rank, name, symbol, price, volume_24h, circulating_supply,
        total_supply, max_supply, market_cap, beta_value, ath_price
    """
    wanted_columns = [
        "rank",
        "name",
        "symbol",
        "price",
        "volume_24h",
        "circulating_supply",
        "total_supply",
        "max_supply",
        "market_cap",
        "beta_value",
        "ath_price",
    ]
    data = _get_coins_info_helper(symbols)[wanted_columns]
    return data.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_coins_market_info(
    symbols: str = "USD", sortby: str = "rank", ascend: bool = True
) -> pd.DataFrame:
    """Market-focused coin information for all coins from CoinPaprika.
    [Source: CoinPaprika]

    Parameters
    ----------
    symbols: str
        Comma separated quotes to return e.g quotes=USD,BTC
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascend

    Returns
    -------
    pd.DataFrame
        rank, name, symbol, price, volume_24h, mcap_change_24h,
        pct_change_1h, pct_change_24h, ath_price, pct_from_ath
    """
    wanted_columns = [
        "rank",
        "name",
        "symbol",
        "price",
        "volume_24h",
        "mcap_change_24h",
        "pct_change_1h",
        "pct_change_24h",
        "ath_price",
        "pct_from_ath",
    ]
    data = _get_coins_info_helper(symbols=symbols)[wanted_columns].sort_values(
        by="rank"
    )
    # NOTE: sorting by "rank" historically inverts the requested direction;
    # preserved for CLI backward compatibility
    sort_ascending = not ascend if sortby == "rank" else ascend
    return data.sort_values(by=sortby, ascending=sort_ascending)
@log_start_end(log=logger)
def get_list_of_exchanges(
    symbols: str = "USD", sortby: str = "rank", ascend: bool = True
) -> pd.DataFrame:
    """
    List exchanges from CoinPaprika API [Source: CoinPaprika]

    Parameters
    ----------
    symbols: str
        Comma separated quotes to return e.g quotes=USD,BTC
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascend

    Returns
    -------
    pd.DataFrame
        Rank, id, name, currencies, markets, fiats, confidence,
        volume_24h, volume_7d, volume_30d, sessions_per_month
        (only currently active exchanges are included)
    """
    session = PaprikaSession()
    exchanges = session.make_request(session.ENDPOINTS["exchanges"], quotes=symbols)
    df = pd.json_normalize(exchanges)
    try:
        # strip the "quotes.<SYMBOL>." prefix json_normalize adds
        df.columns = [
            col.replace(f"quotes.{symbols}.", "") for col in df.columns.tolist()
        ]
    except KeyError as e:
        logger.exception(str(e))
        console.print(e)
    # keep only currently active exchanges
    df = df[df["active"]]
    cols = [
        "adjusted_rank",
        "id",
        "name",
        "currencies",
        "markets",
        "fiats",
        "confidence_score",
        "reported_volume_24h",
        "reported_volume_7d",
        "reported_volume_30d",
        "sessions_per_month",
    ]
    # replace each list of fiat dicts with its count
    # NOTE(review): the comprehension filters on `x` (the whole list), not the
    # element `i` - looks like it was meant to be `if i`; confirm before changing
    df["fiats"] = df["fiats"].apply(lambda x: len([i["symbol"] for i in x if x]))
    df = df[cols]
    # wrap long string cells so rich tables stay readable
    df = df.applymap(
        lambda x: "\n".join(textwrap.wrap(x, width=28)) if isinstance(x, str) else x
    )
    df = df.rename(
        columns={"adjusted_rank": "Rank", "confidence_score": "confidence"},
    )
    df.columns = [x.replace("reported_", "") for x in df.columns]
    # "rank" sorts by the renamed "Rank" column with inverted direction
    if sortby.lower() == "rank":
        df = df.sort_values(by="Rank", ascending=not ascend)
    else:
        df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_exchanges_market(
    exchange_id: str = "binance",
    symbols: str = "USD",
    sortby: str = "pair",
    ascend: bool = True,
) -> pd.DataFrame:
    """List markets traded on a given exchange. [Source: CoinPaprika]

    Parameters
    ----------
    exchange_id: str
        identifier of exchange e.g for Binance Exchange -> binance
    symbols: str
        Comma separated quotes to return e.g quotes=USD,BTC
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        exchange_id, pair, base_currency_name, quote_currency_name,
        category, reported_volume_24h_share, trust_score, market_url.
        Empty DataFrame when the API reports an error.
    """
    session = PaprikaSession()
    payload = session.make_request(
        session.ENDPOINTS["exchange_markets"].format(exchange_id), quotes=symbols
    )
    # the API signals an unknown exchange with an "error" key
    if "error" in payload:
        console.print(payload)
        return pd.DataFrame()
    markets = pd.DataFrame(payload)
    markets["exchange_id"] = exchange_id
    ordered_columns = [
        "exchange_id",
        "pair",
        "base_currency_name",
        "quote_currency_name",
        "category",
        "reported_volume_24h_share",
        "trust_score",
        "market_url",
    ]
    return markets[ordered_columns].sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_all_contract_platforms() -> pd.DataFrame:
    """List all smart-contract platforms (ethereum, solana, cosmos,
    polkadot, kusama, ...). [Source: CoinPaprika]

    Returns
    -------
    pd.DataFrame
        index (1-based), platform_id
    """
    session = PaprikaSession()
    platforms = session.make_request(session.ENDPOINTS["contract_platforms"])
    frame = pd.DataFrame(platforms).reset_index()
    frame.columns = ["index", "platform_id"]
    # make the index column 1-based for display
    # pylint: disable=unsupported-assignment-operation
    frame["index"] = frame["index"] + 1
    return frame
@log_start_end(log=logger)
def get_contract_platform(
    platform_id: str = "eth-ethereum", sortby: str = "active", ascend: bool = True
) -> pd.DataFrame:
    """Fetch all contract addresses for a given platform. [Source: CoinPaprika]

    Parameters
    ----------
    platform_id: str
        Blockchain platform like eth-ethereum
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascend

    Returns
    -------
    pd.DataFrame
        id, type, active
    """
    session = PaprikaSession()
    addresses = session.make_request(
        session.ENDPOINTS["contract_platform_addresses"].format(platform_id)
    )
    result = pd.DataFrame(addresses)[["id", "type", "active"]]
    return result.sort_values(by=sortby, ascending=ascend)
import argparse
import logging
from typing import Any, Dict
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent, log_and_raise
logger = logging.getLogger(__name__)
# Base URL of the loanscan.io REST API
api_url = "https://api.loanscan.io"

# Lending/borrowing platforms known to loanscan; every one of these is
# guaranteed a row in the frame returned by get_rates
PLATFORMS = [
    "MakerDao",
    "Compound",
    "Poloniex",
    "Bitfinex",
    "dYdX",
    "CompoundV2",
    "Linen",
    "Hodlonaut",
    "InstaDapp",
    "Zerion",
    "Argent",
    "DeFiSaver",
    "MakerDaoV2",
    "Ddex",
    "AaveStable",
    "AaveVariable",
    "YearnFinance",
    "BlockFi",
    "Nexo",
    "CryptoCom",
    "Soda",
    "Coinbase",
    "SaltLending",
    "Ledn",
    "Bincentive",
    "Inlock",
    "Bitwala",
    "Zipmex",
    "Vauld",
    "Delio",
    "Yield",
    "Vesper",
    "Reflexer",
    "SwissBorg",
    "MushroomsFinance",
    "ElementFi",
    "Maple",
    "CoinRabbit",
    "WirexXAccounts",
    "Youhodler",
    "YieldApp",
    "NotionalFinance",
    "IconFi",
]

# Symbols accepted by the --cryptocurrency CLI argument (see check_valid_coin)
CRYPTOS = [
    "ZRX",
    "BAT",
    "REP",
    "ETH",
    "SAI",
    "BTC",
    "XRP",
    "LTC",
    "EOS",
    "BCH",
    "XMR",
    "DOGE",
    "USDC",
    "USDT",
    "BSV",
    "NEO",
    "ETC",
    "OMG",
    "ZEC",
    "BTG",
    "SAN",
    "DAI",
    "UNI",
    "WBTC",
    "COMP",
    "LUNA",
    "UST",
    "BUSD",
    "KNC",
    "LEND",
    "LINK",
    "MANA",
    "MKR",
    "SNX",
    "SUSD",
    "TUSD",
    "eCRV-DAO",
    "HEGIC",
    "YFI",
    "1INCH",
    "CRV-IB",
    "CRV-HBTC",
    "BOOST",
    "CRV-sBTC",
    "CRV-renBTC",
    "CRV-sAave",
    "CRV-oBTC",
    "CRV-pBTC",
    "CRV-LUSD",
    "CRV-BBTC",
    "CRV-tBTC",
    "CRV-FRAX",
    "CRV-yBUSD",
    "CRV-COMP",
    "CRV-GUSD",
    "yUSD",
    "CRV-3pool",
    "CRV-TUSD",
    "CRV-BUSD",
    "CRV-DUSD",
    "CRV-UST",
    "CRV-mUSD",
    "sUSD",
    "CRV-sUSD",
    "CRV-LINK",
    "CRV-USDN",
    "CRV-USDP",
    "CRV-alUSD",
    "CRV-Aave",
    "CRV-HUSD",
    "CRV-EURS",
    "RAI",
    "CRV-triCrypto",
    "CRV-Pax",
    "CRV-USDT",
    "CRV-USDK",
    "CRV-RSV",
    "CRV-3Crypto",
    "GUSD",
    "PAX",
    "USD",
    "ILK",
    "BNB",
    "PAXG",
    "ADA",
    "FTT",
    "SOL",
    "SRM",
    "RAY",
    "XLM",
    "SUSHI",
    "CRV",
    "BAL",
    "AAVE",
    "MATIC",
    "GRT",
    "ENJ",
    "USDP",
    "IOST",
    "AMP",
    "PERP",
    "SHIB",
    "ALICE",
    "ALPHA",
    "ANKR",
    "ATA",
    "AVA",
    "AXS",
    "BAKE",
    "BAND",
    "BNT",
    "BTCST",
    "CELR",
    "CFX",
    "CHR",
    "COS",
    "COTI",
    "CTSI",
    "DUSK",
    "EGLD",
    "ELF",
    "FET",
    "FLOW",
    "FTM",
    "INJ",
    "IOTX",
    "MDX",
    "NEAR",
    "OCEAN",
    "ONT",
    "POLS",
    "REEF",
    "WRX",
    "XEC",
    "XTZ",
    "XVS",
    "ZIL",
    "DOT",
    "FIL",
    "TRX",
    "CAKE",
    "ADX",
    "FIRO",
    "SXP",
    "ATOM",
    "IOTA",
    "AKRO",
    "AUDIO",
    "BADGER",
    "CVC",
    "DENT",
    "DYDX",
    "FORTH",
    "GNO",
    "HOT",
    "LPT",
    "LRC",
    "NKN",
    "NMR",
    "NU",
    "OGN",
    "OXT",
    "POLY",
    "QNT",
    "RLC",
    "RSR",
    "SAND",
    "SKL",
    "STMX",
    "STORJ",
    "TRB",
    "UMA",
    "DPI",
    "VSP",
    "CHSB",
    "EURT",
    "GHST",
    "3CRV",
    "CRVRENWBTC",
    "MIR-UST UNI LP",
    "ALCX",
    "ALUSD",
    "USDP3CRV",
    "RENBTC",
    "YVECRV",
    "CVX",
    "USDTTRC20",
    "AUD",
    "HKD",
    "GBP",
    "EUR",
    "HUSD",
    "HT",
    "DASH",
    "EURS",
    "AVAX",
    "BTT",
    "GALA",
    "ILV",
    "APE",
]
@log_start_end(log=logger)
def get_rates(rate_type: str = "borrow") -> pd.DataFrame:
    """Return crypto {borrow,supply} interest rates across several platforms.

    [Source: https://loanscan.io/]

    Parameters
    ----------
    rate_type : str
        Interest rate type: {borrow, supply}. Default: borrow

    Returns
    -------
    pd.DataFrame
        Crypto interest rates: one row per platform, one column per symbol.
        Platforms from PLATFORMS without data appear as all-NaN rows.

    Raises
    ------
    ValueError
        If ``rate_type`` is not "borrow" or "supply".
    ConnectionError
        If the API request does not return HTTP 200.
    """
    if rate_type not in ("supply", "borrow"):
        # ValueError (a subclass of Exception) keeps existing callers working
        raise ValueError("Rate type not supported. Supported rates: borrow, supply")
    response = requests.get(
        f"{api_url}/v1/interest-rates",
        headers={"User-Agent": get_user_agent()},
    )
    if response.status_code != 200:
        raise ConnectionError(
            f"Status code: {response.status_code}. Reason: {response.text}"
        )
    data = response.json()
    cryptos: Dict[Any, Any] = {}
    for provider in data:
        provider_name = provider["provider"].lower()
        for crypto in provider[rate_type]:
            symbol = crypto["symbol"]
            # nested mapping: symbol -> {provider -> rate}
            cryptos.setdefault(symbol, {})[provider_name] = crypto["rate"]
    df = pd.DataFrame(cryptos, columns=sorted(cryptos.keys()))
    # ensure every known platform appears as a row, even without any data
    for platform in PLATFORMS:
        if platform.lower() not in df.index:
            df = pd.concat(
                [df, pd.Series(name=platform.lower(), dtype="object")],
                axis=0,
                join="outer",
            )
    return df
def check_valid_coin(value) -> str:
    """Argparse type validating a comma-separated list of supported coins.

    Raises an ``argparse.ArgumentTypeError`` (via ``log_and_raise``) for the
    first entry not present in ``CRYPTOS``; otherwise returns ``value``
    unchanged.
    """
    supported = {name.lower() for name in CRYPTOS}
    for candidate in value.split(","):
        if candidate.lower() not in supported:
            log_and_raise(
                argparse.ArgumentTypeError(
                    f"{candidate} is not supported. Options: {','.join(CRYPTOS)}"
                )
            )
    return value
def check_valid_platform(value) -> str:
    """Argparse type validating a comma-separated list of supported platforms.

    Raises an ``argparse.ArgumentTypeError`` (via ``log_and_raise``) for the
    first entry not present in ``PLATFORMS``; otherwise returns ``value``
    unchanged.
    """
    supported = {name.lower() for name in PLATFORMS}
    for candidate in value.split(","):
        if candidate.lower() not in supported:
            log_and_raise(
                argparse.ArgumentTypeError(
                    f"{candidate} is not supported. Options: {','.join(PLATFORMS)}"
                )
            )
    return value
__docformat__ = "numpy"
import pandas as pd
from openbb_terminal.cryptocurrency.overview import pycoingecko_model, coinpaprika_model
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
def globe(source: str = "CoinGecko") -> pd.DataFrame:
    """Get global crypto market data.

    Parameters
    ----------
    source : str, optional
        Source of data, by default "CoinGecko".
        Supported (case-insensitive): "CoinGecko", "CoinPaprika".

    Returns
    -------
    pd.DataFrame
        DataFrame with global crypto market data; empty for an
        unsupported source.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> global_market_data = openbb.crypto.ov.globe()

    To get data from CoinPaprika, use the source parameter:
    >>> global_market_data = openbb.crypto.ov.globe(source="coinpaprika")
    """
    provider = source.lower()
    if provider == "coingecko":
        return pycoingecko_model.get_global_info()
    if provider == "coinpaprika":
        return coinpaprika_model.get_global_info()
    # Unknown source: return an empty frame rather than raising
    return pd.DataFrame()
def exchanges(source: str = "CoinGecko") -> pd.DataFrame:
    """Show top crypto exchanges.

    Parameters
    ----------
    source : str, optional
        Source to get exchanges, by default "CoinGecko".
        Supported (case-insensitive): "CoinGecko", "CoinPaprika".

    Returns
    -------
    pd.DataFrame
        DataFrame with top crypto exchanges, sorted by rank; empty for an
        unsupported source.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> exchanges = openbb.crypto.ov.exchanges()
    """
    provider = source.lower()
    if provider == "coingecko":
        return pycoingecko_model.get_exchanges().sort_values(by="Rank", ascending=True)
    if provider == "coinpaprika":
        df = coinpaprika_model.get_list_of_exchanges("USD")
        # Humanize every column except the rank
        value_cols = [name for name in df.columns if name != "Rank"]
        df[value_cols] = df[value_cols].applymap(
            lambda_long_number_format_with_type_check
        )
        return (
            df.sort_values(by="Rank", ascending=True)
            .reset_index(drop=True)
            .head(20)
        )
    return pd.DataFrame()
__docformat__ = "numpy"
import logging
import os
from typing import Optional
from openbb_terminal.cryptocurrency.overview import cryptopanic_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.decorators import check_api_key
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_CRYPTO_PANIC_KEY"])
def display_news(
    post_kind: str = "news",
    region: str = "en",
    filter_: Optional[str] = None,
    limit: int = 25,
    sortby: str = "published_at",
    ascend: bool = False,
    links: bool = False,
    export: str = "",
) -> None:
    """Display recent posts from the CryptoPanic news aggregator platform.
    [Source: https://cryptopanic.com/]

    Parameters
    ----------
    post_kind: str
        Filter by category of news. Available values: news or media.
    region: str
        Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch),
        es (Español), fr (Français), it (Italiano), pt (Português), ru (Русский)
    filter_: Optional[str]
        Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol
    limit: int
        number of news to display
    sortby: str
        Key to sort by.
    ascend: bool
        Sort in ascending order.
    links: bool
        Show urls for news
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = cryptopanic_model.get_news(
        limit=limit,
        post_kind=post_kind,
        filter_=filter_,
        region=region,
        sortby=sortby,
        ascend=ascend,
    )
    if not df.empty:
        # With links: show only title + url; without: hide the url column
        df = df[["title", "link"]] if links else df.drop(columns=["link"])
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
            title="Recent CryptoPanic Posts",
        )
    # Export runs even when the request returned nothing (matches prior behavior)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "news",
        df,
    )
import logging
import os
from openbb_terminal.cryptocurrency.overview.withdrawalfees_model import (
get_crypto_withdrawal_fees,
get_overall_exchange_withdrawal_fees,
get_overall_withdrawal_fees,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_overall_withdrawal_fees(limit: int = 15, export: str = "") -> None:
    """Print withdrawal fees for the top coins.
    [Source: https://withdrawalfees.com/]

    Parameters
    ----------
    limit: int
        Number of coins to search
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    fees = get_overall_withdrawal_fees(limit)
    if fees.empty:
        console.print("\nError in withdrawal fees request\n")
    else:
        console.print("\nWithdrawal fees on exchanges:")
        print_rich_table(
            fees.head(limit),
            title="Top Withdrawal Fees",
            headers=list(fees.columns),
            show_index=False,
        )
    # Export runs even on an empty result (matches prior behavior)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "withdrawal_fees",
        fees,
    )
@log_start_end(log=logger)
def display_overall_exchange_withdrawal_fees(export: str = "") -> None:
    """Print per-exchange withdrawal fees.
    [Source: https://withdrawalfees.com/]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    fees = get_overall_exchange_withdrawal_fees()
    if fees.empty:
        console.print("\nError in withdrawal fees request\n")
    else:
        console.print("\nWithdrawal fees per exchange:")
        print_rich_table(
            fees,
            title="Withdrawal Fees",
            headers=list(fees.columns),
            show_index=False,
        )
    # Export runs even on an empty result (matches prior behavior)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "exchange_withdrawal_fees",
        fees,
    )
@log_start_end(log=logger)
def display_crypto_withdrawal_fees(symbol: str, export: str = "") -> None:
    """Print withdrawal fees for one coin across exchanges.
    [Source: https://withdrawalfees.com/]

    Parameters
    ----------
    symbol: str
        Coin to check withdrawal fees
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    result = get_crypto_withdrawal_fees(symbol)
    stats_text = result[0]
    fee_table = result[1]
    if fee_table.empty:
        console.print("\nError in withdrawal fees request\n")
    else:
        console.print(f"\nWithdrawal fees for {symbol}:")
        console.print(f"\n{stats_text}\n")
        print_rich_table(
            fee_table,
            title="Withdrawal Fees per Exchange",
            headers=list(fee_table.columns),
            show_index=False,
        )
    # Export runs even on an empty result (matches prior behavior)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "crypto_withdrawal_fees",
        fee_table,
    )
import logging
from typing import List
import pandas as pd
from tokenterminal import TokenTerminal
from openbb_terminal.decorators import log_start_end
from openbb_terminal import config_terminal as cfg
logger = logging.getLogger(__name__)
# Token Terminal client, constructed at import time with the configured key.
token_terminal = TokenTerminal(key=cfg.API_TOKEN_TERMINAL_KEY)
# Fetch all project data once at import time; fall back to a static list on failure
try:
    PROJECTS_DATA = token_terminal.get_all_projects()
except Exception as e:
    logger.error(e)
    # NOTE(review): get_all_projects() appears to return per-project dicts
    # (get_fundamental_metrics below subscripts each entry by key), while this
    # fallback holds plain project-id strings — consumers would fail on the
    # fallback. Confirm the intended fallback shape.
    PROJECTS_DATA = [
        "0x",
        "1inch",
        "88mph",
        "aave",
        "abracadabra-money",
        "alchemist",
        "alchemix-finance",
        "algorand",
        "alpha-finance",
        "arweave",
        "autofarm",
        "avalanche",
        "axie-infinity",
        "balancer",
        "bancor",
        "barnbridge",
        "basket-dao",
        "benqi",
        "binance-smart-chain",
        "bitcoin",
        "cap",
        "cardano",
        "centrifuge",
        "clipper",
        "compound",
        "convex-finance",
        "cosmos",
        "cryptex",
        "curve",
        "decentral-games",
        "decred",
        "dforce",
        "dhedge",
        "dodo",
        "dogecoin",
        "dydx",
        "ellipsis-finance",
        "elrond",
        "enzyme-finance",
        "erasure-protocol",
        "ethereum",
        "ethereum-name-service",
        "euler",
        "fantom",
        "fei-protocol",
        "filecoin",
        "futureswap",
        "gmx",
        "goldfinch",
        "harvest-finance",
        "helium",
        "hurricaneswap",
        "idle-finance",
        "index-cooperative",
        "instadapp",
        "integral-protocol",
        "karura",
        "keeperdao",
        "keep-network",
        "kusama",
        "kyber",
        "lido-finance",
        "liquity",
        "litecoin",
        "livepeer",
        "looksrare",
        "loopring",
        "maiar",
        "makerdao",
        "maple-finance",
        "mcdex",
        "metamask",
        "mstable",
        "near-protocol",
        "nexus-mutual",
        "nftx",
        "notional-finance",
        "opensea",
        "optimism",
        "osmosis",
        "pancakeswap",
        "pangolin",
        "perpetual-protocol",
        "piedao",
        "pocket-network",
        "polkadot",
        "polygon",
        "polymarket",
        "pooltogether",
        "powerpool",
        "quickswap",
        "rarible",
        "rari-capital",
        "reflexer",
        "ren",
        "ribbon-finance",
        "rocket-pool",
        "saddle-finance",
        "set-protocol",
        "solana",
        "solend",
        "spookyswap",
        "stake-dao",
        "stellar",
        "sushiswap",
        "synthetix",
        "terra",
        "tezos",
        "the-graph",
        "thorchain",
        "tokemak",
        "tokenlon",
        "tornado-cash",
        "trader-joe",
        "uma",
        "uniswap",
        "unit-protocol",
        "venus",
        "vesper-finance",
        "volmex",
        "wakaswap",
        "yearn-finance",
        "yield-guild-games",
        "yield-yak",
        "zcash",
        "zora",
    ]
# Performance timelines accepted by the Token Terminal API
TIMELINES = ["24h", "7d", "30d", "90d", "180d", "365d"]
# Project categories usable as a filter for fundamental metrics
CATEGORIES = [
    "Asset Management",
    "Blockchain",
    "DeFi",
    "Exchange",
    "Gaming",
    "Insurance",
    "Interoperability",
    "Lending",
    "NFT",
    "Other",
    "Prediction Market",
    "Stablecoin",
]
# Fundamental metrics exposed per project by the Token Terminal API
METRICS = [
    "twitter_followers",
    "gmv_annualized",
    "market_cap",
    "take_rate",
    "revenue",
    "revenue_protocol",
    "tvl",
    "pe",
    "pe_circulating",
    "ps",
    "ps_circulating",
]
@log_start_end(log=logger)
def get_possible_timelines() -> List[str]:
    """Return the timelines supported by Token Terminal.

    Returns
    -------
    List[str]
        The available timeline values (e.g. "24h", "7d").
    """
    return TIMELINES
@log_start_end(log=logger)
def get_possible_categories() -> List[str]:
    """Return the project categories supported by Token Terminal.

    Returns
    -------
    List[str]
        The available category values (e.g. "DeFi", "Lending").
    """
    return CATEGORIES
@log_start_end(log=logger)
def get_possible_metrics() -> List[str]:
    """Return the fundamental metrics supported by Token Terminal.

    Returns
    -------
    List[str]
        The available metric identifiers (e.g. "revenue", "tvl").
    """
    return METRICS
@log_start_end(log=logger)
def get_fundamental_metrics(
    metric: str,
    category: str = "",
    timeline: str = "24h",
    ascend: bool = False,
) -> pd.Series:
    """Get fundamental metrics [Source: Token Terminal]

    Parameters
    ----------
    metric : str
        The metric of interest. See `get_possible_metrics()` for available metrics.
    category : str
        The category of interest. See `get_possible_categories()` for available categories.
        The default value is an empty string which means that all categories are considered.
    timeline : str
        The timeline of interest. See `get_possible_timelines()` for available timelines.
    ascend : bool
        Direction of the sort. If True, the data is sorted in ascending order.

    Returns
    -------
    pd.Series
        Index: "Project (SYMBOL)" labels; values: the metric as float.
        Non-numeric metric values are silently skipped.
    """
    # Metrics reported per timeline are keyed "<metric>_<timeline>" in the
    # Token Terminal project payload; all others are keyed by the bare name.
    timeline_dependent = {
        "revenue",
        "revenue_protocol",
        "tvl",
        "pe",
        "pe_circulating",
        "ps",
        "ps_circulating",
    }
    metric_values = {}
    for project in PROJECTS_DATA:
        if category:
            # "category_tags" may hold one tag or a comma-separated list;
            # splitting handles both. BUGFIX: previously the else-branch
            # duplicated the if-branch, so non-matching projects were still
            # included and the category filter had no effect.
            if category not in project["category_tags"].split(","):
                continue
        key = f"{metric}_{timeline}" if metric in timeline_dependent else metric
        val = project[key]
        if isinstance(val, (float, int)):
            label = (
                f"{project['name']} ({project['symbol']})"
                if project["symbol"]
                else f"{project['name']}"
            )
            metric_values[label] = float(val)
    metric_values = dict(
        sorted(metric_values.items(), key=lambda item: item[1], reverse=not ascend)
    )
    return pd.Series(metric_values)
import logging
import os
from openbb_terminal.cryptocurrency.overview import rekt_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_crypto_hacks(
    limit: int = 15,
    sortby: str = "Platform",
    ascend: bool = False,
    slug: str = "polyntwork-rekt",
    export: str = "",
) -> None:
    """Display the list of major crypto-related hacks, or a single hack's
    write-up when a slug is passed. [Source: https://rekt.news]

    Parameters
    ----------
    limit: int
        Number of hacks to search
    sortby: str
        Key by which to sort data {Platform,Date,Amount [$],Audit,Slug,URL}
    ascend: bool
        Flag to sort data ascending
    slug: str
        Crypto hack slug to check (e.g., polynetwork-rekt)
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # NOTE(review): default slug "polyntwork-rekt" looks like a typo for
    # "polynetwork-rekt"; kept as-is for backward compatibility — confirm.
    if slug:
        article = rekt_model.get_crypto_hack(slug)
        if article:
            console.print(article)
        # Export runs even when the article lookup failed (matches prior behavior)
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "ch",
            article,
        )
        return
    hacks = rekt_model.get_crypto_hacks(sortby, ascend)
    if hacks.empty:
        console.print("\nError in rekt request\n")
        return
    hacks["Amount [$]"] = hacks["Amount [$]"].apply(lambda_long_number_format)
    hacks["Date"] = hacks["Date"].dt.date
    print_rich_table(
        hacks.head(limit),
        headers=list(hacks.columns),
        floatfmt=".1f",
        show_index=False,
        title="Major Crypto Hacks",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ch",
        hacks,
    )
__docformat__ = "numpy"
import logging
import os
from pandas.plotting import register_matplotlib_converters
import openbb_terminal.cryptocurrency.overview.coinpaprika_model as paprika
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# pylint: disable=inconsistent-return-statements
# pylint: disable=C0302, too-many-lines
# Quote currencies accepted by the CoinPaprika endpoints used below
CURRENCIES = [
    "BTC",
    "ETH",
    "USD",
    "EUR",
    "PLN",
    "KRW",
    "GBP",
    "CAD",
    "JPY",
    "RUB",
    "TRY",
    "NZD",
    "AUD",
    "CHF",
    "UAH",
    "HKD",
    "SGD",
    "NGN",
    "PHP",
    "MXN",
    "BRL",
    "THB",
    "CLP",
    "CNY",
    "CZK",
    "DKK",
    "HUF",
    "IDR",
    "ILS",
    "INR",
    "MYR",
    "NOK",
    "PKR",
    "SEK",
    "TWD",
    "ZAR",
    "VND",
    "BOB",
    "COP",
    "PEN",
    "ARS",
    "ISK",
]
# see https://github.com/OpenBB-finance/OpenBBTerminal/pull/562#issuecomment-887842888
# EXCHANGES = paprika.get_list_of_exchanges()
@log_start_end(log=logger)
def display_global_market(export: str = "") -> None:
    """Print the most important global crypto statistics, e.g.
    market_cap_usd, volume_24h_usd, bitcoin_dominance_percentage,
    cryptocurrencies_number, market_cap_ath_value, market_cap_ath_date,
    volume_24h_ath_value, volume_24h_ath_date, market_cap_change_24h,
    volume_24h_change_24h, last_updated. [Source: CoinPaprika]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    raw = paprika.get_global_info()
    table = raw.copy()
    # Humanize the values for display; raw numbers are kept for export
    table["Value"] = table["Value"].apply(
        lambda_long_number_format_with_type_check
    )
    print_rich_table(
        table, headers=list(table.columns), show_index=False, title="Global Crypto Statistics"
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "global",
        raw,
    )
@log_start_end(log=logger)
def display_all_coins_market_info(
    symbol: str,
    sortby: str = "rank",
    ascend: bool = True,
    limit: int = 15,
    export: str = "",
) -> None:
    """Print basic market information for all coins from the CoinPaprika API.
    [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Quoted currency
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = paprika.get_coins_market_info(symbols=symbol, sortby=sortby, ascend=ascend)
    raw = df.copy()
    if df.empty:
        console.print("No data found", "\n")
        return
    # Humanize every column except the rank; raw numbers are kept for export
    value_cols = [name for name in df.columns if name != "rank"]
    df[value_cols] = df[value_cols].applymap(
        lambda_long_number_format_with_type_check
    )
    console.print(f"\nDisplaying data vs {symbol}")
    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Basic Market Information",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "markets",
        raw,
    )
@log_start_end(log=logger)
def display_all_coins_info(
    symbol: str,
    sortby: str = "rank",
    ascend: bool = True,
    limit: int = 15,
    export: str = "",
) -> None:
    """Displays basic coin information for all coins from CoinPaprika API. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Quoted currency
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = paprika.get_coins_info(symbols=symbol, sortby=sortby, ascend=ascend)
    df_data = df.copy()
    if df.empty:
        # BUGFIX: message was "Not data found"; now matches sibling commands
        console.print("No data found", "\n")
        return
    # Humanize every column except the rank; raw numbers are kept for export
    cols = [col for col in df.columns if col != "rank"]
    df[cols] = df[cols].applymap(lambda_long_number_format_with_type_check)
    console.print(f"Displaying data vs {symbol}")
    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Basic Coin Information",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "info",
        df_data,
    )
@log_start_end(log=logger)
def display_all_exchanges(
    symbol: str,
    sortby: str = "rank",
    ascend: bool = True,
    limit: int = 15,
    export: str = "",
) -> None:
    """List exchanges from the CoinPaprika API. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Quoted currency
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = paprika.get_list_of_exchanges(symbols=symbol, sortby=sortby, ascend=ascend)
    raw = df.copy()
    if df.empty:
        console.print("No data found", "\n")
        return
    # Humanize every column except the rank; raw numbers are kept for export
    value_cols = [name for name in df.columns if name != "rank"]
    df[value_cols] = df[value_cols].applymap(
        lambda_long_number_format_with_type_check
    )
    console.print(f"\nDisplaying data vs {symbol}")
    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="List Exchanges",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "exchanges",
        raw,
    )
@log_start_end(log=logger)
def display_exchange_markets(
    exchange: str = "binance",
    sortby: str = "pair",
    ascend: bool = True,
    limit: int = 15,
    links: bool = False,
    export: str = "",
) -> None:
    """Print all markets for a given exchange. [Source: CoinPaprika]

    Parameters
    ----------
    exchange: str
        Exchange identifier e.g Binance
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display
    links: bool
        Flag to display urls
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = paprika.get_exchanges_market(
        exchange_id=exchange, sortby=sortby, ascend=ascend
    )
    raw = df.copy()
    if df.empty:
        console.print("No data found", "\n")
        return
    if links is True:
        # Reduce to the link-centric view
        df = df[["exchange_id", "pair", "trust_score", "market_url"]]
    else:
        df = df.drop(columns=["market_url"])
    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Exchange Markets",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "exmarkets",
        raw,
    )
@log_start_end(log=logger)
def display_all_platforms(export: str = "") -> None:
    """List all smart contract platforms like ethereum, solana, cosmos, polkadot, kusama.
    [Source: CoinPaprika]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # CONSISTENCY FIX: `export` now defaults to "" like every sibling command
    # (backward-compatible — existing positional callers are unaffected).
    df = paprika.get_all_contract_platforms()
    print_rich_table(
        df, headers=list(df.columns), show_index=False, title="Smart Contract Platforms"
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "platforms",
        df,
    )
@log_start_end(log=logger)
def display_contracts(
    symbol: str,
    sortby: str = "active",
    ascend: bool = True,
    limit: int = 15,
    export: str = "",
) -> None:
    """Print all contract addresses for a given platform. [Source: CoinPaprika]

    Parameters
    ----------
    symbol: str
        Blockchain platform like eth-ethereum
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    limit: int
        Number of records to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    contracts = paprika.get_contract_platform(symbol, sortby, ascend)
    if contracts.empty:
        console.print(f"Nothing found for platform: {symbol}", "\n")
        return
    print_rich_table(
        contracts.head(limit),
        headers=list(contracts.columns),
        show_index=False,
        title="Contract Addresses",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "contracts",
        contracts,
    )
import json
import logging
from datetime import datetime
from typing import Optional
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent, str_date_to_timestamp
logger = logging.getLogger(__name__)
DAYS = [30, 90, 365]
@log_start_end(log=logger)
def get_altcoin_index(
    period: int = 30,
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
) -> pd.DataFrame:
    """Get altcoin index overtime
    [Source: https://blockchaincenter.net]

    Parameters
    ----------
    period: int
        Number of days {30,90,365} to check performance of coins and calculate the altcoin index.
        E.g., 365 checks yearly performance, 90 will check seasonal performance (90 days),
        30 will check monthly performance (30 days).
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD. Defaults to today.

    Returns
    -------
    pd.DataFrame
        Date-indexed frame with a single "Value" column (the altcoin index).
        Empty when `period` is not one of the supported DAYS values.
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    # Only the timeframes published by blockchaincenter.net are supported
    if period not in DAYS:
        return pd.DataFrame()
    soup = BeautifulSoup(
        requests.get(
            "https://www.blockchaincenter.net/altcoin-season-index/",
            headers={"User-Agent": get_user_agent()},
        ).content,
        "html.parser",
    )
    # Chart data is embedded in the page as a JS assignment:
    # "chartdata[<period>] = {...};" — slice the JSON object out of the script.
    script = soup.select_one(f'script:-soup-contains("chartdata[{period}]")')
    string = script.contents[0].strip()
    initiator = string.index(f"chartdata[{period}] = ") + len(f"chartdata[{period}] = ")
    # NOTE(review): index(";") finds the FIRST semicolon in the script text;
    # this assumes no ";" occurs before the end of this assignment — confirm.
    terminator = string.index(";")
    dict_data = json.loads(string[initiator:terminator])
    df = pd.DataFrame(
        zip(dict_data["labels"]["all"], dict_data["values"]["all"]),
        columns=("Date", "Value"),
    )
    df["Date"] = pd.to_datetime(df["Date"])
    df["Value"] = df["Value"].astype(int)
    df = df.set_index("Date")
    # Restrict to the requested window (bounds are exclusive)
    ts_start_date = str_date_to_timestamp(start_date)
    ts_end_date = str_date_to_timestamp(end_date)
    df = df[
        (df.index > datetime.fromtimestamp(ts_start_date))
        & (df.index < datetime.fromtimestamp(ts_end_date))
    ]
    return df
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from openbb_terminal.cryptocurrency.overview import loanscan_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_plot import PLOT_DPI
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_crypto_rates(
    symbols: str,
    platforms: str,
    rate_type: str = "borrow",
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Displays crypto {borrow,supply} interest rates for cryptocurrencies across several platforms
    [Source: https://loanscan.io/]

    Parameters
    ----------
    symbols: str
        Crypto separated by commas. Default: BTC,ETH,USDT,USDC
    platforms: str
        Platforms separated by commas. Default: BlockFi,Ledn,SwissBorg,Youhodler
    rate_type: str
        Interest rate type: {borrow, supply}. Default: borrow
    limit: int
        Number of records to show
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = loanscan_model.get_rates(rate_type=rate_type)
    if df.empty:
        console.print("\nError in loanscan request\n")
    else:
        # Keep only requested platforms actually present in the response index
        valid_platforms = [
            platform
            for platform in platforms.lower().split(",")
            if platform in df.index
        ]
        df = df[symbols.upper().split(",")].loc[valid_platforms]
        df = df.sort_values(df.columns[0], ascending=False, na_position="last")
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Long format: one row per (platform, asset, rate); drop missing rates
        df_non_null = pd.melt(df.reset_index(), id_vars=["index"]).dropna()
        assets = df_non_null.variable.unique().tolist()
        colors = iter(cfg.theme.get_colors(reverse=True))
        for asset in assets:
            width = df_non_null.loc[(df_non_null.variable == asset)]
            # silence SettingWithCopyWarning for the "id" assignment below
            # NOTE(review): this disables the pandas chained-assignment warning
            # globally and never restores it — confirm this is intended.
            pd.options.mode.chained_assignment = None
            # Unique bar id per (platform, asset) pair
            width["id"] = width["index"] + width["variable"]
            ax.barh(
                y=width["id"],
                width=width.value * 100,
                label=asset,
                height=0.5,
                color=next(colors),
            )
        ylabels = df_non_null["index"].values.tolist()
        ax.set_yticks(np.arange(len(ylabels)))
        ax.set_yticklabels(ylabels)
        ax.set_xlabel("Rate (%)")
        ax.set_ylabel("Platform")
        ax.set_title(f"Cryptos {rate_type} interest rate")
        cfg.theme.style_primary_axis(ax)
        ax.tick_params(axis="y", labelsize=8)
        ax.yaxis.set_label_position("left")
        ax.yaxis.set_ticks_position("left")
        # Reverse legend so it matches the bar stacking order
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1], loc="best")
        if not external_axes:
            cfg.theme.visualize_output()
        # Render rates as percentage strings for the table view
        df = df.fillna("-")
        df = df.applymap(lambda x: str(round(100 * x, 2)) + "%" if x != "-" else x)
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            index_name="Platform",
            show_index=True,
            title=f"Crypto {rate_type.capitalize()} Interest Rates",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cr",
        df,
    )
import logging
import os
from datetime import datetime
from typing import List, Optional
import matplotlib
import numpy as np
import pandas as pd
from matplotlib import dates as mdates
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import check_api_key
from openbb_terminal import config_plot as cfgPlot
from openbb_terminal.cryptocurrency.overview.glassnode_model import (
get_btc_rainbow,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_GLASSNODE_KEY"])
def display_btc_rainbow(
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot the bitcoin "rainbow" chart: price over nine log-regression bands.

    [Price data from source: https://glassnode.com]
    [Inspired by: https://blockchaincenter.net]

    Parameters
    ----------
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df_data = get_btc_rainbow(start_date, end_date)
    if df_data.empty:
        return

    # This plot has 1 axis
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return
        (ax,) = external_axes
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)

    # Day offsets are measured from a fixed 2012-01-01 anchor; the regression
    # curves below are parameterized in these offsets.
    anchor = datetime.strptime("2012-01-01", "%Y-%m-%d")
    final = datetime.strptime(end_date, "%Y-%m-%d")
    day_offsets = np.arange(
        (df_data.index[0] - anchor).days, (final - anchor).days + 1
    )

    # (exponent, day shift) pairs defining the nine boundary curves
    # 10 ** (exponent * ln(day + shift) - 19.463), ordered top band first.
    band_specs = [
        (2.90, 1400),
        (2.886, 1375),
        (2.872, 1350),
        (2.859, 1320),
        (2.8445, 1293),
        (2.8295, 1275),
        (2.815, 1250),
        (2.801, 1225),
        (2.788, 1200),
    ]
    boundaries = [
        10 ** (exponent * np.log(day_offsets + shift) - 19.463)
        for exponent, shift in band_specs
    ]

    x_dates = pd.date_range(df_data.index[0], final, freq="d")

    # Fill each band between consecutive boundary curves, top to bottom.
    band_colors = [
        "red",
        "orange",
        "yellow",
        "green",
        "blue",
        "violet",
        "indigo",
        "purple",
    ]
    for lower, upper, color in zip(boundaries, boundaries[1:], band_colors):
        ax.fill_between(x_dates, lower, upper, color=color, alpha=0.7)

    ax.semilogy(df_data.index, df_data["v"].values)
    ax.set_xlim(df_data.index[0], final)
    ax.set_title("Bitcoin Rainbow Chart")
    ax.set_ylabel("Price [USD]")

    # Legend entries map to the eight fills (in draw order) plus the price line.
    ax.legend(
        [
            "Bubble bursting imminent!!",
            "SELL!",
            "Everyone FOMO'ing....",
            "Is this a bubble??",
            "Still cheap",
            "Accumulate",
            "BUY!",
            "Basically a Fire Sale",
            "Bitcoin Price",
        ],
        prop={"size": 8},
    )

    # Mark bitcoin halving dates with vertical lines and rotated labels.
    halving_dates = mdates.date2num(
        np.array(
            [
                datetime(2012, 11, 28),
                datetime(2016, 7, 9),
                datetime(2020, 5, 11),
                datetime(2024, 4, 4),
            ]
        )
    )
    ax.vlines(x=halving_dates, ymin=0, ymax=max(boundaries[0]), color="grey")
    for number, when in enumerate(halving_dates):
        # Only label halvings that fall inside the plotted date window.
        if mdates.date2num(anchor) < when < mdates.date2num(final):
            ax.text(
                when,
                1,
                f"Halving {number+1}",
                rotation=-90,
                verticalalignment="center",
            )

    ax.minorticks_off()
    ax.yaxis.set_major_formatter(
        matplotlib.ticker.FuncFormatter(
            lambda val, _: int(val) if val >= 1 else val
        )
    )
    ax.yaxis.set_major_locator(
        matplotlib.ticker.LogLocator(base=100, subs=[1.0, 2.0, 5.0, 10.0])
    )

    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "btcrb",
        df_data,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.