__docformat__ = "numpy"
# pylint: disable=C0301, E1101
# pylint: disable=unsupported-assignment-operation
import logging
import re
from typing import List, Union
import numpy as np
import pandas as pd
from pycoingecko import CoinGeckoAPI
from openbb_terminal.cryptocurrency.dataframe_helpers import (
create_df_index,
lambda_long_number_format_with_type_check,
lambda_replace_underscores_in_column_names,
)
from openbb_terminal.cryptocurrency.discovery.pycoingecko_model import get_coins
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# pylint: disable=unsupported-assignment-operation
# Coins supported by CoinGecko's public-companies treasury endpoint
# (consumed by get_holdings_overview).
HOLD_COINS = ["ethereum", "bitcoin"]
# Sortable columns for the crypto news table.
NEWS_FILTERS = ["Index", "Title", "Author", "Posted"]
# Sortable columns for the top crypto categories table.
CATEGORIES_FILTERS = [
    "Rank",
    "Name",
    "Change_1h",
    "Change_24h",
    "Change_7d",
    "Market_Cap",
    "Volume_24h",
    "Coins",
]
# Sortable columns for the stablecoins table.
STABLES_FILTERS = [
    "Rank",
    "Name",
    "Symbol",
    "Price",
    "Change_24h",
    "Exchanges",
    "Market_Cap",
    "Change_30d",
]
# Sortable columns for the finance products table.
PRODUCTS_FILTERS = [
    "Rank",
    "Platform",
    "Identifier",
    "Supply_Rate",
    "Borrow_Rate",
]
# Sortable columns for the financial platforms table.
PLATFORMS_FILTERS = ["Rank", "Name", "Category", "Centralized"]
# Sortable columns for the exchanges table.
EXCHANGES_FILTERS = [
    "Rank",
    "Trust_Score",
    "Id",
    "Name",
    "Country",
    "Year Established",
    "Trade_Volume_24h_BTC",
]
# Sortable columns for the exchange-rates table.
EXRATES_FILTERS = ["Index", "Name", "Unit", "Value", "Type"]
# Sortable columns for the crypto indexes table.
INDEXES_FILTERS = ["Rank", "Name", "Id", "Market", "Last", "MultiAsset"]
# Sortable columns for the derivatives table.
DERIVATIVES_FILTERS = [
    "Rank",
    "Market",
    "Symbol",
    "Price",
    "Pct_Change_24h",
    "Contract_Type",
    "Basis",
    "Spread",
    "Funding_Rate",
    "Volume_24h",
]
# Raw CoinGecko /coins/markets field names selected before renaming
# (see get_stable_coins).
COINS_COLUMNS = [
    "symbol",
    "name",
    "current_price",
    "market_cap",
    "market_cap_rank",
    "price_change_percentage_7d_in_currency",
    "price_change_percentage_24h_in_currency",
    "total_volume",
]
@log_start_end(log=logger)
def get_holdings_overview(endpoint: str = "bitcoin") -> List[Union[str, pd.DataFrame]]:
    """Returns public companies that holds ethereum or bitcoin [Source: CoinGecko]
    Parameters
    ----------
    endpoint : str
        "bitcoin" or "ethereum"
    Returns
    -------
    List[Union[str, pd.DataFrame]]
        - str: Overall statistics
        - pd.DataFrame: Companies holding crypto
    """
    cg = CoinGeckoAPI()
    data = cg.get_companies_public_treasury_by_coin_id(coin_id=endpoint)
    # Human-readable aggregate summary built from the endpoint's top-level fields.
    stats_str = f"""{len(data["companies"])} companies hold a total of {lambda_long_number_format_with_type_check(data["total_holdings"])} {endpoint} ({data["market_cap_dominance"]}% of market cap dominance) with the current value of {lambda_long_number_format_with_type_check(int(data["total_value_usd"]))} USD dollars"""  # noqa
    # Flatten the per-company records into one row per company.
    df = pd.json_normalize(data, record_path=["companies"])
    # Prettify column names (underscores replaced) for display.
    df.columns = list(
        map(
            lambda x: lambda_replace_underscores_in_column_names(x)
            if isinstance(x, str)
            else x,
            df.columns,
        )
    )
    return [stats_str, df]
# Valid `sort_filter` values for get_top_crypto_categories; mirrors the
# CoinGecko /coins/categories `order` parameter.
SORT_VALUES = [
    "market_cap_desc",
    "market_cap_asc",
    "name_desc",
    "name_asc",
    "market_cap_change_24h_desc",
    "market_cap_change_24h_asc",
]
# Pre-compiled pattern extracting a coin identifier from CoinGecko "small"
# image URLs, e.g. ".../small/bitcoin.png" -> "bitcoin". Dots are escaped so
# only real extensions match (the original pattern left them as wildcards).
_COIN_IMAGE_RE = re.compile(r"small/(.*)(\.jpg|\.png|\.JPG|\.PNG)")


def lambda_coin_formatter(n):
    """Extract coin identifiers from an iterable of CoinGecko image URLs.

    Parameters
    ----------
    n : Iterable[str]
        Image URLs, e.g. "https://.../small/bitcoin.png"

    Returns
    -------
    str
        Comma-joined identifiers for the URLs that matched; non-matching
        entries are skipped.
    """
    coins = []
    for coin in n:
        # Search once per item (the original ran the regex twice).
        match = _COIN_IMAGE_RE.search(coin)
        if match:
            coins.append(match.group(1))
    return ",".join(coins)
@log_start_end(log=logger)
def get_top_crypto_categories(sort_filter: str = SORT_VALUES[0]) -> pd.DataFrame:
    """Returns top crypto categories [Source: CoinGecko]

    Parameters
    ----------
    sort_filter : str
        One of "market_cap_desc", "market_cap_asc", "name_desc", "name_asc",
        "market_cap_change_24h_desc", "market_cap_change_24h_asc"

    Returns
    -------
    pd.DataFrame
        Rank, Name, Change_1h, Change_7d, Market_Cap, Volume_24h, Coins, Url.
        Empty DataFrame when `sort_filter` is not a recognized value.
    """
    # Guard clause: unknown sort keys yield an empty frame.
    if sort_filter not in SORT_VALUES:
        return pd.DataFrame()
    response = CoinGeckoAPI().get_coins_categories()
    df = pd.DataFrame(response)
    # Remove raw API fields that are not surfaced in the terminal view.
    df = df.drop(columns=["id", "content", "updated_at"])
    # Collapse the top-3 coin image URLs into a comma-joined id string.
    df["top_3_coins"] = df["top_3_coins"].apply(lambda_coin_formatter)
    # Prettify string column names for display.
    df.columns = [
        lambda_replace_underscores_in_column_names(name)
        if isinstance(name, str)
        else name
        for name in df.columns
    ]
    return df
# TODO: add string with overview
@log_start_end(log=logger)
def get_stable_coins(
    limit: int = 15, sortby: str = "Market_Cap_[$]", ascend: bool = False
) -> pd.DataFrame:
    """Returns top stable coins [Source: CoinGecko]
    Parameters
    ----------
    limit: int
        How many rows to show
    sortby: str
        Key by which to sort data, default is Market_Cap_[$]
    ascend: bool
        Flag to sort data ascending
    Returns
    -------
    pd.DataFrame
        Dataframe with stable coins data
    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.crypto.ov.stables(sortby="Volume_[$]", ascend=True, limit=10)
    """
    df = get_coins(limit=limit, category="stablecoins")
    df = df[COINS_COLUMNS]
    # Direct column assignment instead of set_axis(copy=False): the `copy`
    # keyword is deprecated/removed in recent pandas versions.
    df.columns = [
        "Symbol",
        "Name",
        "Price_[$]",
        "Market_Cap_[$]",
        "Market_Cap_Rank",
        "Change_7d_[%]",
        "Change_24h_[%]",
        "Volume_[$]",
    ]
    total_market_cap = int(df["Market_Cap_[$]"].sum())
    # Guard against ZeroDivisionError on an empty result (or all-zero caps);
    # the column is still created so sorting by it remains valid.
    if total_market_cap > 0:
        df[f"Percentage_[%]_of_top_{limit}"] = (
            df["Market_Cap_[$]"] / total_market_cap
        ) * 100
    else:
        df[f"Percentage_[%]_of_top_{limit}"] = 0.0
    # Accept "Market Cap [$]"-style keys from the SDK/controller.
    sortby = sortby.replace(" ", "_")
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_exchanges(sortby: str = "Rank", ascend: bool = True) -> pd.DataFrame:
    """Get list of top exchanges from CoinGecko API [Source: CoinGecko]
    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    Returns
    -------
    pd.DataFrame
        Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC, Url
    """
    client = CoinGeckoAPI()
    df = pd.DataFrame(client.get_exchanges_list(per_page=250))
    # np.nan (lowercase): np.NaN was removed in NumPy 2.0.
    df.replace({float(np.nan): None}, inplace=True)
    df = df[
        [
            "trust_score",
            "id",
            "name",
            "country",
            "year_established",
            "trade_volume_24h_btc",
            "url",
        ]
    ]
    df.columns = [
        "Trust_Score",
        "Id",
        "Name",
        "Country",
        "Year_Established",
        "Trade_Volume_24h_BTC",
        "Url",
    ]
    create_df_index(df, "Rank")
    # Rank intentionally inverts the flag so the default (ascend=True) shows
    # the API's ranking order; other columns sort with the flag as given.
    if sortby == "Rank":
        df = df.sort_values(by=sortby, ascending=not ascend)
    else:
        df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_financial_platforms(sortby: str = "Name", ascend: bool = True) -> pd.DataFrame:
    """Get list of financial platforms from CoinGecko API [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Rank, Name, Category, Centralized, Url
    """
    raw = CoinGeckoAPI().get_finance_platforms()
    df = pd.DataFrame(raw)
    # "facts" is free-form text not shown in the table.
    df.drop("facts", axis=1, inplace=True)
    create_df_index(df, "rank")
    df.columns = ["Rank", "Name", "Category", "Centralized", "Url"]
    return df.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_finance_products(sortby: str = "Name", ascend: bool = True) -> pd.DataFrame:
    """Get list of financial products from CoinGecko API

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Rank, Platform, Identifier, Supply_Rate, Borrow_Rate
    """
    # Restrict the frame to the four rate fields the view displays.
    wanted_fields = [
        "platform",
        "identifier",
        "supply_rate_percentage",
        "borrow_rate_percentage",
    ]
    df = pd.DataFrame(
        CoinGeckoAPI().get_finance_products(per_page=250),
        columns=wanted_fields,
    )
    df.columns = ["Platform", "Identifier", "Supply_Rate", "Borrow_Rate"]
    create_df_index(df, "Rank")
    return df.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_indexes(sortby: str = "Name", ascend: bool = True) -> pd.DataFrame:
    """Get list of crypto indexes from CoinGecko API [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Name, Id, Market, Last, MultiAsset
    """
    payload = CoinGeckoAPI().get_indexes(per_page=250)
    df = pd.DataFrame(payload)
    df.columns = ["Name", "Id", "Market", "Last", "MultiAsset"]
    create_df_index(df, "Rank")
    return df.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_derivatives(sortby: str = "Rank", ascend: bool = False) -> pd.DataFrame:
    """Get list of crypto derivatives from CoinGecko API [Source: CoinGecko]
    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data descending
    Returns
    -------
    pd.DataFrame
        Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread,
        Funding_Rate, Volume_24h,
    """
    client = CoinGeckoAPI()
    df = pd.DataFrame(client.get_derivatives(include_tickers="unexpired"))
    # Drop raw API fields that are not surfaced in the terminal view.
    df.drop(
        ["index", "last_traded_at", "expired_at", "index_id", "open_interest"],
        axis=1,
        inplace=True,
    )
    df.rename(columns={"price_percentage_change_24h": "pct_change_24h"}, inplace=True)
    create_df_index(df, "rank")
    # Strip "$" and thousands separators from the price string.
    # NOTE(review): falsy/missing prices become "" so the column mixes str and
    # float; sorting by Price could then fail — confirm whether the API can
    # return empty prices before relying on Price sorts.
    df["price"] = df["price"].apply(
        lambda x: "" if not x else float(x.strip("$").replace(",", ""))
    )
    df.columns = [
        "Rank",
        "Market",
        "Symbol",
        "Price",
        "Pct_Change_24h",
        "Contract_Type",
        "Basis",
        "Spread",
        "Funding_Rate",
        "Volume_24h",
    ]
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_exchange_rates(sortby: str = "Name", ascend: bool = False) -> pd.DataFrame:
    """Get list of crypto, fiats, commodity exchange rates from CoinGecko API [Source: CoinGecko]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Index, Name, Unit, Value, Type
    """
    rates = CoinGeckoAPI().get_exchange_rates()["rates"]
    # Rates arrive keyed by currency code; transpose so each code is a row,
    # then replace the code index with a plain numeric one.
    df = pd.DataFrame(rates).T.reset_index()
    df.drop("index", axis=1, inplace=True)
    create_df_index(df, "index")
    df.columns = ["Index", "Name", "Unit", "Value", "Type"]
    return df.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_global_info() -> pd.DataFrame:
    """Get global statistics about crypto from CoinGecko API like:
    - market cap change
    - number of markets
    - icos
    - number of active crypto
    [Source: CoinGecko]

    Returns
    -------
    pd.DataFrame
        Metric, Value
    """
    results = CoinGeckoAPI().get_global()
    # Pull out market-cap dominance before flattening the remaining metrics.
    dominance = results.pop("market_cap_percentage")
    btc = dominance.get("btc")
    eth = dominance.get("eth")
    for unused_key in ("total_market_cap", "total_volume", "updated_at"):
        del results[unused_key]
    results["btc_market_cap_in_pct"] = btc
    results["eth_market_cap_in_pct"] = eth
    results["altcoin_market_cap_in_pct"] = 100 - (float(eth) + float(btc))
    df = pd.Series(results).reset_index()
    df.columns = ["Metric", "Value"]
    # Prettify metric names for display.
    df["Metric"] = df["Metric"].apply(
        lambda name: lambda_replace_underscores_in_column_names(name)
        if isinstance(name, str)
        else name
    )
    return df
@log_start_end(log=logger)
def get_global_markets_info() -> pd.DataFrame:
    """Get global statistics about crypto markets from CoinGecko API like:
    Market_Cap, Volume, Market_Cap_Percentage
    [Source: CoinGecko]

    Returns
    -------
    pd.DataFrame
        Market_Cap, Volume, Market_Cap_Percentage
    """
    columns = [
        "Market_Cap",
        "Volume",
        "Market_Cap_Percentage",
    ]
    results = CoinGeckoAPI().get_global()
    # Each key maps to a per-currency dict; transpose so currencies are rows.
    data = [results.get(key) for key in columns]
    df = pd.DataFrame(data).T
    df.columns = columns
    df.replace({float("nan"): None}, inplace=True)
    return df.reset_index()
@log_start_end(log=logger)
def get_global_defi_info() -> pd.DataFrame:
    """Get global statistics about Decentralized Finances [Source: CoinGecko]

    Returns
    -------
    pd.DataFrame
        Metric, Value
    """
    results = CoinGeckoAPI().get_global_decentralized_finance_defi()
    # Round every numeric-looking value to 4 places; leave the rest untouched.
    for metric, raw_value in results.items():
        try:
            results[metric] = round(float(raw_value), 4)
        except (ValueError, TypeError):
            pass
    df = pd.Series(results).reset_index()
    df.columns = ["Metric", "Value"]
    # Prettify metric names for display.
    df["Metric"] = df["Metric"].apply(
        lambda name: lambda_replace_underscores_in_column_names(name)
        if isinstance(name, str)
        else name
    )
    return df
import logging
from typing import List, Union
import pandas as pd
import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter, RetryError
from urllib3.util.retry import Retry
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Column order of the hacks table produced by get_crypto_hacks.
HACKS_COLUMNS = ["Platform", "Date", "Amount [$]", "Audit", "Slug", "URL"]
@log_start_end(log=logger)
def _retry_session(
    url: str, retries: int = 3, backoff_factor: float = 1.0
) -> requests.Session:
    """Build a requests session that retries transient server errors.

    Parameters
    ----------
    url: str
        Prefix to mount the retrying adapter on
    retries: int
        Maximum attempts per connect/read failure
    backoff_factor: float
        Multiplier for the exponential wait between retries

    Returns
    -------
    requests.Session
        Session with the retry adapter mounted
    """
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        status_forcelist=[500, 502, 503, 504],
        backoff_factor=backoff_factor,
    )
    session = requests.Session()
    session.mount(url, HTTPAdapter(max_retries=retry_policy))
    return session
@log_start_end(log=logger)
def _make_request(url: str) -> Union[BeautifulSoup, None]:
    """Helper method to scrap
    Parameters
    ----------
    url : str
        url to scrape
    Returns
    -------
    Union[BeautifulSoup, None]
        BeautifulSoup object or None
    """
    headers = {"User-Agent": get_user_agent()}
    # Retry adapter is mounted for the coingecko prefix even though callers
    # pass rekt.news URLs — NOTE(review): retries therefore likely never apply
    # here; confirm the intended mount prefix.
    session = _retry_session("https://www.coingecko.com")
    try:
        req = session.get(url, headers=headers, timeout=5)
    except Exception as error:
        logger.exception(str(error))
        console.print(error)
        raise RetryError(
            "Connection error. Couldn't connect to CoinGecko and scrape the data. "
            "Please visit CoinGecko site, and check if it's not under maintenance"
        ) from error
    # 404 is treated as "not found" rather than an error (callers check None).
    if req.status_code == 404:
        return None
    # Any other client/server error surfaces as a broad Exception — callers
    # may rely on catching Exception, so the type is kept as-is.
    if req.status_code >= 400:
        raise Exception(
            f"Couldn't connect to {url}. Status code: {req.status_code}. Reason: {req.reason}"
        )
    return BeautifulSoup(req.text, features="lxml")
@log_start_end(log=logger)
def get_crypto_hacks(sortby: str = "Platform", ascend: bool = False) -> pd.DataFrame:
    """Get major crypto-related hacks
    [Source: https://rekt.news]

    Parameters
    ----------
    sortby: str
        Key by which to sort data {Platform,Date,Amount [$],Audit,Slug,URL}
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Hacks with columns {Platform,Date,Amount [$],Audit,Slug,URL};
        empty DataFrame when the page could not be fetched
    """
    soup = _make_request("https://rekt.news/leaderboard")
    if not soup:
        return pd.DataFrame()
    # Collect rows in a plain list and build the DataFrame once:
    # appending with df.loc[len(df.index)] re-allocates per row (quadratic).
    rows = []
    for item in soup.find("ol", {"class": "leaderboard-content"}).find_all("li"):
        link = item.find("a", href=True)
        audit = item.find("span", {"class": "leaderboard-audit"}).text
        # Details look like "$<amount> | <date>".
        details = item.find("div", {"class": "leaderboard-row-details"}).text.split(
            "|"
        )
        url = link["href"]
        rows.append(
            [
                link.text,
                details[1].replace(" ", ""),
                int(details[0][1:].replace(",", "")),
                audit,
                url.replace("/", ""),
                f"https://rekt.news{url}",
            ]
        )
    df = pd.DataFrame(rows, columns=HACKS_COLUMNS)
    df["Date"] = pd.to_datetime(df["Date"])
    # Unknown sort keys leave the frame in page order.
    if sortby in HACKS_COLUMNS:
        df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_crypto_hack(slug: str) -> Union[str, None]:
    """Get crypto hack
    [Source: https://rekt.news]
    Parameters
    ----------
    slug: str
        slug of crypto hack
    Returns
    -------
    Union[str, None]
        Crypto hack
    """
    url = f"https://rekt.news/{slug}"
    soup = _make_request(url)
    # _make_request returns None on 404, i.e. an unknown slug.
    if not soup:
        console.print(f'Slug "{slug}" not found\n')
        return None
    title = soup.find("h1", {"class": "post-title"}).text
    date = soup.find("time").text
    # Normalize scraped line breaks around punctuation, then cut everything
    # from the trailing "SUBSCRIBE" footer onward.
    content = (
        soup.find("section", {"class": "post-content"})
        .get_text("\n")
        .replace("\r\n,", ", ")
        .replace("\n,", ", ")
        .replace("\r\n.", ".\n\t")
        .replace("\n.", ".\n\t")
        .replace("\r\n ", " ")
        .replace("\n ", " ")
    ).split("""SUBSCRIBE""")[0]
    final_str = f"""
{title}
{date}
{content}
Detailed history in {url}
"""
    return final_str
@log_start_end(log=logger)
def get_crypto_hack_slugs() -> List[str]:
    """Get all crypto hack slugs
    [Source: https://rekt.news]

    Returns
    -------
    List[str]
        List with slugs (empty when the leaderboard page is unavailable)
    """
    soup = _make_request("https://rekt.news/leaderboard")
    if not soup:
        return []
    entries = soup.find("ol", {"class": "leaderboard-content"}).find_all("li")
    # Each entry's href ("/some-slug/") becomes the bare slug.
    return [
        entry.find("a", href=True)["href"].replace("/", "") for entry in entries
    ]
__docformat__ = "numpy"
# pylint: disable=R0904, C0302, W0622
import argparse
import difflib
import logging
from datetime import datetime, timedelta
from typing import List
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.cryptocurrency.overview.glassnode_view import (
display_btc_rainbow,
)
from openbb_terminal.cryptocurrency.overview import (
blockchaincenter_view,
coinbase_model,
coinbase_view,
coinpaprika_model,
coinpaprika_view,
cryptopanic_model,
cryptopanic_view,
loanscan_model,
loanscan_view,
pycoingecko_model,
pycoingecko_view,
rekt_model,
rekt_view,
withdrawalfees_model,
withdrawalfees_view,
tokenterminal_model,
tokenterminal_view,
)
from openbb_terminal.cryptocurrency.discovery.pycoingecko_model import (
get_categories_keys,
)
from openbb_terminal.cryptocurrency.overview.blockchaincenter_model import DAYS
from openbb_terminal.cryptocurrency.overview.coinpaprika_model import (
get_all_contract_platforms,
)
from openbb_terminal.cryptocurrency.overview.coinpaprika_view import CURRENCIES
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
valid_date,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class OverviewController(BaseController):
    """Overview Controller class"""

    # Menu commands; each name maps to a call_<name> handler on this class.
    CHOICES_COMMANDS = [
        "hm",
        "global",
        "defi",
        "stables",
        "exchanges",
        "exrates",
        "indexes",
        "derivatives",
        "categories",
        "hold",
        "markets",
        "exmarkets",
        "info",
        "platforms",
        "contracts",
        "pairs",
        "news",
        "wf",
        "ewf",
        "wfpe",
        "btcrb",
        "altindex",
        "ch",
        "cr",
        "fun",
    ]
    # Menu path used for logging/completion.
    PATH = "/crypto/ov/"
    # Auto-generate argparse choices for the prompt-toolkit completer.
    CHOICES_GENERATION = True
    def __init__(self, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            # Extend the wfpe completer with every coin withdrawalfees supports.
            choices["wfpe"].update(
                {c: {} for c in withdrawalfees_model.POSSIBLE_CRYPTOS}
            )
            self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
mt = MenuText("crypto/ov/", 105)
mt.add_cmd("global")
mt.add_cmd("defi")
mt.add_cmd("stables")
mt.add_cmd("exchanges")
mt.add_cmd("exrates")
mt.add_cmd("indexes")
mt.add_cmd("derivatives")
mt.add_cmd("categories")
mt.add_cmd("hold")
mt.add_cmd("hm")
mt.add_cmd("info")
mt.add_cmd("markets")
mt.add_cmd("exmarkets")
mt.add_cmd("platforms")
mt.add_cmd("contracts")
mt.add_cmd("pairs")
mt.add_cmd("news")
mt.add_cmd("wf")
mt.add_cmd("ewf")
mt.add_cmd("wfpe")
mt.add_cmd("altindex")
mt.add_cmd("btcrb")
mt.add_cmd("ch")
mt.add_cmd("cr")
mt.add_cmd("fun")
console.print(text=mt.menu_text, menu="Cryptocurrency - Overview")
    @log_start_end(log=logger)
    def call_hm(self, other_args):
        """Process hm command: cryptocurrency heatmap of daily % change."""
        parser = argparse.ArgumentParser(
            prog="hm",
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description="""Display cryptocurrencies heatmap with daily percentage change [Source: https://coingecko.com]
            Accepts --category or -c to display only coins of a certain category
            (default no category to display all coins ranked by market cap).
            You can look on only top N number of records with --limit.
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            dest="limit",
            type=int,
            help="Display N items",
            default=10,
        )
        parser.add_argument(
            "-c",
            "--category",
            default="",
            dest="category",
            help="Category (e.g., stablecoins). Empty for no category",
            choices=get_categories_keys(),
            metavar="CATEGORY",
        )
        # A bare first argument is treated as the category.
        if other_args and not other_args[0][0] == "-":
            other_args.insert(0, "-c")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
        )
        if ns_parser:
            pycoingecko_view.display_crypto_heatmap(
                category=ns_parser.category,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_fun(self, other_args):
        """Process fun command: Token Terminal fundamental metrics overview."""
        parser = argparse.ArgumentParser(
            prog="fun",
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description="""Display fundamental metrics overview [Source: Token Terminal]""",
        )
        parser.add_argument(
            "-m",
            "--metric",
            required=True,
            choices=tokenterminal_model.METRICS,
            dest="metric",
            help="Choose metric of interest",
        )
        parser.add_argument(
            "-c",
            "--category",
            default="",
            choices=tokenterminal_model.CATEGORIES,
            dest="category",
            help="Choose category of interest",
        )
        parser.add_argument(
            "-t",
            "--timeline",
            default="24h",
            choices=tokenterminal_model.TIMELINES,
            dest="timeline",
            help="Choose timeline of interest",
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            dest="reverse",
            default=False,
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        parser.add_argument(
            "-l",
            "--limit",
            dest="limit",
            type=int,
            help="Display N items",
            default=10,
        )
        # A bare first argument is treated as the metric.
        if other_args and not other_args[0][0] == "-":
            other_args.insert(0, "-m")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
        )
        if ns_parser:
            tokenterminal_view.display_fundamental_metrics(
                metric=ns_parser.metric,
                category=ns_parser.category,
                timeline=ns_parser.timeline,
                ascend=ns_parser.reverse,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_ch(self, other_args):
"""Process ch command"""
parser = argparse.ArgumentParser(
prog="ch",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Display list of major crypto-related hacks [Source: https://rekt.news]
Can be sorted by {Platform,Date,Amount [$],Audit,Slug,URL} with --sortby
and reverse the display order with --reverse
Show only N elements with --limit
Accepts --slug or -s to check individual crypto hack (e.g., -s polynetwork-rekt)
""",
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
type=int,
help="Display N items",
default=15,
)
parser.add_argument(
"--sortby",
dest="sortby",
type=str,
help="Sort by given column. Default: Amount [$]",
default="Amount [$]",
nargs="+",
choices=rekt_model.HACKS_COLUMNS,
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
default=False,
help=(
"Data is sorted in descending order by default. "
"Reverse flag will sort it in an ascending way. "
"Only works when raw data is displayed."
),
)
parser.add_argument(
"-s",
"--slug",
dest="slug",
type=str,
help="Slug to check crypto hack (e.g., polynetwork-rekt)",
default="",
choices=rekt_model.get_crypto_hack_slugs(),
metavar="SORTBY",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-s")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
rekt_view.display_crypto_hacks(
slug=ns_parser.slug,
limit=ns_parser.limit,
export=ns_parser.export,
sortby=" ".join(ns_parser.sortby),
ascend=not ns_parser.reverse,
)
    @log_start_end(log=logger)
    def call_btcrb(self, other_args: List[str]):
        """Process btcrb command: bitcoin rainbow chart."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="btcrb",
            description="""Display bitcoin rainbow chart overtime including halvings.
            [Price data from source: https://glassnode.com]
            [Inspired by: https://blockchaincenter.net]""",
        )
        parser.add_argument(
            "-s",
            "--since",
            dest="since",
            type=valid_date,
            help="Initial date. Default is initial BTC date: 2010-01-01",
            default=datetime(2010, 1, 1).strftime("%Y-%m-%d"),
        )
        parser.add_argument(
            "-u",
            "--until",
            dest="until",
            type=valid_date,
            help="Final date. Default is current date",
            default=datetime.now().strftime("%Y-%m-%d"),
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            # valid_date yields datetimes; the view expects ISO date strings.
            display_btc_rainbow(
                start_date=ns_parser.since.strftime("%Y-%m-%d"),
                end_date=ns_parser.until.strftime("%Y-%m-%d"),
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_altindex(self, other_args: List[str]):
        """Process altindex command: altcoin season index over time."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="altindex",
            description="""Display altcoin index overtime.
            If 75% of the Top 50 coins performed better than Bitcoin over periods of time
            (30, 90 or 365 days) it is Altcoin Season. Excluded from the Top 50 are
            Stablecoins (Tether, DAI…) and asset backed tokens (WBTC, stETH, cLINK,…)
            [Source: https://blockchaincenter.net]
            """,
        )
        parser.add_argument(
            "-p",
            "--period",
            type=int,
            help="Period of time to check if how altcoins have performed against btc (30, 90, 365)",
            dest="period",
            default=365,
            choices=DAYS,
        )
        parser.add_argument(
            "-s",
            "--since",
            dest="since",
            type=valid_date,
            help="Start date (default: 1 year before, e.g., 2021-01-01)",
            default=(datetime.now() - timedelta(days=365)).strftime("%Y-%m-%d"),
        )
        parser.add_argument(
            "-u",
            "--until",
            dest="until",
            type=valid_date,
            help="Final date. Default is current date",
            default=datetime.now().strftime("%Y-%m-%d"),
        )
        # A bare first argument is treated as the period.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-p")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            blockchaincenter_view.display_altcoin_index(
                start_date=ns_parser.since.strftime("%Y-%m-%d"),
                end_date=ns_parser.until.strftime("%Y-%m-%d"),
                period=ns_parser.period,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_wf(self, other_args: List[str]):
        """Process wf command: top coins withdrawal fees."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="wf",
            description="""
                Display top coins withdrawal fees
                [Source: https://withdrawalfees.com/]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            type=int,
            help="Limit number of coins to display withdrawal fees. Default 10",
            dest="limit",
            default=10,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            withdrawalfees_view.display_overall_withdrawal_fees(
                limit=ns_parser.limit, export=ns_parser.export
            )
    @log_start_end(log=logger)
    def call_ewf(self, other_args: List[str]):
        """Process ewf command: exchange withdrawal fees (no arguments)."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="ewf",
            description="""
                Display exchange withdrawal fees
                [Source: https://withdrawalfees.com/]
            """,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            withdrawalfees_view.display_overall_exchange_withdrawal_fees(
                export=ns_parser.export
            )
    @log_start_end(log=logger)
    def call_wfpe(self, other_args: List[str]):
        """Process wfpe command: per-exchange withdrawal fees for one coin."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="wfpe",
            description="""
                Coin withdrawal fees per exchange
                [Source: https://withdrawalfees.com/]
            """,
        )
        parser.add_argument(
            "-c",
            "--coin",
            default="bitcoin",
            type=str,
            dest="coin",
            help="Coin to check withdrawal fees in long format (e.g., bitcoin, ethereum)",
        )
        # A bare first argument is treated as the coin name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-c")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            if ns_parser.coin:
                if ns_parser.coin in withdrawalfees_model.POSSIBLE_CRYPTOS:
                    withdrawalfees_view.display_crypto_withdrawal_fees(
                        symbol=ns_parser.coin, export=ns_parser.export
                    )
                else:
                    console.print(f"Coin '{ns_parser.coin}' does not exist.")
                    # First try a strict fuzzy match and auto-substitute it...
                    similar_cmd = difflib.get_close_matches(
                        ns_parser.coin,
                        withdrawalfees_model.POSSIBLE_CRYPTOS,
                        n=1,
                        cutoff=0.75,
                    )
                    if similar_cmd:
                        console.print(f"Replacing by '{similar_cmd[0]}'")
                        withdrawalfees_view.display_crypto_withdrawal_fees(
                            symbol=similar_cmd[0], export=ns_parser.export
                        )
                    else:
                        # ...then a looser match only to suggest, not substitute.
                        similar_cmd = difflib.get_close_matches(
                            ns_parser.coin,
                            withdrawalfees_model.POSSIBLE_CRYPTOS,
                            n=1,
                            cutoff=0.5,
                        )
                        if similar_cmd:
                            console.print(f"Did you mean '{similar_cmd[0]}'?")
                        else:
                            console.print(
                                f"Couldn't find any coin with provided name: {ns_parser.coin}. "
                                f"Please choose one from list: {withdrawalfees_model.POSSIBLE_CRYPTOS}\n"
                            )
    @log_start_end(log=logger)
    def call_hold(self, other_args):
        """Process hold command: public companies holding BTC or ETH."""
        parser = argparse.ArgumentParser(
            prog="hold",
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description="""
            Shows overview of public companies that holds ethereum or bitcoin.
            You can find there most important metrics like:
            Total Bitcoin Holdings, Total Value (USD), Public Companies Bitcoin Dominance, Companies
            """,
        )
        parser.add_argument(
            "-c",
            "--coin",
            dest="coin",
            type=str,
            help="companies with ethereum or bitcoin",
            default="bitcoin",
            choices=pycoingecko_model.HOLD_COINS,
        )
        parser.add_argument(
            "-l",
            "--limit",
            dest="limit",
            type=check_positive,
            help="display N number of records",
            default=5,
        )
        parser.add_argument(
            "--bar",
            action="store_true",
            help="Flag to show bar chart",
            dest="bar",
            default=False,
        )
        # A bare first argument is treated as the coin name.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-c")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            pycoingecko_view.display_holdings_overview(
                symbol=ns_parser.coin,
                export=ns_parser.export,
                show_bar=ns_parser.bar,
                limit=ns_parser.limit,
            )
@log_start_end(log=logger)
def call_categories(self, other_args):
"""Process top_categories command"""
parser = argparse.ArgumentParser(
prog="categories",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Shows top cryptocurrency categories by market capitalization. It includes categories like:
stablecoins, defi, solana ecosystem, polkadot ecosystem and many others.
You can sort by {}, using --sortby parameter""",
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
type=check_positive,
help="display N number of records",
default=15,
)
parser.add_argument(
"-s",
"--sortby",
dest="sortby",
type=str,
help="Sort by given column. Default: market_cap_desc",
default="Market_Cap",
choices=[
"Name",
"Market_Cap",
"Market_Cap_Change_24H",
"Top_3_Coins",
"Volume_24H",
],
)
parser.add_argument(
"--pie",
action="store_true",
help="Flag to show pie chart",
dest="pie",
default=False,
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
pycoingecko_view.display_categories(
limit=ns_parser.limit,
export=ns_parser.export,
sortby=ns_parser.sortby,
pie=ns_parser.pie,
)
# TODO: solve sort (similar to losers from discovery)
@log_start_end(log=logger)
def call_stables(self, other_args):
"""Process stables command"""
parser = argparse.ArgumentParser(
prog="stables",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Shows stablecoins by market capitalization.
Stablecoins are cryptocurrencies that attempt to peg their market value to some external reference
like the U.S. dollar or to a commodity's price such as gold.
You can display only N number of coins with --limit parameter.
You can sort data by {} with --sortby""",
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
type=check_positive,
help="display N number records",
default=15,
)
parser.add_argument(
"-s",
"--sortby",
dest="sortby",
type=str,
help="Sort by given column. Default: market_cap",
default="Market_Cap_[$]",
choices=[
"Symbol",
"Name",
"Price_[$]",
"Market_Cap_[$]",
"Market_Cap_Rank",
"Change_7d_[%]",
"Change_24h_[%]",
"Volume_[$]",
],
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
default=False,
help=(
"Data is sorted in descending order by default. "
"Reverse flag will sort it in an ascending way. "
"Only works when raw data is displayed."
),
)
parser.add_argument(
"--pie",
action="store_true",
help="Flag to show pie chart",
dest="pie",
default=False,
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
pycoingecko_view.display_stablecoins(
limit=ns_parser.limit,
export=ns_parser.export,
sortby=ns_parser.sortby,
ascend=ns_parser.reverse,
pie=ns_parser.pie,
)
@log_start_end(log=logger)
def call_cr(self, other_args):
    """Process cr command.

    Displays crypto borrow/supply interest rates across several lending
    platforms (loanscan). A bare first argument is treated as -t/--type.
    """
    parser = argparse.ArgumentParser(
        prog="cr",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Displays crypto {borrow,supply} interest rates for cryptocurrencies across several platforms.
        You can select rate type with --type {borrow,supply}
        You can display only N number of platforms with --limit parameter.""",
    )
    parser.add_argument(
        "-t",
        "--type",
        dest="type",
        type=str,
        help="Select interest rate type",
        default="supply",
        choices=["borrow", "supply"],
    )
    # "--cryptocurrrencies" (three r's) is a long-standing typo; it is kept
    # for backward compatibility and the correctly spelled
    # "--cryptocurrencies" is added as an alias.
    parser.add_argument(
        "-c",
        "--cryptocurrrencies",
        "--cryptocurrencies",
        dest="cryptos",
        type=loanscan_model.check_valid_coin,
        help=f"""Cryptocurrencies to search interest rates for separated by comma.
        Default: BTC,ETH,USDT,USDC. Options: {",".join(loanscan_model.CRYPTOS)}""",
        default="BTC,ETH,USDT,USDC",
        choices=loanscan_model.CRYPTOS,
        metavar="CRYPTOS",
    )
    parser.add_argument(
        "-p",
        "--platforms",
        dest="platforms",
        type=loanscan_model.check_valid_platform,
        help=f"""Platforms to search interest rates in separated by comma.
        Default: BlockFi,Ledn,SwissBorg,Youhodler. Options: {",".join(loanscan_model.PLATFORMS)}""",
        default="BlockFi,Ledn,SwissBorg,Youhodler",
        choices=loanscan_model.PLATFORMS,
        metavar="PLATFORMS",
    )
    # Allow "cr borrow" shorthand: a leading non-flag token becomes --type.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-t")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED, limit=10
    )
    if ns_parser:
        loanscan_view.display_crypto_rates(
            rate_type=ns_parser.type,
            symbols=ns_parser.cryptos,
            platforms=ns_parser.platforms,
            limit=ns_parser.limit,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_exchanges(self, other_args):
    """Process exchanges command: top crypto exchanges (CoinGecko or CoinPaprika)."""
    # Sortable columns differ per source, so accept the union of both sets.
    sortable_columns = (
        pycoingecko_model.EXCHANGES_FILTERS + coinpaprika_model.EXCHANGES_FILTERS
    )
    parser = argparse.ArgumentParser(
        prog="exchanges",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Shows Top Crypto Exchanges
        You can display only N number exchanges with --limit parameter.
        You can sort data by Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC with --sortby
        Or you can sort data by 'name', 'currencies', 'markets', 'fiats', 'confidence',
        'volume_24h', 'volume_7d', 'volume_30d', 'sessions_per_month'
        if you are using the alternative source CoinPaprika
        and also with --reverse flag to sort ascending.
        Flag --urls will display urls.
        Displays: Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC""",
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        default=15,
        type=check_positive,
        help="display N number records",
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        default="Rank",
        type=str,
        choices=sortable_columns,
        help="Sort by given column. Default: Rank",
    )
    parser.add_argument(
        "-r",
        "--reverse",
        dest="reverse",
        action="store_true",
        default=False,
        help=(
            "Data is sorted in descending order by default. "
            "Reverse flag will sort it in an ascending way. "
            "Only works when raw data is displayed."
        ),
    )
    parser.add_argument(
        "-u",
        "--urls",
        dest="urls",
        action="store_true",
        default=False,
        help="Flag to add a url column. Works only with CoinGecko source",
    )
    parser.add_argument(
        "--vs",
        dest="vs",
        default="USD",
        type=str,
        choices=CURRENCIES,
        help="Quoted currency. Default: USD. Works only with CoinPaprika source",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    # Dispatch on the selected data source.
    if ns_parser.source == "CoinGecko":
        pycoingecko_view.display_exchanges(
            limit=ns_parser.limit,
            export=ns_parser.export,
            sortby=ns_parser.sortby,
            ascend=ns_parser.reverse,
            links=ns_parser.urls,
        )
    elif ns_parser.source == "CoinPaprika":
        coinpaprika_view.display_all_exchanges(
            symbol=ns_parser.vs,
            limit=ns_parser.limit,
            ascend=ns_parser.reverse,
            sortby=ns_parser.sortby,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_exrates(self, other_args):
    """Process exchange_rates command.

    Lists crypto/fiat/commodity exchange rates from CoinGecko with
    optional sorting and reversal.
    """
    parser = argparse.ArgumentParser(
        prog="exrates",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
        Shows list of crypto, fiats, commodity exchange rates from CoinGecko
        You can look on only N number of records with --limit,
        You can sort by Index, Name, Unit, Value, Type, and also use --reverse flag to sort descending.""",
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number records",
        default=15,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: Index",
        default="Index",
        choices=pycoingecko_model.EXRATES_FILTERS,
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        # Fixed help-text grammar ("an descending" -> "a descending").
        help=(
            "Data is sorted in ascending order by default. "
            "Reverse flag will sort it in a descending way. "
            "Only works when raw data is displayed."
        ),
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        pycoingecko_view.display_exchange_rates(
            sortby=ns_parser.sortby,
            limit=ns_parser.limit,
            ascend=not ns_parser.reverse,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_indexes(self, other_args):
    """Process indexes command.

    Lists crypto indexes from CoinGecko with optional sorting/reversal.
    """
    parser = argparse.ArgumentParser(
        prog="indexes",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Shows list of crypto indexes from CoinGecko.
        Each crypto index is made up of a selection of cryptocurrencies,
        grouped together and weighted by market cap.
        You can display only N number of indexes with --limit parameter.
        You can sort data by Rank, Name, Id, Market, Last, MultiAsset with --sortby
        and also with --reverse flag to sort descending.
        Displays: Rank, Name, Id, Market, Last, MultiAsset
        """,
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number records",
        default=15,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: Rank",
        default="Rank",
        choices=pycoingecko_model.INDEXES_FILTERS,
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        # Fixed help-text grammar ("an descending" -> "a descending").
        help=(
            "Data is sorted in ascending order by default. "
            "Reverse flag will sort it in a descending way. "
            "Only works when raw data is displayed."
        ),
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        pycoingecko_view.display_indexes(
            limit=ns_parser.limit,
            sortby=ns_parser.sortby,
            ascend=not ns_parser.reverse,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_derivatives(self, other_args):
    """Process derivatives command.

    Lists crypto derivatives (futures, options, perpetuals) from CoinGecko.
    """
    parser = argparse.ArgumentParser(
        prog="derivatives",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Shows list of crypto derivatives from CoinGecko
        Crypto derivatives are secondary contracts or financial tools that derive their value from a primary
        underlying asset. In this case, the primary asset would be a cryptocurrency such as Bitcoin.
        The most popular crypto derivatives are crypto futures, crypto options, and perpetual contracts.
        You can look on only N number of records with --limit,
        You can sort by Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate,
        Volume_24h with by and also with --reverse flag to set it to sort descending.
        Displays:
        Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate, Volume_24h
        """,
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number records",
        default=15,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: Rank",
        default="Rank",
        choices=pycoingecko_model.DERIVATIVES_FILTERS,
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        # Fixed help-text grammar ("an descending" -> "a descending").
        help=(
            "Data is sorted in ascending order by default. "
            "Reverse flag will sort it in a descending way. "
            "Only works when raw data is displayed."
        ),
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        pycoingecko_view.display_derivatives(
            limit=ns_parser.limit,
            sortby=ns_parser.sortby,
            ascend=not ns_parser.reverse,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_global(self, other_args):
    """Process global command: global crypto-market statistics."""
    parser = argparse.ArgumentParser(
        prog="global",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Shows global statistics about Crypto Market""",
    )
    parser.add_argument(
        "--pie",
        dest="pie",
        action="store_true",
        default=False,
        help="Flag to show pie chart with market cap distribution. Works only with CoinGecko source",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    # Route to the view matching the selected data source.
    source = ns_parser.source
    if source == "CoinGecko":
        pycoingecko_view.display_global_market_info(
            export=ns_parser.export, pie=ns_parser.pie
        )
    elif source == "CoinPaprika":
        coinpaprika_view.display_global_market(export=ns_parser.export)
@log_start_end(log=logger)
def call_defi(self, other_args):
    """Process defi command: global DeFi statistics from CoinGecko."""
    parser = argparse.ArgumentParser(
        prog="defi",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Shows global DeFi statistics
        DeFi or Decentralized Finance refers to financial services that are built
        on top of distributed networks with no central intermediaries.
        Displays metrics like:
            Market Cap, Trading Volume, Defi Dominance, Top Coins...""",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    pycoingecko_view.display_global_defi_info(export=ns_parser.export)
@log_start_end(log=logger)
def call_markets(self, other_args):
    """Process markets command: market info for all coins on CoinPaprika."""
    parser = argparse.ArgumentParser(
        prog="markets",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Show market related (price, supply, volume) coin information for all coins on CoinPaprika.
        You can display only N number of coins with --limit parameter.
        You can sort data by rank, name, symbol, price, volume_24h, mcap_change_24h, pct_change_1h, pct_change_24h,
        ath_price, pct_from_ath, --sortby parameter and also with --reverse flag to sort ascending.
        Displays:
            rank, name, symbol, price, volume_24h, mcap_change_24h,
            pct_change_1h, pct_change_24h, ath_price, pct_from_ath,
        """,
    )
    parser.add_argument(
        "--vs",
        dest="vs",
        default="USD",
        type=str,
        choices=CURRENCIES,
        help="Quoted currency. Default USD",
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        default=15,
        type=check_positive,
        help="display N number records",
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        default="rank",
        type=str,
        choices=coinpaprika_model.MARKETS_FILTERS,
        help="Sort by given column. Default: rank",
    )
    parser.add_argument(
        "-r",
        "--reverse",
        dest="reverse",
        action="store_true",
        default=False,
        help=(
            "Data is sorted in descending order by default. "
            "Reverse flag will sort it in an ascending way. "
            "Only works when raw data is displayed."
        ),
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    coinpaprika_view.display_all_coins_market_info(
        symbol=ns_parser.vs,
        limit=ns_parser.limit,
        ascend=ns_parser.reverse,
        export=ns_parser.export,
        sortby=ns_parser.sortby,
    )
@log_start_end(log=logger)
def call_exmarkets(self, other_args):
    """Process exmarkets command: markets listed on a given exchange (CoinPaprika)."""
    parser = argparse.ArgumentParser(
        prog="exmarkets",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Get all exchange markets found for given exchange
        You can display only N number of records with --limit parameter.
        You can sort data by pair, base_currency_name, quote_currency_name, market_url, category,
        reported_volume_24h_share, trust_score --sortby parameter and also with --reverse flag to sort ascending.
        You can use additional flag --urls to see urls for each market
        Displays:
            exchange_id, pair, base_currency_name, quote_currency_name, market_url,
            category, reported_volume_24h_share, trust_score,""",
    )
    parser.add_argument(
        "-e",
        "--exchange",
        dest="exchange",
        default="binance",
        type=str,
        help="Identifier of exchange e.g for Binance Exchange -> binance",
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        default=10,
        type=check_positive,
        help="display N number records",
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        default="reported_volume_24h_share",
        type=str,
        choices=coinpaprika_model.EXMARKETS_FILTERS,
        help="Sort by given column. Default: reported_volume_24h_share",
    )
    parser.add_argument(
        "-r",
        "--reverse",
        dest="reverse",
        action="store_true",
        default=False,
        help=(
            "Data is sorted in descending order by default. "
            "Reverse flag will sort it in an ascending way. "
            "Only works when raw data is displayed."
        ),
    )
    parser.add_argument(
        "-u",
        "--urls",
        dest="urls",
        action="store_true",
        default=False,
        help="""Flag to show urls. If you will use that flag you will see only:
        exchange, pair, trust_score, market_url columns""",
    )
    # Allow "exmarkets binance" shorthand: a leading non-flag token
    # becomes the -e/--exchange value.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-e")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    coinpaprika_view.display_exchange_markets(
        exchange=ns_parser.exchange,
        limit=ns_parser.limit,
        export=ns_parser.export,
        sortby=ns_parser.sortby,
        ascend=ns_parser.reverse,
        links=ns_parser.urls,
    )
@log_start_end(log=logger)
def call_info(self, other_args):
    """Process info command.

    Shows basic coin information for all coins from the CoinPaprika API.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="info",
        description="""Show basic coin information for all coins from CoinPaprika API
        You can display only N number of coins with --limit parameter.
        You can sort data by rank, name, symbol, price, volume_24h, circulating_supply,
        total_supply, max_supply, market_cap, beta_value, ath_price --sortby parameter
        and also with --reverse flag to sort descending.
        Displays:
            rank, name, symbol, price, volume_24h, circulating_supply,
            total_supply, max_supply, market_cap, beta_value, ath_price
        """,
    )
    parser.add_argument(
        "--vs",
        help="Quoted currency. Default USD",
        dest="vs",
        default="USD",
        type=str,
        choices=CURRENCIES,
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number records",
        default=20,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: rank",
        default="rank",
        choices=coinpaprika_model.INFO_FILTERS,
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        # Fixed help-text grammar ("an descending" -> "a descending").
        help=(
            "Data is sorted in ascending order by default. "
            "Reverse flag will sort it in a descending way. "
            "Only works when raw data is displayed."
        ),
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        coinpaprika_view.display_all_coins_info(
            symbol=ns_parser.vs,
            limit=ns_parser.limit,
            ascend=not ns_parser.reverse,
            sortby=ns_parser.sortby,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_platforms(self, other_args):
    """Process platforms command: list all smart-contract platforms (CoinPaprika)."""
    parser = argparse.ArgumentParser(
        prog="platforms",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""List all smart contract platforms like ethereum, solana, cosmos, polkadot, kusama""",
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns_parser:
        return
    coinpaprika_view.display_all_platforms(export=ns_parser.export)
@log_start_end(log=logger)
def call_contracts(self, other_args):
    """Process contracts command.

    Lists all contract addresses for a given platform (CoinPaprika).
    A bare first argument is treated as the -p/--platform value.
    """
    # Valid platform ids come from the CoinPaprika contract-platforms list.
    platforms = get_all_contract_platforms()["platform_id"].tolist()
    parser = argparse.ArgumentParser(
        prog="contracts",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Gets all contract addresses for given platform.
        Provide platform id with -p/--platform parameter
        You can display only N number of smart contracts with --limit parameter.
        You can sort data by id, type, active, balance --sortby parameter
        and also with --reverse flag to sort descending.
        Displays:
            id, type, active, balance
        """,
    )
    parser.add_argument(
        "-p",
        "--platform",
        help="Blockchain platform like eth-ethereum",
        dest="platform",
        default="eth-ethereum",
        type=str,
        choices=platforms,
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number records",
        default=15,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column",
        default="id",
        choices=coinpaprika_model.CONTRACTS_FILTERS,
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        # Fixed help-text grammar ("an descending" -> "a descending").
        help=(
            "Data is sorted in ascending order by default. "
            "Reverse flag will sort it in a descending way. "
            "Only works when raw data is displayed."
        ),
    )
    # Allow "contracts eth-ethereum" shorthand for the platform argument.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-p")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        coinpaprika_view.display_contracts(
            symbol=ns_parser.platform,
            limit=ns_parser.limit,
            ascend=not ns_parser.reverse,
            sortby=ns_parser.sortby,
            export=ns_parser.export,
        )
@log_start_end(log=logger)
def call_pairs(self, other_args):
    """Process pairs command.

    Shows available trading pairs on Coinbase.
    """
    parser = argparse.ArgumentParser(
        prog="pairs",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Shows available trading pairs on Coinbase ",
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        # check_positive for consistency with the other commands; the help
        # text already promises a positive record count.
        type=check_positive,
        help="display N number of pairs >=10",
        default=15,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: id",
        default="id",
        choices=coinbase_model.PAIRS_FILTERS,
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        # Fixed help-text grammar ("an descending" -> "a descending").
        help=(
            "Data is sorted in ascending order by default. "
            "Reverse flag will sort it in a descending way. "
            "Only works when raw data is displayed."
        ),
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        coinbase_view.display_trading_pairs(
            limit=ns_parser.limit,
            export=ns_parser.export,
            sortby=ns_parser.sortby,
            ascend=not ns_parser.reverse,
        )
@log_start_end(log=logger)
def call_news(self, other_args):
    """Process news command.

    Displays recent crypto news from the CryptoPanic aggregator, with
    category/kind/region filters and optional url column.

    Note: the original source had dataset-metadata text fused onto the
    final line; this rewrite restores valid Python.
    """
    parser = argparse.ArgumentParser(
        prog="news",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Display recent news from CryptoPanic aggregator platform. [Source: https://cryptopanic.com/]",
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number records",
        default=20,
    )
    parser.add_argument(
        "-k",
        "--kind",
        dest="kind",
        type=str,
        help="Filter by category of news. Available values: news or media.",
        default="news",
        choices=cryptopanic_model.CATEGORIES,
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        type=str,
        help="Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol",
        default=None,
        required=False,
        choices=cryptopanic_model.FILTERS,
    )
    # Note: -r is region here (not reverse, unlike sibling commands);
    # --reverse below therefore has no short form.
    parser.add_argument(
        "-r",
        "--region",
        dest="region",
        type=str,
        help="Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch), es (Español), "
        "fr (Français), it (Italiano), pt (Português), ru (Русский)",
        default="en",
        choices=cryptopanic_model.REGIONS,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: published_at",
        default="published_at",
        choices=cryptopanic_model.SORT_FILTERS,
    )
    parser.add_argument(
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        help=(
            "Data is sorted in descending order by default. "
            "Reverse flag will sort it in an ascending way. "
            "Only works when raw data is displayed."
        ),
    )
    parser.add_argument(
        "-u",
        "--urls",
        dest="urls",
        action="store_true",
        help="Flag to show urls column.",
        default=False,
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        cryptopanic_view.display_news(
            limit=ns_parser.limit,
            export=ns_parser.export,
            sortby=ns_parser.sortby,
            ascend=ns_parser.reverse,
            links=ns_parser.urls,
            post_kind=ns_parser.kind,
            filter_=ns_parser.filter,
            region=ns_parser.region,
        )
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_very_long_number_formatter,
)
from openbb_terminal.cryptocurrency.dataframe_helpers import (
prettify_column_names,
)
from openbb_terminal.cryptocurrency.onchain import bitquery_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.decorators import check_api_key
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_BITQUERY_KEY"])
def display_dex_trades(
    trade_amount_currency: str = "USD",
    kind: str = "dex",
    limit: int = 20,
    days: int = 90,
    sortby: str = "tradeAmount",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing Trades on Decentralized Exchanges aggregated by DEX or Month
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    trade_amount_currency: str
        Currency of displayed trade amount. Default: USD
    kind: str
        Aggregate trades by dex or time
    limit: int
        Number of records to display
    days: int
        Last n days to query data. Maximum 365 (bigger numbers can cause timeouts
        on server side)
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # "time" aggregates per month; anything else aggregates per exchange.
    if kind == "time":
        df = bitquery_model.get_dex_trades_monthly(trade_amount_currency, days, ascend)
    else:
        df = bitquery_model.get_dex_trades_by_exchange(
            trade_amount_currency, days, sortby, ascend
        )
    if df.empty:
        return

    # Keep the raw numbers for export; the display copy gets abbreviated values.
    raw_df = df.copy()
    pretty_cols = prettify_column_names(["tradeAmount", "trades"])
    df[pretty_cols] = df[pretty_cols].applymap(lambda_very_long_number_formatter)

    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Trades on Decentralized Exchanges",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "lt",
        raw_df,
    )
@log_start_end(log=logger)
@check_api_key(["API_BITQUERY_KEY"])
def display_daily_volume_for_given_pair(
    symbol: str = "WBTC",
    to_symbol: str = "USDT",
    limit: int = 20,
    sortby: str = "date",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing daily volume for given pair
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    symbol: str
        ERC20 token symbol or address
    to_symbol: str
        Quote currency.
    limit: int
        Number of days to fetch data for
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = bitquery_model.get_daily_dex_volume_for_given_pair(
        symbol=symbol,
        to_symbol=to_symbol,
        limit=limit,
        sortby=sortby,
        ascend=ascend,
    )
    if df.empty:
        return

    # Keep raw values for the export; display copy gets abbreviated numbers.
    df_data = df.copy()
    df[["Trade amount", "Trades"]] = df[["Trade amount", "Trades"]].applymap(
        lambda_very_long_number_formatter
    )

    # `limit` above is a number of days, not rows. When an exchange reports
    # more than one row per day we still want all of them printed, so the
    # row cap is set to a large value rather than the day count.
    # (Previously this was 10, which contradicted the intent and could
    # truncate the output below the requested number of days.)
    limit = 1000
    print_rich_table(
        df.head(limit),
        headers=list(df.columns),
        show_index=False,
        title="Daily Volume for Pair",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "dvcp",
        df_data,
    )
@log_start_end(log=logger)
@check_api_key(["API_BITQUERY_KEY"])
def display_dex_volume_for_token(
    symbol: str = "WBTC",
    trade_amount_currency: str = "USD",
    limit: int = 10,
    sortby: str = "tradeAmount",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing token volume on different Decentralized Exchanges.
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    symbol: str
        ERC20 token symbol or address
    trade_amount_currency: str
        Currency of displayed trade amount. Default: USD
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = bitquery_model.get_token_volume_on_dexes(
        symbol=symbol,
        trade_amount_currency=trade_amount_currency,
        sortby=sortby,
        ascend=ascend,
    )
    if not df.empty:
        # Copy BEFORE formatting so the export keeps raw numeric values.
        df_data = df.copy()
        column_names = ["tradeAmount", "trades"]
        column_names = prettify_column_names(column_names)
        # Abbreviate large numbers (e.g. 1.2M) for the on-screen table only.
        df[column_names] = df[column_names].applymap(
            lambda x: lambda_very_long_number_formatter(x)
        )
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
            title="Token Volume on Exchanges",
        )

        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "tv",
            df_data,
        )
@log_start_end(log=logger)
@check_api_key(["API_BITQUERY_KEY"])
def display_ethereum_unique_senders(
    interval: str = "days",
    limit: int = 10,
    sortby: str = "date",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing number of unique ethereum addresses which made a transaction in given time interval
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    interval: str
        Time interval in which ethereum address made transaction. month, week or day
    limit: int
        Number of records to display. It's calculated base on provided interval.
        If interval is month then calculation is made in the way: limit * 30 = time period,
        in case if interval is set to week, then time period is calculated as limit * 7.
        For better user experience maximum time period in days is equal to 90.
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = bitquery_model.get_ethereum_unique_senders(interval, limit, sortby, ascend)
    if df.empty:
        return

    # Copy BEFORE formatting so the export keeps raw numeric values —
    # previously the copy was taken after applymap, so the exported file
    # contained abbreviated strings (inconsistent with sibling functions).
    df_data = df.copy()

    column_names = prettify_column_names(
        ["uniqueSenders", "transactions", "maximumGasPrice"]
    )
    df[column_names] = df[column_names].applymap(lambda_very_long_number_formatter)

    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title="Unique Ethereum Addresses",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ueat",
        df_data,
    )
@log_start_end(log=logger)
@check_api_key(["API_BITQUERY_KEY"])
def display_most_traded_pairs(
    exchange: str = "Uniswap",
    days: int = 10,
    limit: int = 10,
    sortby: str = "tradeAmount",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing most traded crypto pairs on given decentralized exchange in chosen time period.
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    exchange: str
        Decentralized exchange name
    days: int
        Number of days taken into calculation account.
    limit: int
        Number of rows to display
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # NOTE(review): `days` is passed as the model's `limit` argument —
    # presumably the model treats it as a day count; confirm against
    # bitquery_model.get_most_traded_pairs.
    df = bitquery_model.get_most_traded_pairs(
        exchange=exchange, limit=days, sortby=sortby, ascend=ascend
    )
    if not df.empty:
        # Copy BEFORE formatting so the export keeps raw numeric values.
        df_data = df.copy()
        column_names = ["tradeAmount", "trades"]
        column_names = prettify_column_names(column_names)
        # Abbreviate large numbers for the on-screen table only.
        df[column_names] = df[column_names].applymap(
            lambda x: lambda_very_long_number_formatter(x)
        )
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
            title="Most Traded Crypto Pairs",
        )

        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "ttcp",
            df_data,
        )
@log_start_end(log=logger)
@check_api_key(["API_BITQUERY_KEY"])
def display_spread_for_crypto_pair(
    symbol: str = "WETH",
    to_symbol: str = "USDT",
    limit: int = 10,
    sortby: str = "date",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Prints table showing an average bid and ask prices, average spread for given crypto pair for chosen
    time period. [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    symbol: str
        ERC20 token symbol
    to_symbol: str
        Quoted currency.
    limit: int
        Last n days to query data
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # (Docstring no longer claims a DataFrame return — the function
    # returns None; it only prints/exports. The stray dataset-metadata
    # text fused to the last line was also removed.)
    df = bitquery_model.get_spread_for_crypto_pair(
        symbol=symbol, to_symbol=to_symbol, limit=limit, sortby=sortby, ascend=ascend
    )
    if df.empty:
        return

    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title="Average Spread for Given Crypto",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "baas",
        df,
    )
__docformat__ = "numpy"
import logging
import os
from datetime import datetime
from typing import List, Optional
import matplotlib.pyplot as plt
from matplotlib import ticker
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.cryptocurrency.onchain import blockchain_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
plot_autoscale,
is_valid_axes_count,
str_date_to_timestamp,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_btc_circulating_supply(
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Returns BTC circulating supply [Source: https://api.blockchain.info/]

    Parameters
    ----------
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD (defaults to today)
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df = blockchain_model.get_btc_circulating_supply()
    ts_start_date = str_date_to_timestamp(start_date)
    ts_end_date = str_date_to_timestamp(end_date)

    # Restrict to the requested date window ("x" holds the timestamps).
    df = df[
        (df["x"] > datetime.fromtimestamp(ts_start_date))
        & (df["x"] < datetime.fromtimestamp(ts_end_date))
    ]

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(df["x"], df["y"])
    ax.set_ylabel("BTC")
    ax.set_title("BTC Circulating Supply")
    ax.get_yaxis().set_major_formatter(
        ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    # Apply the terminal theme — previously missing here although the
    # sibling display_btc_confirmed_transactions applies it.
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "btccp",
        df,
    )
@log_start_end(log=logger)
def display_btc_confirmed_transactions(
    start_date: str = "2010-01-01",
    end_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Returns BTC confirmed transactions [Source: https://api.blockchain.info/]

    Parameters
    ----------
    start_date : str
        Initial date, format YYYY-MM-DD
    end_date : Optional[str]
        Final date, format YYYY-MM-DD (defaults to today)
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # (Stray dataset-metadata text fused to the final line was removed.)
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")

    df = blockchain_model.get_btc_confirmed_transactions()
    ts_start_date = str_date_to_timestamp(start_date)
    ts_end_date = str_date_to_timestamp(end_date)

    # Restrict to the requested date window ("x" holds the timestamps).
    df = df[
        (df["x"] > datetime.fromtimestamp(ts_start_date))
        & (df["x"] < datetime.fromtimestamp(ts_end_date))
    ]

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(df["x"], df["y"], lw=0.8)
    ax.set_ylabel("Transactions")
    ax.set_title("BTC Confirmed Transactions")
    ax.get_yaxis().set_major_formatter(
        ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
    )
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "btcct",
        df,
    )
__docformat__ = "numpy"
import datetime
import json
import logging
import os
from typing import Optional
import numpy as np
import pandas as pd
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError
from openbb_terminal.cryptocurrency.dataframe_helpers import (
prettify_column_names,
)
from openbb_terminal import config_terminal as cfg
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import check_api_key, log_start_end
logger = logging.getLogger(__name__)
class BitQueryApiKeyException(Exception):
    """Raised when the BitQuery API rejects the configured API key."""

    @log_start_end(log=logger)
    def __init__(self, message: str):
        super().__init__(message)
        # Keep the message accessible for __str__ and callers
        self.message = message

    @log_start_end(log=logger)
    def __str__(self) -> str:
        return "BitQueryApiKeyException: " + self.message
class BitQueryTimeoutException(Exception):
    """Raised when a BitQuery request exceeds the allowed response time."""
# BitQuery GraphQL endpoint
BQ_URL = "https://graphql.bitquery.io"
# Currencies a trade amount can be denominated in
CURRENCIES = ["ETH", "USD", "BTC", "USDT"]
# Sortable columns for DEX-trade tables
LT_FILTERS = ["exchange", "trades", "tradeAmount"]
# Aggregation kinds for DEX-trade listings
LT_KIND = ["dex", "time"]
# Supported aggregation intervals
INTERVALS = ["day", "month", "week"]
# Sortable columns for get_daily_dex_volume_for_given_pair output
DVCP_FILTERS = [
    "date",
    "exchange",
    "base",
    "quote",
    "open",
    "high",
    "low",
    "close",
    "tradeAmount",
    "trades",
]
# Sortable columns for get_ethereum_unique_senders output
UEAT_FILTERS = [
    "date",
    "uniqueSenders",
    "transactions",
    "averageGasPrice",
    "mediumGasPrice",
    "maximumGasPrice",
]
# Sortable columns for get_most_traded_pairs output
TTCP_FILTERS = ["base", "quoted", "trades", "tradeAmount"]
# Sortable columns for get_spread_for_crypto_pair output
BAAS_FILTERS = [
    "date",
    "baseCurrency",
    "quoteCurrency",
    "dailySpread",
    "averageBidPrice",
    "averageAskPrice",
]
# Canonical names of decentralized exchanges known to BitQuery
DECENTRALIZED_EXCHANGES = [
    "1inch",
    "AfroDex",
    "AirSwap",
    "Amplbitcratic",
    "Balancer",
    "BestSwap",
    "Bitox",
    "CellSwap",
    "Cellswap",
    "Cofix",
    "Coinchangex",
    "Curve",
    "DDEX",
    "DUBIex",
    "DecentrEx",
    "DeversiFi",
    "Dodo",
    "ETHERCExchange",
    "EtherBlockchain",
    "EtherDelta",
    "Ethernext",
    "Ethfinex",
    "FEGex",
    "FFFSwap",
    "Fordex",
    "GUDecks",
    "GUDeks",
    "HiSwap",
    "IDEX",
    "LedgerDex",
    "Matcha",
    "Miniswap",
    "Mooniswap",
    "Oasis",
    "OpenRelay",
    "S.Finance",
    "SakeSwap",
    "SeedDex",
    "SingularX",
    "StarBitEx",
    "SushiSwap",
    "SwapX",
    "SwitchDex",
    "TacoSwap",
    "TokenJar",
    "TokenStore",
    "TokenTrove",
    "Tokenlon",
    "TradexOne",
    "Uniswap",
    "ZeusSwap",
    "dYdX",
    "dex.blue",
]
# Case-insensitive lookup: lowercased name -> canonical exchange name
DECENTRALIZED_EXCHANGES_MAP = {e.lower(): e for e in DECENTRALIZED_EXCHANGES}
# Supported EVM networks
NETWORKS = ["bsc", "ethereum", "matic"]
@log_start_end(log=logger)
def _extract_dex_trades(data: dict) -> pd.DataFrame:
    """Flatten the ``dexTrades`` payload of a BitQuery response.

    The API wraps results as ``{'ethereum': {'dexTrades': <data>}}``; this
    helper pulls out the trade list and normalizes it into a flat DataFrame.

    Parameters
    ----------
    data: dict
        Response data from the BitQuery API.

    Returns
    -------
    pd.DataFrame
        Normalized DataFrame with the trade records.

    Raises
    ------
    ValueError
        If the response contains no ``dexTrades`` data.
    """
    trades = data["ethereum"]["dexTrades"]
    if not trades:
        raise ValueError("No data was returned in request response\n")
    return pd.json_normalize(trades)
@log_start_end(log=logger)
@check_api_key(["API_BITQUERY_KEY"])
def query_graph(url: str, query: str) -> dict:
    """Helper method for querying the BitQuery GraphQL API. [Source: https://bitquery.io/]

    Parameters
    ----------
    url: str
        Endpoint url
    query: str
        Graphql query

    Returns
    -------
    dict
        Dictionary with response data

    Raises
    ------
    BitQueryTimeoutException
        If the API does not answer within the timeout.
    BitQueryApiKeyException
        If the request is rejected for authentication reasons.
    HTTPError
        On a server-side (500) error.
    ValueError
        If the response body is invalid or contains an error payload.
    """
    session = requests.Session()
    # Retry transient connection failures up to 5 times
    session.mount("https://", HTTPAdapter(max_retries=5))
    headers = {"x-api-key": cfg.API_BITQUERY_KEY}
    timeout = 30

    try:
        response = session.post(
            url, json={"query": query}, headers=headers, timeout=timeout
        )
    except requests.Timeout as e:
        logger.exception("BitQuery timeout")
        raise BitQueryTimeoutException(
            f"BitQuery API didn't respond within {timeout} seconds.\n"
        ) from e

    if response.status_code == 500:
        # Fixed typo in user-facing message ("sever" -> "server")
        raise HTTPError(f"Internal server error {response.reason}")

    if not 200 <= response.status_code < 300:
        # Fixed typo in user-facing message ("you free" -> "your free")
        raise BitQueryApiKeyException(
            f"Invalid Authentication: {response.status_code}. "
            "Please visit https://bitquery.io/pricing and generate your free api key\n"
        )

    try:
        data = response.json()
        if "error" in data:
            raise ValueError(f"Invalid Response: {data['error']}\n")
    except Exception as e:
        logger.exception("Invalid Response: %s", str(e))
        raise ValueError(f"Invalid Response: {response.text}\n") from e

    return data["data"]
@log_start_end(log=logger)
def get_erc20_tokens() -> pd.DataFrame:
    """Helper method that loads ~1500 most traded erc20 tokens.
    [Source: json file]

    Returns
    -------
    pd.DataFrame
        ERC20 tokens with name, symbol, address and count columns
    """
    file_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", "data", "erc20_coins.json"
    )
    # Explicit encoding so the read does not depend on the platform default
    with open(file_path, encoding="utf-8") as f:
        data = json.load(f)
    df = pd.json_normalize(data)
    df.columns = ["count", "address", "symbol", "name"]
    # Drop rows without a usable symbol. Use np.nan — the np.NaN alias was
    # removed in NumPy 2.0.
    df = df[~df["symbol"].isin(["", None, np.nan])]
    return df[["name", "symbol", "address", "count"]]
@log_start_end(log=logger)
def find_token_address(symbol: str) -> Optional[str]:
    """Resolve an ERC20 token symbol (or address) to its contract address.

    If an ERC20 token address is supplied, it is validated superficially and
    returned unchanged. Otherwise the bundled token mapping is searched for
    the matching contract address.

    Parameters
    ----------
    symbol: str
        ERC20 token symbol e.g. UNI, SUSHI, ETH, WBTC or token address e.g.
        0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48

    Returns
    -------
    str or None
        ERC20 token address, or None if nothing found.
    """
    # Already looks like a hex contract address -> pass it through untouched
    if symbol.startswith("0x") and len(symbol) >= 38:
        return symbol
    if symbol == "ETH":
        return symbol

    # BTC itself is not an ERC20 token; look up wrapped BTC instead
    lookup_symbol = "WBTC" if symbol == "BTC" else symbol
    tokens_map: pd.DataFrame = get_erc20_tokens()
    matches = tokens_map.loc[tokens_map["symbol"] == lookup_symbol]
    if matches.empty:
        return None
    # With several candidates, prefer the most traded one
    if len(matches) > 1:
        matches = matches.sort_values(by="count", ascending=False)
    return matches.iloc[0]["address"]
@log_start_end(log=logger)
def get_dex_trades_by_exchange(
    trade_amount_currency: str = "USD",
    limit: int = 90,
    sortby: str = "tradeAmount",
    ascend: bool = True,
) -> pd.DataFrame:
    """Get trades on Decentralized Exchanges aggregated by DEX [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    trade_amount_currency: str
        Currency of displayed trade amount. Default: USD
    limit: int
        Last n days to query data. Maximum 365 (bigger numbers can cause timeouts
        on server side)
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Trades on Decentralized Exchanges aggregated by DEX
        (empty DataFrame on API-key failure or empty response)
    """
    # Clamp the look-back window to at most 365 days
    dt = (datetime.date.today() - datetime.timedelta(min(limit, 365))).strftime(
        "%Y-%m-%d"
    )

    if trade_amount_currency not in CURRENCIES:
        trade_amount_currency = "USD"

    query = f"""
    {{
      ethereum {{
        dexTrades(options: {{limit: 40, desc: ["count"]}}
          date: {{since: "{dt}"}}
        ) {{
          exchange {{
            name
          }}
          count
          tradeAmount(in: {trade_amount_currency})
        }}
      }}
    }}
    """

    try:
        data = query_graph(BQ_URL, query)
    except BitQueryApiKeyException:
        logger.exception("Invalid API Key")
        console.print("[red]Invalid API Key[/red]\n")
        # Bug fix: previously fell through and raised a NameError on `data`
        return pd.DataFrame()
    if not data:
        # Consistency with the sibling query helpers in this module
        return pd.DataFrame()

    df = _extract_dex_trades(data)
    df.columns = ["trades", "tradeAmount", "exchange"]
    # Drop rows without an exchange name (np.nan: the np.NaN alias was
    # removed in NumPy 2.0)
    df = df[~df["exchange"].isin([None, np.nan, ""])]
    df = df[["exchange", "trades", "tradeAmount"]].sort_values(
        by="tradeAmount", ascending=True
    )
    df = df.sort_values(by=sortby, ascending=ascend)
    df.columns = prettify_column_names(df.columns)
    return df
@log_start_end(log=logger)
def get_dex_trades_monthly(
    trade_amount_currency: str = "USD", limit: int = 90, ascend: bool = True
) -> pd.DataFrame:
    """Get list of trades on Decentralized Exchanges monthly aggregated.
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    trade_amount_currency: str
        Currency of displayed trade amount. Default: USD
    limit: int
        Last n days to query data. Maximum 365 (bigger numbers can cause timeouts
        on server side)
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Trades on Decentralized Exchanges monthly aggregated
        (empty DataFrame on API-key failure or empty response)
    """
    if trade_amount_currency not in CURRENCIES:
        trade_amount_currency = "USD"

    # Clamp the look-back window to at most 365 days
    dt = (datetime.date.today() - datetime.timedelta(min(limit, 365))).strftime(
        "%Y-%m-%d"
    )

    query = f"""
    {{
      ethereum {{
        dexTrades(
          options: {{desc: ["date.year", "date.month", "count"]}}
          date: {{since: "{dt}"}}
        ) {{
          count
          date {{
            month
            year
          }}
          tradeAmount(in: {trade_amount_currency})
        }}
      }}
    }}
    """

    try:
        data = query_graph(BQ_URL, query)
    except BitQueryApiKeyException:
        logger.exception("Invalid API Key")
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if not data:
        return pd.DataFrame()

    df = _extract_dex_trades(data)
    # Collapse the flattened year/month columns into a first-of-month date
    df["date"] = df.apply(
        lambda x: datetime.date(int(x["date.year"]), int(x["date.month"]), 1), axis=1
    )
    df.rename(columns={"count": "trades"}, inplace=True)
    df = df[["date", "trades", "tradeAmount"]]
    df = df.sort_values(by="date", ascending=ascend)
    df.columns = prettify_column_names(df.columns)
    return df
@log_start_end(log=logger)
def get_daily_dex_volume_for_given_pair(
    limit: int = 100,
    symbol: str = "UNI",
    to_symbol: str = "USDT",
    sortby: str = "date",
    ascend: bool = True,
) -> pd.DataFrame:
    """Get daily volume for given pair [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    limit: int
        Last n days to query data (capped at 365)
    symbol: str
        ERC20 token symbol
    to_symbol: str
        Quote currency.
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Daily volume for given pair
        (empty DataFrame on API-key failure or empty response)

    Raises
    ------
    ValueError
        If the base or quote token cannot be resolved to an address.
    """
    dt = (datetime.date.today() - datetime.timedelta(min(limit, 365))).strftime(
        "%Y-%m-%d"
    )

    # Resolve both symbols to ERC20 contract addresses
    base, quote = find_token_address(symbol), find_token_address(to_symbol)
    if not base or not quote:
        raise ValueError("Provided coin or quote currency doesn't exist\n")

    query = f"""
    {{
      ethereum(network: ethereum) {{
        dexTrades(
          options: {{desc: ["timeInterval.day", "trades"]}}
          baseCurrency: {{is: "{base}"}}
          quoteCurrency: {{is: "{quote}"}}
          date: {{since: "{dt}" }}
        ) {{
          timeInterval {{
            day(count: 1)
          }}
          baseCurrency {{
            symbol
          }}
          quoteCurrency {{
            symbol
          }}
          exchange {{
            fullName
          }}
          trades: count
          tradeAmount(in: USD)
          quotePrice
          maximum_price: quotePrice(calculate: maximum)
          minimum_price: quotePrice(calculate: minimum)
          open_price: minimum(of: block, get: quote_price)
          close_price: maximum(of: block, get: quote_price)
        }}
      }}
    }}
    """

    try:
        data = query_graph(BQ_URL, query)
    except BitQueryApiKeyException:
        logger.exception("Invalid API Key")
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if not data:
        return pd.DataFrame()

    df = _extract_dex_trades(data)
    # Rename the flattened API columns to the OHLC-style names used downstream
    df.columns = [
        "trades",
        "tradeAmount",
        "price",
        "high",
        "low",
        "open",
        "close",
        "date",
        "base",
        "quote",
        "exchange",
    ]
    df = df[
        [
            "date",
            "exchange",
            "base",
            "quote",
            "open",
            "high",
            "low",
            "close",
            "tradeAmount",
            "trades",
        ]
    ]
    df = df.sort_values(by=sortby, ascending=ascend)
    df.columns = prettify_column_names(df.columns)
    return df
@log_start_end(log=logger)
def get_token_volume_on_dexes(
    symbol: str = "UNI",
    trade_amount_currency: str = "USD",
    sortby: str = "tradeAmount",
    ascend: bool = True,
) -> pd.DataFrame:
    """Get token volume on different Decentralized Exchanges. [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    symbol: str
        ERC20 token symbol.
    trade_amount_currency: str
        Currency to display trade amount in.
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Token volume on Decentralized Exchanges
        (empty DataFrame on API-key failure or empty response)

    Raises
    ------
    ValueError
        If the token symbol cannot be resolved to an address.
    """
    if trade_amount_currency not in CURRENCIES:
        trade_amount_currency = "USD"

    token_address = find_token_address(symbol)
    if token_address is None:
        raise ValueError(f"Couldn't find token with symbol {symbol}\n")

    query = f"""
    {{
      ethereum {{
        dexTrades(
          baseCurrency: {{is:"{token_address}"}}
        ) {{
          baseCurrency{{
            symbol
          }}
          exchange {{
            name
            fullName
          }}
          count
          tradeAmount(in: {trade_amount_currency})
        }}
      }}
    }}
    """

    try:
        data = query_graph(BQ_URL, query)
    except BitQueryApiKeyException:
        logger.exception("Invalid API Key")
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if not data:
        return pd.DataFrame()

    df = _extract_dex_trades(data)[["exchange.fullName", "tradeAmount", "count"]]
    df.columns = LT_FILTERS
    # Drop internal/unnamed exchanges (full names starting with "<")
    df = df[~df["exchange"].str.startswith("<")]
    df = df.sort_values(by=sortby, ascending=ascend)
    df.columns = prettify_column_names(df.columns)
    return df
@log_start_end(log=logger)
def get_ethereum_unique_senders(
    interval: str = "day",
    limit: int = 90,
    sortby: str = "tradeAmount",
    ascend: bool = True,
) -> pd.DataFrame:
    """Get number of unique ethereum addresses which made a transaction in given time interval.

    Parameters
    ----------
    interval: str
        Time interval in which count unique ethereum addresses which made transaction. day,
        month or week.
    limit: int
        Number of records for data query.
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Unique ethereum addresses which made a transaction
        (empty DataFrame on API-key failure or empty response)
    """
    # Interval name -> number of days each record covers
    intervals = {
        "day": 1,
        "month": 30,
        "week": 7,
    }

    if interval not in intervals:
        interval = "day"

    # Look-back window capped at 90 days — presumably an API/free-tier
    # restriction; TODO confirm against BitQuery docs
    days = min(limit * intervals[interval], 90)
    dt = (datetime.date.today() - datetime.timedelta(days)).strftime("%Y-%m-%d")

    query = f"""
    {{
      ethereum(network: ethereum) {{
        transactions(options: {{desc: "date.date"}}, date: {{since: "{dt}"}}) {{
          uniqueSenders: count(uniq: senders)
          date {{
            date:startOfInterval(unit: {interval})
          }}
          averageGasPrice: gasPrice(calculate: average)
          mediumGasPrice: gasPrice(calculate: median)
          maximumGasPrice: gasPrice(calculate: maximum)
          transactions: count
        }}
      }}
    }}
    """

    try:
        data = query_graph(BQ_URL, query)
    except BitQueryApiKeyException:
        logger.exception("Invalid API Key")
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if not data:
        return pd.DataFrame()

    df = pd.DataFrame(data["ethereum"]["transactions"])
    # The "date" field arrives as a nested dict {"date": "..."}; unwrap it
    df["date"] = df["date"].apply(lambda x: x["date"])
    df = df[UEAT_FILTERS]
    df = df.sort_values(by=sortby, ascending=ascend)
    df.columns = prettify_column_names(df.columns)
    return df
@log_start_end(log=logger)
def get_most_traded_pairs(
    network: str = "ethereum",
    exchange: str = "Uniswap",
    limit: int = 90,
    sortby: str = "tradeAmount",
    ascend: bool = True,
) -> pd.DataFrame:
    """Get most traded crypto pairs on given decentralized exchange in chosen time period.
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    network: str
        EVM network. One from list: bsc (binance smart chain), ethereum or matic
    exchange: str
        Decentralized exchange name (case-insensitive)
    limit: int
        Number of days taken into calculation account.
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Most traded crypto pairs on given decentralized exchange in chosen time period.
        (empty DataFrame on API-key failure or empty response)
    """
    dt = (datetime.date.today() - datetime.timedelta(limit)).strftime("%Y-%m-%d")
    # Bug fix: the map keys are lowercased, so the lookup must lowercase the
    # input too — previously any properly-cased name (even "SushiSwap") fell
    # back to "Uniswap"
    exchange = DECENTRALIZED_EXCHANGES_MAP.get(exchange.lower(), "Uniswap")

    query = f"""
    {{
      ethereum(network: {network}){{
        dexTrades(options: {{limit: 100, desc: "tradeAmount"}},
          exchangeName: {{is: "{exchange}"}}
          date: {{since: "{dt}"}}) {{
          buyCurrency {{
            symbol
          }}
          sellCurrency{{
            symbol
          }}
          trades: count
          tradeAmount(in: USD)
        }}
      }}
    }}
    """

    try:
        data = query_graph(BQ_URL, query)
    except BitQueryApiKeyException:
        logger.exception("Invalid API Key")
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if not data:
        return pd.DataFrame()

    df = _extract_dex_trades(data)
    df.columns = ["trades", "tradeAmount", "base", "quoted"]
    df = df[TTCP_FILTERS]
    df = df.sort_values(by=sortby, ascending=ascend)
    df.columns = prettify_column_names(df.columns)
    return df
@log_start_end(log=logger)
def get_spread_for_crypto_pair(
    symbol: str = "WETH",
    to_symbol: str = "USDT",
    limit: int = 10,
    sortby: str = "date",
    ascend: bool = True,
) -> pd.DataFrame:
    """Get an average bid and ask prices, average spread for given crypto pair for chosen time period.
    [Source: https://graphql.bitquery.io/]

    Parameters
    ----------
    symbol: str
        ERC20 token symbol
    to_symbol: str
        Quoted currency.
    limit: int
        Last n days to query data
    sortby: str
        Key by which to sort data
    ascend: bool
        Flag to sort data ascending

    Returns
    -------
    pd.DataFrame
        Average bid and ask prices, spread for given crypto pair for chosen time period
        (empty DataFrame on API-key failure or empty response)

    Raises
    ------
    ValueError
        If the base or quote token cannot be resolved to an address.
    """
    dt = (datetime.date.today() - datetime.timedelta(limit)).strftime("%Y-%m-%d")
    # Resolve both symbols to ERC20 contract addresses
    base, quote = find_token_address(symbol), find_token_address(to_symbol)
    if not base or not quote:
        raise ValueError("Provided coin or quote currency doesn't exist\n")

    query = f"""
    {{
      ethereum(network: ethereum){{
        dexTrades(
          date: {{since:"{dt}"}}
          baseCurrency: {{is: "{base}"}},
          quoteCurrency: {{is: "{quote}"}}) {{
          date {{date}}
          baseCurrency {{symbol}}
          baseAmount
          quoteCurrency {{
            symbol
          }}
          quoteAmount
          trades: count
          quotePrice
          side
        }}
      }}
    }}
    """

    try:
        data = query_graph(BQ_URL, query)
    except BitQueryApiKeyException:
        logger.exception("Invalid API Key")
        console.print("[red]Invalid API Key[/red]\n")
        return pd.DataFrame()
    if not data:
        return pd.DataFrame()

    df = _extract_dex_trades(data)
    columns = ["quotePrice", "date.date", "baseCurrency.symbol", "quoteCurrency.symbol"]
    # SELL-side trades give the bid, BUY-side trades the ask
    bids = df.query("side == 'SELL'")[columns]
    asks = df.query("side == 'BUY'")[columns]
    bids.columns = ["averageBidPrice", "date", "baseCurrency", "quoteCurrency"]
    asks.columns = ["averageAskPrice", "date", "baseCurrency", "quoteCurrency"]

    # Join bid and ask per (date, pair) and compute the absolute spread
    daily_spread = pd.merge(asks, bids, on=["date", "baseCurrency", "quoteCurrency"])
    daily_spread["dailySpread"] = abs(
        daily_spread["averageBidPrice"] - daily_spread["averageAskPrice"]
    )
    df = daily_spread[
        [
            "date",
            "baseCurrency",
            "quoteCurrency",
            "dailySpread",
            "averageBidPrice",
            "averageAskPrice",
        ]
    ]
    df = df.sort_values(by=sortby, ascending=ascend)
    df.columns = prettify_column_names(df.columns)
    return df
POSSIBLE_CRYPTOS = list(get_erc20_tokens()["symbol"].unique()) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/cryptocurrency/onchain/bitquery_model.py | 0.747339 | 0.222764 | bitquery_model.py | pypi |
__docformat__ = "numpy"
import logging
import textwrap
from typing import Optional, Tuple, Any
import numpy as np
import pandas as pd
import requests
import openbb_terminal.config_terminal as cfg
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import check_api_key, log_start_end
logger = logging.getLogger(__name__)
# Columns of get_whales_transactions output that can be sorted/filtered on
FILTERS = [
    "date",
    "symbol",
    "blockchain",
    "amount",
    "amount_usd",
    "from",
    "to",
]
class ApiKeyException(Exception):
    """Raised when the Whale Alert API key is missing or rejected."""

    @log_start_end(log=logger)
    def __init__(self, message: str):
        super().__init__(message)
        # Keep the message accessible for __str__ and callers
        self.message = message

    @log_start_end(log=logger)
    def __str__(self) -> str:
        return "ApiKeyException: " + self.message
@log_start_end(log=logger)
@check_api_key(["API_WHALE_ALERT_KEY"])
def make_request(params: Optional[dict] = None) -> Tuple[Optional[int], Any]:
    """Helper method for requests [Source: https://docs.whale-alert.io/]

    Parameters
    ----------
    params: dict
        Additional query parameters (e.g. limit, min_value)

    Returns
    -------
    Tuple[Optional[int], Any]
        status code (None if the request itself failed), response from api request
    """
    api_key = cfg.API_WHALE_ALERT_KEY or ""
    url = "https://api.whale-alert.io/v1/transactions?api_key=" + api_key
    try:
        response = requests.get(url, timeout=2, params=params)
    except Exception:
        # Network failure: signal with a None status code
        return None, None

    result = {}
    if response.status_code == 200:
        result = response.json()
    elif response.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
        logger.error("Invalid Authentication: %s", response.text)
    elif response.status_code == 403:
        # Bug fix: this branch previously duplicated the 401 check and was
        # unreachable; 403 is the insufficient-authorization status
        console.print("[red]API Key not authorized for Premium Feature[/red]\n")
        logger.error("Insufficient Authorization: %s", response.text)
    elif response.status_code == 429:
        console.print("[red]Exceeded number of calls per minute[/red]\n")
        logger.error("Calls limit exceeded: %s", response.text)
    else:
        console.print(response.json()["message"])
        logger.error("Error in request: %s", response.text)
    return response.status_code, result
@log_start_end(log=logger)
def get_whales_transactions(
    min_value: int = 800000,
    limit: int = 100,
    sortby: str = "date",
    ascend: bool = False,
) -> pd.DataFrame:
    """Whale Alert's API allows you to retrieve live and historical transaction data from major blockchains.
    Supported blockchain: Bitcoin, Ethereum, Ripple, NEO, EOS, Stellar and Tron. [Source: https://docs.whale-alert.io/]

    Parameters
    ----------
    min_value: int
        Minimum value of trade to track (floored at 800000).
    limit: int
        Limit of transactions. Max 100
    sortby: str
        Key to sort by.
    ascend: bool
        Sort in ascending order.

    Returns
    -------
    pd.DataFrame
        Crypto whales transactions (empty DataFrame on request failure)
    """
    # Free tier only reports transactions >= 800k USD
    min_value = max(min_value, 800000)
    # Bug fix: the API accepts at most 100 results, so the limit must be
    # capped with min() — the previous max() floored it instead
    limit = min(limit, 100)

    params = {"limit": limit, "min_value": min_value}

    status_code, response = make_request(params)
    if status_code != 200:
        return pd.DataFrame()

    data = pd.json_normalize(response["transactions"]).sort_values(
        "timestamp", ascending=False
    )

    data["date"] = pd.to_datetime(data["timestamp"], unit="s")
    data.columns = [col.replace(".balance", "") for col in data.columns]
    # Wrap long blockchain addresses for table display
    data["to_address"] = data["to.address"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=45)) if isinstance(x, str) else x
    )
    data["from_address"] = data["from.address"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=45)) if isinstance(x, str) else x
    )

    # Fall back to the owner type when the owner name is missing.
    # np.nan only (the np.NaN alias was removed in NumPy 2.0 and was a
    # duplicate of np.nan anyway).
    data["from"] = data.apply(
        lambda x: x["from.owner"]
        if x["from.owner"] not in [np.nan, None]
        else x["from.owner_type"],
        axis=1,
    )
    data["to"] = data.apply(
        lambda x: x["to.owner"]
        if x["to.owner"] not in [np.nan, None]
        else x["to.owner_type"],
        axis=1,
    )
    data.drop(
        [
            "id",
            "transaction_count",
            "from.owner_type",
            "to.owner_type",
            "to.owner",
            "from.owner",
            "transaction_type",
            "hash",
            "timestamp",
        ],
        axis=1,
        inplace=True,
    )
    df = data[
        [
            "date",
            "symbol",
            "blockchain",
            "amount",
            "amount_usd",
            "from",
            "to",
            "from_address",
            "to_address",
        ]
    ]
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
__docformat__ = "numpy"
import logging
import textwrap
from datetime import datetime
from time import sleep
from typing import Any, Optional
import pandas as pd
import requests
import openbb_terminal.config_terminal as cfg
from openbb_terminal.rich_config import console
from openbb_terminal.cryptocurrency.dataframe_helpers import create_df_index
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# Sortable columns for historical price tables
PRICES_FILTERS = [
    "date",
    "cap",
    "volumeConverted",
    "open",
    "high",
    "close",
    "low",
]
# Sortable columns for the top-tokens table
TOP_FILTERS = [
    "rank",
    "name",
    "symbol",
    "price",
    "txsCount",
    "transfersCount",
    "holdersCount",
]
# Sortable columns for top-token-holders tables
TH_FILTERS = [
    "value",
]
# Sortable columns for an address' token balances
BALANCE_FILTERS = [
    "index",
    "balance",
    "tokenName",
    "tokenSymbol",
]
# Sortable columns for address/token history tables
HIST_FILTERS = ["timestamp", "transactionHash", "token", "value"]
# Sortable columns for holders tables
HOLDERS_FILTERS = [
    "balance",
    "share",
]
@log_start_end(log=logger)
def split_cols_with_dot(column: str) -> str:
    """Convert a dotted column name to camelCase.

    E.g. ``price.availableSupply`` -> ``priceAvailableSupply``.

    Generalized to any number of dots — the previous implementation raised
    ValueError for names with more than one dot — and tolerates empty
    segments (e.g. ``"a."``), which previously crashed with IndexError.

    Parameters
    ----------
    column: str
        Pandas dataframe column value

    Returns
    -------
    str
        Column name with dots removed and each following segment capitalized.
    """
    if "." not in column:
        return column
    head, *tail = column.split(".")
    # Capitalize only the first character of each following segment
    return head + "".join(part[:1].upper() + part[1:] for part in tail)
@log_start_end(log=logger)
def enrich_social_media(dct: dict) -> None:
    """Expand bare twitter/reddit/coingecko handles in *dct* into full URLs.

    The dictionary is modified in place: for each known social-media key that
    is present, its value is prefixed with the corresponding site URL.

    Parameters
    ----------
    dct: dict
        Dictionary in which we search for coingecko, twitter or reddit handles.
    """
    base_urls = {
        "twitter": "https://www.twitter.com/",
        "reddit": "https://www.reddit.com/r/",
        "coingecko": "https://www.coingecko.com/en/coins/",
    }
    for site, base in base_urls.items():
        if site in dct:
            dct[site] = base + dct[site]
@log_start_end(log=logger)
def make_request(
    endpoint: str, address: Optional[str] = None, **kwargs: Any
) -> Optional[dict]:
    """Helper method that handles request for Ethplorer API [Source: https://ethplorer.io/]

    Parameters
    ----------
    endpoint: str
        endpoint we want to query e.g. https://api.ethplorer.io/<endpoint><arg>?=apiKey=freekey
    address: str
        balance argument for given endpoint. In most cases it's tx hash, or eth balance.
    kwargs: Any
        Additional keywords arguments e.g. limit of transactions

    Returns
    -------
    Optional[dict]
        dictionary with response data (empty dict on error)
    """
    base_url = "https://api.ethplorer.io/"
    url = f"{base_url}{endpoint}"

    if address:
        url = url + "/" + address

    url += f"?apiKey={cfg.API_ETHPLORER_KEY}"

    if "limit" in kwargs:
        url += f"&limit={kwargs['limit']}"

    sleep(0.5)  # Limit is 2 API calls per 1 sec.
    # Robustness fix: a request without a timeout could hang indefinitely
    response = requests.get(url, timeout=10)
    result = {}
    if response.status_code == 200:
        result = response.json()
        if not result:
            console.print("No data found")
    elif response.status_code == 401:
        console.print("[red]Invalid API Key[/red]\n")
    else:
        # Robustness fix: non-JSON or unexpected error bodies previously
        # crashed with ValueError/KeyError here
        try:
            console.print(response.json()["error"]["message"])
        except (ValueError, KeyError):
            console.print(f"[red]Request failed with status {response.status_code}[/red]\n")
    return result
@log_start_end(log=logger)
def get_token_decimals(address: str) -> Optional[int]:
    """Return the divider (10 ** decimals) for a given ERC20 token. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Blockchain balance e.g. 0x1f9840a85d5af5bf1d1762f925bdaddc4201f984

    Returns
    -------
    Optional[int]
        10 raised to the token's number of decimals, or None if unavailable.
    """
    token_info = make_request("getTokenInfo", address)
    if not token_info or "decimals" not in token_info:
        return None
    return 10 ** int(token_info["decimals"])
@log_start_end(log=logger)
def get_address_info(
    address: str, sortby: str = "index", ascend: bool = False
) -> pd.DataFrame:
    """Get info about tokens on you ethereum blockchain balance. Eth balance, balance of all tokens which
    have name and symbol. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Blockchain balance e.g. 0x3cD751E6b0078Be393132286c442345e5DC49699
    sortby: str
        Key to sort by.
    ascend: bool
        Sort in ascending order.

    Returns
    -------
    pd.DataFrame
        DataFrame with list of tokens and their balances.
    """
    response = make_request("getAddressInfo", address)

    tokens = []
    if "tokens" in response:
        tokens = response.pop("tokens")
        for token in tokens:
            token_info = token.pop("tokenInfo")
            token.update(
                {
                    "tokenName": token_info.get("name"),
                    "tokenSymbol": token_info.get("symbol"),
                    # Bug fix: this column previously copied the balance
                    # instead of the contract address
                    "tokenAddress": token_info.get("address"),
                    # Scale the raw balance by the token's decimals
                    "balance": token.get("balance")
                    / (10 ** int(token_info.get("decimals"))),
                }
            )
    elif "tokenInfo" in response:
        # Bug fix: the key checked here was "token_info", which the API never
        # returns, making this branch unreachable
        token_info = response.get("tokenInfo") or {}
        tokens = [
            {
                "tokenName": token_info.get("name"),
                "tokenSymbol": token_info.get("symbol"),
                "tokenAddress": token_info.get("address"),
                "balance": token_info.get("balance")
                / (10 ** int(token_info.get("decimals"))),
            }
        ]

    # Always prepend the plain ETH balance as a pseudo-token row
    eth = response.get("ETH") or {}
    eth_balance = eth.get("balance")
    eth_row = [
        "Ethereum",
        "ETH",
        "0x0000000000000000000000000000000000000000",
        eth_balance,
    ]

    cols = [
        "tokenName",
        "tokenSymbol",
        "tokenAddress",
        "balance",
    ]
    df = pd.DataFrame(tokens)
    eth_row_df = pd.DataFrame([eth_row], columns=cols)
    df = pd.concat([eth_row_df, df], ignore_index=True)
    df = df[df["tokenName"].notna()][cols]
    create_df_index(df, "index")
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_top_tokens(sortby: str = "rank", ascend: bool = False) -> pd.DataFrame:
    """Get top 50 tokens. [Source: Ethplorer]

    Parameters
    ----------
    sortby: str
        Key to sort by.
    ascend: bool
        Sort in ascending order.

    Returns
    -------
    pd.DataFrame
        DataFrame with list of top 50 tokens.
    """
    response = make_request("getTopTokens")
    # NOTE(review): assumes the response always contains a "tokens" key —
    # a failed request would raise KeyError here; confirm with API docs
    tokens = response["tokens"]
    df = pd.DataFrame(tokens)[
        [
            "name",
            "symbol",
            "price",
            "txsCount",
            "transfersCount",
            "holdersCount",
            "twitter",
            "coingecko",
        ]
    ]
    # "price" arrives as a dict holding a "rate" key (or a falsy value for
    # tokens without price data) — extract the numeric rate
    df["price"] = df["price"].apply(lambda x: x["rate"] if x and "rate" in x else None)
    create_df_index(df, "rank")
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_top_token_holders(
    address: str, sortby: str = "balance", ascend: bool = True
) -> pd.DataFrame:
    """Get info about top token holders. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token balance e.g. 0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984
    sortby: str
        Key to sort by.
    ascend: bool
        Sort in ascending order.

    Returns
    -------
    pd.DataFrame
        DataFrame with list of top token holders.
    """
    holders = pd.DataFrame(make_request("getTopTokenHolders", address, limit=100)["holders"])
    sleep(0.5)  # respect the 2-calls-per-second API limit before the next call
    divider = get_token_decimals(address)
    if divider:
        # Scale raw balances into whole-token units
        holders["balance"] = holders["balance"] / divider
    return holders.sort_values(by=sortby, ascending=ascend)
@log_start_end(log=logger)
def get_address_history(
    address: str, sortby: str = "timestamp", ascend: bool = True
) -> pd.DataFrame:
    """Get information about balance historical transactions. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Blockchain balance e.g. 0x3cD751E6b0078Be393132286c442345e5DC49699
    sortby: str
        Key to sort by.
    ascend: bool
        Sort in ascending order.

    Returns
    -------
    pd.DataFrame
        DataFrame with balance historical transactions (last 100)
    """
    response = make_request("getAddressHistory", address, limit=100)
    # Robustness fix: default to an empty list instead of KeyError when the
    # response carries no "operations" key
    operations = response.pop("operations", [])
    if operations:
        for operation in operations:
            token = operation.pop("tokenInfo")
            if token:
                operation["token"] = token["name"]
                operation["tokenAddress"] = token["address"]
                operation["decimals"] = int(token["decimals"])
            operation["timestamp"] = datetime.fromtimestamp(operation["timestamp"])

    df = pd.DataFrame(operations)
    cols = ["timestamp", "transactionHash", "token", "value"]
    if df.empty:
        # Bug fix: the value scaling below previously ran first and raised a
        # KeyError on the missing "value" column when there were no operations
        console.print(f"No historical transaction found for {address}")
        return pd.DataFrame(columns=cols)

    # Scale raw amounts by each operation's token decimals
    df["value"] = df["value"].astype(float) / (10 ** df["decimals"])
    df = df[cols]
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_token_info(address) -> pd.DataFrame:
    """Get info about ERC20 token. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token balance e.g. 0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984

    Returns
    -------
    pd.DataFrame
        Two-column (Metric, Value) DataFrame with information about the
        provided ERC20 token.
    """
    response = make_request("getTokenInfo", address)
    # NOTE(review): if "decimals" is absent, `decimals` stays None and the
    # totalSupply scaling below would fail — confirm with API docs
    decimals = response.pop("decimals") if "decimals" in response else None

    # Drop fields that are never displayed
    for name in [
        "issuancesCount",
        "lastUpdated",
        "image",
        "transfersCount",
        "ethTransfersCount",
    ]:
        try:
            response.pop(name)
        except KeyError as e:
            logger.exception(str(e))
            continue

    # Turn bare social-media handles into full URLs
    enrich_social_media(response)
    df = pd.json_normalize(response)
    # Flatten the dotted column names produced by json_normalize
    df.columns = [split_cols_with_dot(x) for x in df.columns.tolist()]

    if "priceTs" in df:
        df.drop("priceTs", axis=1, inplace=True)

    # Remove metrics not useful in the summary table
    for col in [
        "owner",
        "slot",
        "facebook",
        "priceDiff",
        "priceDiff7d",
        "priceDiff30d",
        "priceVolDiff1",
        "priceVolDiff7",
        "priceVolDiff30",
        "priceCurrency",
    ]:
        if col in df.columns:
            df.drop(col, axis=1, inplace=True)

    # Scale raw supply into whole-token units
    df["totalSupply"] = df["totalSupply"].astype(float) / (10 ** int(decimals))
    # Transpose into a two-column Metric/Value layout
    df = df.T.reset_index()
    df.columns = ["Metric", "Value"]
    # pylint: disable=unsupported-assignment-operation
    # Wrap long string values for table display
    df["Value"] = df["Value"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=70)) if isinstance(x, str) else x
    )
    return df
@log_start_end(log=logger)
def get_tx_info(tx_hash: str) -> pd.DataFrame:
    """Get info about transaction. [Source: Ethplorer]

    Parameters
    ----------
    tx_hash: str
        Transaction hash e.g. 0x9dc7b43ad4288c624fdd236b2ecb9f2b81c93e706b2ffd1d19b112c1df7849e6

    Returns
    -------
    pd.DataFrame
        Two-column (Metric, Value) DataFrame with information about the ERC20
        token transaction. Empty DataFrame if an expected key is missing.
    """
    decimals = None
    response = make_request("getTxInfo", tx_hash)
    try:
        # Drop noisy fields and flatten the first operation into the
        # top-level response so everything ends up in one row.
        response.pop("logs")
        operations = response.pop("operations")[0]
        if operations:
            operations.pop("addresses")
            # NOTE(review): token.get is called before the `if token` check —
            # a None tokenInfo would raise AttributeError, which is not caught
            # below (only KeyError is). Confirm tokenInfo is always a dict.
            token = operations.pop("tokenInfo")
            decimals = token.get("decimals")
            if token:
                operations["token"] = token["name"]
                operations["tokenAddress"] = token["address"]
            operations["timestamp"] = datetime.fromtimestamp(operations["timestamp"])
        response.update(operations)
        response.pop("input")
        df = pd.Series(response)
        if decimals:
            for col in ["intValue", "value"]:
                # NOTE(review): floor division truncates the scaled amount to a
                # whole number of tokens; sibling helpers in this module use
                # true division — confirm the truncation here is intentional.
                df[col] = float(df[col]) // (10 ** int(decimals))
        df = df.to_frame().reset_index()
        df.columns = ["Metric", "Value"]
    except KeyError as e:
        logger.exception(str(e))
        return pd.DataFrame()
    return df
@log_start_end(log=logger)
def get_token_history(
    address: str, sortby: str = "timestamp", ascend: bool = False
) -> pd.DataFrame:
    """Get info about token historical transactions. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token e.g. 0xf3db5fa2c66b7af3eb0c0b782510816cbe4813b8
    sortby: str
        Key to sort by.
    ascend: str
        Sort in descending order.

    Returns
    -------
    pd.DataFrame
        DataFrame with token historical transactions.
    """
    response = make_request("getTokenHistory", address, limit=1000)
    all_operations = []
    operations = response["operations"]
    try:
        # Token metadata is the same for every operation; read it once.
        first_row = operations[0]["tokenInfo"]
        name, symbol, _ = (
            first_row.get("name"),
            first_row.get("symbol"),
            first_row.get("balance"),
        )
        decimals = first_row.get("decimals")
    except Exception as e:
        logger.exception(str(e))
        name, symbol = "", ""
        decimals = None
    for operation in operations:
        operation.pop("type")
        operation.pop("tokenInfo")
        operation["timestamp"] = datetime.fromtimestamp(operation["timestamp"])
        all_operations.append(operation)
    df = pd.DataFrame(all_operations)
    if df.empty:
        console.print(f"No historical transaction found for {address}")
        return df
    df[["name", "symbol"]] = name, symbol
    df["value"] = df["value"].astype(float)
    # Scale raw transfer amounts only when the token metadata included
    # "decimals"; int(None) would otherwise raise a TypeError here.
    if decimals is not None:
        df["value"] = df["value"] / (10 ** int(decimals))
    df = df[["timestamp", "name", "symbol", "value", "from", "to", "transactionHash"]]
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
@log_start_end(log=logger)
def get_token_historical_price(
    address: str,
    sortby: str = "date",
    ascend: bool = False,
) -> pd.DataFrame:
    """Get token historical prices with volume and market cap, and average price. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token e.g. 0xf3db5fa2c66b7af3eb0c0b782510816cbe4813b8
    sortby: str
        Key to sort by.
    ascend: str
        Sort in descending order.

    Returns
    -------
    pd.DataFrame
        DataFrame with token historical prices; empty when the API returns
        no price history for the address.
    """
    response = make_request("getTokenPriceHistoryGrouped", address)
    history = response["history"]
    # The "current" entry is a snapshot, not part of the time series.
    history.pop("current")
    price_records = history.get("prices")
    if not price_records:
        console.print(f"No historical price found for {address}")
        return pd.DataFrame()
    prices = pd.DataFrame(price_records)
    prices["ts"] = prices["ts"].apply(datetime.fromtimestamp)
    if "tmp" in prices.columns:
        prices = prices.drop(columns="tmp")
    columns = [
        "date",
        "open",
        "close",
        "high",
        "low",
        "volumeConverted",
        "cap",
        "average",
    ]
    return prices[columns].sort_values(by=sortby, ascending=ascend)
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_very_long_number_formatter,
)
from openbb_terminal.cryptocurrency.onchain import ethplorer_model
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# pylint: disable=unsupported-assignment-operation
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_address_info(
    address: str,
    limit: int = 15,
    sortby: str = "index",
    ascend: bool = False,
    export: str = "",
) -> None:
    """Display info about tokens held by a given Ethereum address: ETH balance
    plus the balance of every token with name and symbol. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Ethereum address.
    limit: int
        Maximum number of rows to display. Maximum 100
    sortby: str
        Key to sort by.
    ascend: str
        Sort in descending order.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    balances = ethplorer_model.get_address_info(address, sortby=sortby, ascend=ascend)
    # Keep an unformatted copy for export before prettifying the numbers.
    raw_balances = balances.copy()
    balances["balance"] = balances["balance"].apply(
        lambda value: lambda_very_long_number_formatter(value)
        if value >= 10000
        else round(float(value), 4)
    )
    print_rich_table(
        balances.head(limit),
        headers=list(balances.columns),
        show_index=False,
        title="Blockchain Token Information",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "balance",
        raw_balances,
    )
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_top_tokens(
    limit: int = 15,
    sortby: str = "rank",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Display top ERC20 tokens [Source: Ethplorer]

    Parameters
    ----------
    limit: int
        Maximum number of rows to display. Maximum 100
    sortby: str
        Key to sort by.
    ascend: str
        Sort in descending order.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    tokens = ethplorer_model.get_top_tokens(sortby, ascend)
    # Keep an unformatted copy for export before prettifying the numbers.
    raw_tokens = tokens.copy()
    tokens = tokens.fillna("")
    # Shorten the large counter columns when they are present.
    for counter in ("txsCount", "transfersCount", "holdersCount"):
        if counter in tokens.columns:
            tokens[counter] = tokens[counter].apply(lambda_very_long_number_formatter)
    print_rich_table(
        tokens.head(limit),
        headers=list(tokens.columns),
        show_index=False,
        title="Top ERC20 Tokens",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "top",
        raw_tokens,
    )
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_top_token_holders(
    address: str,
    limit: int = 10,
    sortby: str = "balance",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Display info about top ERC20 token holders. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token address e.g. 0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984
    limit: int
        Maximum number of rows to display. Maximum 100
    sortby: str
        Key to sort by.
    ascend: str
        Sort in descending order.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    holders = ethplorer_model.get_top_token_holders(address, sortby, ascend)
    # Keep an unformatted copy for export before prettifying the numbers.
    raw_holders = holders.copy()
    holders["balance"] = holders["balance"].apply(lambda_very_long_number_formatter)
    print_rich_table(
        holders.head(limit),
        headers=list(holders.columns),
        show_index=False,
        title="ERC20 Token Holder Info",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "holders",
        raw_holders,
    )
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_address_history(
    address: str,
    limit: int = 10,
    sortby: str = "timestamp",
    ascend: bool = True,
    export: str = "",
) -> None:
    """Display information about an address's historical transactions.
    [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Ethereum blockchain address e.g. 0x3cD751E6b0078Be393132286c442345e5DC49699
    limit: int
        Maximum number of rows to display. Maximum 100
    sortby: str
        Key to sort by.
    ascend: str
        Sort in ascending order.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    history = ethplorer_model.get_address_history(address, sortby, ascend)
    # Keep an unformatted copy for export before prettifying the numbers.
    raw_history = history.copy()
    history["value"] = history["value"].apply(
        lambda value: lambda_very_long_number_formatter(value)
        if value >= 10000
        else round(float(value), 4)
    )
    print_rich_table(
        history.head(limit),
        headers=list(history.columns),
        show_index=False,
        title="Historical Transactions Information",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "hist",
        raw_history,
    )
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_token_info(
    address: str,
    social: bool = False,
    export: str = "",
) -> None:
    """Display info about an ERC20 token. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token address e.g. 0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984
    social: bool
        Flag to display social media links
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    token = ethplorer_model.get_token_info(address)
    # Keep an unformatted copy for export before prettifying the numbers.
    raw_token = token.copy()
    token.loc[:, "Value"] = token["Value"].apply(lambda_very_long_number_formatter)
    socials = ["website", "telegram", "reddit", "twitter", "coingecko"]
    if social:
        # Restrict the table to identity fields plus the social links.
        token = token[token["Metric"].isin(["balance", "name", "symbol"] + socials)]
    else:
        token = token[~token["Metric"].isin(socials)]
    print_rich_table(
        token,
        headers=list(token.columns),
        show_index=False,
        title="ERC20 Token Information",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "info",
        raw_token,
    )
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_tx_info(
    tx_hash: str,
    export: str = "",
) -> None:
    """Display info about a transaction. [Source: Ethplorer]

    Parameters
    ----------
    tx_hash: str
        Transaction hash e.g. 0x9dc7b43ad4288c624fdd236b2ecb9f2b81c93e706b2ffd1d19b112c1df7849e6
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    tx = ethplorer_model.get_tx_info(tx_hash)
    print_rich_table(
        tx,
        headers=list(tx.columns),
        show_index=False,
        title="Information About Transactions",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "tx", tx)
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_token_history(
    address: str,
    limit: int = 10,
    sortby: str = "timestamp",
    ascend: bool = False,
    hash_: bool = False,
    export: str = "",
) -> None:
    """Display info about token history. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token address e.g. 0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984
    limit: int
        Maximum number of rows to display. Maximum 100
    sortby: str
        Key to sort by.
    ascend: str
        Sort in descending order.
    hash_: bool,
        Flag to show transaction hash.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    history = ethplorer_model.get_token_history(address, sortby, ascend)
    # Keep an unformatted copy for export before prettifying the numbers.
    raw_history = history.copy()
    if history.empty:
        console.print(f"No results found for balance: {address}\n")
        return
    history.loc[:, "value"] = history["value"].apply(
        lambda_very_long_number_formatter
    )
    # Show either the transaction hash or the from/to addresses - not both.
    dropped = ["from", "to"] if hash_ else ["transactionHash"]
    history = history.drop(columns=dropped)
    print_rich_table(
        history.head(limit),
        headers=list(history.columns),
        show_index=False,
        title="Token History Information",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "th",
        raw_history,
    )
@log_start_end(log=logger)
@check_api_key(["API_ETHPLORER_KEY"])
def display_token_historical_prices(
    address: str,
    limit: int = 30,
    sortby: str = "date",
    ascend: bool = False,
    export: str = "",
) -> None:
    """Display token historical prices with volume and market cap, and average price.
    [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token address e.g. 0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984
    limit: int
        Maximum number of rows to display. Maximum 100
    sortby: str
        Key to sort by.
    ascend: str
        Sort in descending order.
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    prices = ethplorer_model.get_token_historical_price(address, sortby, ascend)
    # Keep an unformatted copy for export before prettifying the numbers.
    raw_prices = prices.copy()
    if prices.empty:
        console.print(f"No results found for balance: {address}\n")
        return
    prices["volumeConverted"] = prices["volumeConverted"].apply(
        lambda_very_long_number_formatter
    )
    prices.loc[:, "cap"] = prices["cap"].apply(lambda_very_long_number_formatter)
    print_rich_table(
        prices.head(limit),
        headers=list(prices.columns),
        show_index=False,
        title="Historical Token Prices",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "prices",
        raw_prices,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_terminal import theme
from openbb_terminal.cryptocurrency.onchain.shroom_model import (
get_daily_transactions,
get_dapp_stats,
get_total_value_locked,
)
from openbb_terminal.decorators import check_api_key
from openbb_terminal import config_plot as cfgPlot
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_SHROOM_KEY"])
def display_daily_transactions(
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot daily transactions for major stablecoins on the Ethereum blockchain
    [Source: https://sdk.flipsidecrypto.xyz/shroomdk]

    Parameters
    ----------
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    symbols = ["DAI", "USDT", "BUSD", "USDC"]
    df = get_daily_transactions(symbols)
    if df.empty:
        return

    # This plot has 1 axis: create our own unless a valid one was supplied.
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    for symbol in symbols:
        # Scale raw counts down to billions for a readable axis.
        ax.plot(df.index, df[symbol] / 1_000_000_000, label=symbol, lw=0.5)
    ax.set_title("Daily Transactions in Ethereum")
    ax.set_ylabel("Transactions [in billions]")
    ax.set_xlabel("Date")
    ax.set_xlim(df.index[0], df.index[-1])
    ax.legend()
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "dt",
        df,
    )
@log_start_end(log=logger)
@check_api_key(["API_SHROOM_KEY"])
def display_dapp_stats(
    platform: str,
    raw: bool = False,
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display daily users and fees for a given dapp platform
    [Source: https://sdk.flipsidecrypto.xyz/shroomdk]

    Parameters
    ----------
    platform : str
        Platform name (e.g., uniswap-v3)
    raw : bool
        Show raw data
    limit : int
        Limit of rows
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = get_dapp_stats(platform=platform)
    if df.empty:
        console.print("No data found.", "\n")
        return
    if raw:
        print_rich_table(df.head(limit), headers=list(df.columns), show_index=True)

    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        # Wrong number of external axes supplied: bail out instead of hitting
        # an undefined `ax` below (matches the other views in this module).
        return

    ax.bar(df.index, df["n_users"], color=theme.down_color, label="Number of Users")
    ax.set_xlim(
        df.index[0],
        df.index[-1],
    )
    ax2 = ax.twinx()
    ax2.plot(df["fees"] / 1_000_000, color=theme.up_color, label="Platform Fees")
    # NOTE(review): the y-labels are attached to the opposite axes and the
    # label positions mirrored below — confirm this renders as intended.
    ax2.set_ylabel("Number of Users", labelpad=30)
    ax2.set_zorder(ax2.get_zorder() + 1)
    ax.patch.set_visible(False)
    ax2.yaxis.set_label_position("left")
    ax.set_ylabel(
        "Platforms Fees [USD M]", labelpad=30
    )  # attribute Deb because of $ -> USD
    ax.set_title(f"{platform} stats")
    ax.legend(loc="upper left")
    ax2.legend(loc="upper right")
    cfg.theme.style_primary_axis(ax)
    if external_axes is None:
        cfg.theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ds",
        df,
    )
@log_start_end(log=logger)
@check_api_key(["API_SHROOM_KEY"])
def display_total_value_locked(
    user_address: str,
    address_name: str,
    symbol: str = "USDC",
    interval: int = 1,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """
    Plot the total value locked for a certain address.
    TVL measures the total amount of a token that is locked in a contract.
    [Source: https://sdk.flipsidecrypto.xyz/shroomdk]

    Parameters
    ----------
    user_address : str
        Address of the user (e.g., 0xa5407eae9ba41422680e2e00537571bcc53efbfd)
    address_name : str
        Name of the address (e.g., makerdao gem join usdc)
    symbol : str
        Symbol of the token
    interval : int
        Interval of months
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    tvl = get_total_value_locked(
        user_address=user_address,
        address_name=address_name,
        symbol=symbol,
        interval=interval,
    )
    if tvl.empty:
        console.print("No data found.", "\n")
        return

    # This plot has 1 axis: create our own unless a valid one was supplied.
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.bar(tvl.index, tvl["amount_usd"], color=theme.down_color, label="amount_usd")
    ax.set_title("Total value locked Ethereum ERC20")
    ax.set_xlabel("Date")
    ax.set_ylabel("Amount [USD M]")
    ax.set_xlim(tvl.index[0], tvl.index[-1])
    ax.legend()
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "tvl",
        tvl,
    )
__docformat__ = "numpy"
# flake8: noqa
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
from openbb_terminal import config_terminal as cfg
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.cryptocurrency.nft import nftpricefloor_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
is_valid_axes_count,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_collections(
    show_fp: bool = False, show_sales: bool = False, limit: int = 5, export: str = ""
):
    """Display NFT collections. [Source: https://nftpricefloor.com/]

    Parameters
    ----------
    show_fp : bool
        Show NFT Price Floor for top collections
    show_sales : bool
        Show sales count for top collections
    limit: int
        Number of NFT collections to display
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = nftpricefloor_model.get_collections()

    if df.empty:
        console.print("No data found.", "\n")
    else:
        df = df[
            [
                "slug",
                "floorInfo.currentFloorEth",
                "totalSupply",
                "listedCount",
                "blockchain",
            ]
        ]
        if show_fp or show_sales:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            # Floor price takes precedence when both flags are set.
            column = "floorEth" if show_fp else "salesCount"
            for slug in df["slug"].head(limit).values:
                history = nftpricefloor_model.get_floor_price(slug)
                if history.empty:
                    continue
                ax.plot(history.index, history[column], label=slug)
            ax.set_ylabel("Floor Price [ETH]" if show_fp else "Sales")
            cfg.theme.style_primary_axis(ax)
            ax.legend()
            ax.set_title("Collections Floor Price" if show_fp else "Collections Sales")
            cfg.theme.visualize_output()

        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
            title="NFT Collections",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "collections",
        df,
    )
@log_start_end(log=logger)
def display_floor_price(
    slug: str,
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
    raw: bool = False,
):
    """Display NFT collection floor price over time. [Source: https://nftpricefloor.com/]

    Parameters
    ----------
    slug: str
        NFT collection slug
    raw: bool
        Flag to display raw data
    limit: int
        Number of raw data to show
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = nftpricefloor_model.get_floor_price(slug)
    if df.empty:
        console.print("No data found.", "\n")
        return
    if raw:
        print_rich_table(
            df.head(limit),
            index_name="date",
            headers=list(df.columns),
            show_index=True,
            title=f"{slug} Floor Price",
        )

    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        # Wrong number of external axes supplied: bail out instead of hitting
        # an undefined `ax` below.
        return

    ax.bar(df.index, df["salesCount"], color=theme.down_color, label="Sales")
    ax.set_xlim(
        df.index[0],
        df.index[-1],
    )
    ax2 = ax.twinx()
    ax2.plot(df["floorEth"], color=theme.up_color, label="Floor Price")
    # NOTE(review): the y-labels are attached to the opposite axes and the
    # label positions mirrored below — confirm this renders as intended.
    ax2.set_ylabel("Sales", labelpad=20)
    ax2.set_zorder(ax2.get_zorder() + 1)
    ax.patch.set_visible(False)
    ax2.yaxis.set_label_position("left")
    ax.set_ylabel("Floor Price [ETH]", labelpad=30)
    ax.set_title(f"{slug} Floor Price")
    ax.legend(loc="upper left")
    ax2.legend(loc="upper right")
    cfg.theme.style_primary_axis(ax)
    if external_axes is None:
        cfg.theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "fp",
        df,
    )
import logging
from datetime import datetime
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
API_URL = "https://api.opensea.io/api/v1"
@log_start_end(log=logger)
def get_collection_stats(slug: str) -> pd.DataFrame:
    """Get stats of a nft collection [Source: opensea.io]

    Parameters
    ----------
    slug : str
        Opensea collection slug. If the name of the collection is Mutant Ape Yacht Club the slug is mutant-ape-yacht-club

    Returns
    -------
    pd.DataFrame
        Two-column (Metric, Value) DataFrame with the collection stats.
        Empty DataFrame when the request does not return HTTP 200.
    """
    # A timeout keeps the terminal from hanging indefinitely when opensea.io
    # is slow or unreachable.
    res = requests.get(f"{API_URL}/collection/{slug}", timeout=10)
    if res.status_code == 200:
        data = res.json()
        collection = data["collection"]
        stats = collection["stats"]
        metrics = [
            "Name",
            "Floor Price (ETH)",
            "Number of Owners",
            "Market Cap (ETH)",
            "Average Price ETH",
            "One day volume (ETH)",
            "One day change (%)",
            "One day sales (ETH)",
            "One day average price (ETH)",
            "Thirty day volume (ETH)",
            "Thirty day change (%)",
            "Thirty day sales (ETH)",
            "Thirty day average price (ETH)",
            "Total Supply (ETH)",
            "Total Sales (ETH)",
            "Total Volume (ETH)",
            "Creation Date",
            "URL",
        ]
        # The created date sometimes carries milliseconds; slice the string so
        # only the "%Y-%m-%dT%H:%M:%S" portion is parsed below.
        created_date = collection["created_date"][0:19]
        values = [
            collection["name"],
            "-" if not stats["floor_price"] else float(stats["floor_price"]),
            round(float(stats["num_owners"]), 2),
            round(float(stats["market_cap"]), 2),
            round(float(stats["average_price"]), 2),
            round(float(stats["one_day_volume"]), 2),
            round(float(stats["one_day_change"]) * 100, 2),
            round(float(stats["one_day_sales"]), 2),
            round(float(stats["one_day_average_price"]), 2),
            round(float(stats["thirty_day_volume"]), 2),
            round(float(stats["thirty_day_change"]) * 100, 2),
            round(float(stats["thirty_day_sales"]), 2),
            round(float(stats["thirty_day_average_price"]), 2),
            round(float(stats["total_supply"]), 2),
            round(float(stats["total_sales"]), 2),
            round(float(stats["total_volume"]), 2),
            datetime.strptime(created_date, "%Y-%m-%dT%H:%M:%S").strftime("%b %d, %Y"),
            "-" if not collection["external_url"] else collection["external_url"],
        ]
        df = pd.DataFrame({"Metric": metrics, "Value": values})
        return df
    return pd.DataFrame()
__docformat__ = "numpy"
import argparse
import logging
from typing import List
import numpy as np
import pandas as pd
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common.quantitative_analysis import qa_view, rolling_view
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
check_proportion_range,
check_list_dates,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import CryptoBaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class QaController(CryptoBaseController):
    """Quantitative Analysis Controller class"""

    # Commands exposed in this menu; used for routing and completion.
    CHOICES_COMMANDS = [
        "load",
        "pick",
        "raw",
        "summary",
        "line",
        "hist",
        "cdf",
        "bw",
        "rolling",
        "decompose",
        "cusum",
        "acf",
        "spread",
        "quantile",
        "skew",
        "kurtosis",
        "normality",
        "qqplot",
        "unitroot",
    ]
    # Regression-constant options for the unit-root tests (see `unitroot`).
    FULLER_REG = ["c", "ct", "ctt", "nc"]
    KPS_REG = ["c", "ct"]
    # Menu path used by the parent controller for routing/help.
    PATH = "/crypto/qa/"
    # Auto-generate argparse completion choices for this controller.
    CHOICES_GENERATION = True
def __init__(
self,
symbol: str,
data: pd.DataFrame,
queue: List[str] = None,
):
"""Constructor"""
super().__init__(queue)
data["Returns"] = data["Close"].pct_change()
data["LogRet"] = np.log(data["Close"]) - np.log(data["Close"].shift(1))
data = data.dropna()
self.data = data
self.symbol = symbol
self.target = "Close"
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = self.choices_default
choices["pick"].update({c: {} for c in self.data.columns.tolist()})
choices["load"] = {
"--interval": {
c: {}
for c in [
"1",
"5",
"15",
"30",
"60",
"240",
"1440",
"10080",
"43200",
]
},
"-i": "--interval",
"--exchange": {c: {} for c in self.exchanges},
"--source": {c: {} for c in ["CCXT", "YahooFinance", "CoingGecko"]},
"--vs": {c: {} for c in ["usd", "eur"]},
"--start": None,
"-s": "--start",
"--end": None,
"-e": "--end",
}
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
mt = MenuText("crypto/qa/")
mt.add_cmd("load")
mt.add_cmd("pick")
mt.add_raw("\n")
mt.add_param("_ticker", self.symbol)
mt.add_param("_target", self.target)
mt.add_raw("\n")
mt.add_info("_statistics_")
mt.add_cmd("summary")
mt.add_cmd("normality")
mt.add_cmd("unitroot")
mt.add_info("_plots_")
mt.add_cmd("line")
mt.add_cmd("hist")
mt.add_cmd("cdf")
mt.add_cmd("bw")
mt.add_cmd("acf")
mt.add_cmd("qqplot")
mt.add_info("_rolling_metrics_")
mt.add_cmd("rolling")
mt.add_cmd("spread")
mt.add_cmd("quantile")
mt.add_cmd("skew")
mt.add_cmd("kurtosis")
mt.add_info("_other_")
mt.add_cmd("raw")
mt.add_cmd("decompose")
mt.add_cmd("cusum")
console.print(text=mt.menu_text, menu="Cryptocurrency - Quantitative Analysis")
def custom_reset(self):
"""Class specific component of reset command"""
if self.symbol:
return ["crypto", f"load {self.symbol}", "qa"]
return []
    @log_start_end(log=logger)
    def call_pick(self, other_args: List[str]):
        """Process pick command: switch the column used as the analysis target."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="pick",
            description="""
                Change target variable
            """,
        )
        parser.add_argument(
            "-t",
            "--target",
            dest="target",
            choices=self.data.columns.tolist(),
            help="Select variable to analyze",
        )
        # Allow `pick <column>` without the explicit -t flag.
        if other_args and "-t" not in other_args and "-h" not in other_args:
            other_args.insert(0, "-t")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            self.target = ns_parser.target
    @log_start_end(log=logger)
    def call_raw(self, other_args: List[str]):
        """Process raw command: print the loaded dataframe to the console."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="raw",
            description="""
                Print raw data to console
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            help="Number to show",
            type=check_positive,
            default=20,
            dest="limit",
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            default=False,
            dest="reverse",
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        # NOTE(review): choices are lower-cased, space-stripped column names
        # while self.data keeps the original casing — confirm display_raw
        # resolves the column case-insensitively.
        parser.add_argument(
            "-s",
            "--sortby",
            help="The column to sort by",
            choices=[x.lower().replace(" ", "") for x in self.data.columns],
            type=str.lower,
            dest="sortby",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            qa_view.display_raw(
                data=self.data,
                limit=ns_parser.limit,
                sortby=ns_parser.sortby,
                ascend=ns_parser.reverse,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_summary(self, other_args: List[str]):
        """Process summary command: show summary statistics for the data."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="summary",
            description="""
                Summary statistics
            """,
        )
        # Only raw-data export formats (csv/json/xlsx) make sense here.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            qa_view.display_summary(data=self.data, export=ns_parser.export)
    @log_start_end(log=logger)
    def call_line(self, other_args: List[str]):
        """Process line command: plot the target column as a line chart."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="line",
            description="Show line plot of selected data or highlight specific datetimes.",
        )
        parser.add_argument(
            "--log",
            help="Plot with y on log scale",
            dest="log",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "--ml",
            help="Draw vertical line markers to highlight certain events (comma separated dates, e.g. 2020-01-01,2020-02-01)",  # noqa: E501
            dest="ml",
            type=check_list_dates,
            default="",
        )
        parser.add_argument(
            "--ms",
            help="Draw scatter markers to highlight certain events (comma separated dates, e.g. 2021-01-01,2021-02-01)",
            dest="ms",
            type=check_list_dates,
            default="",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
        )
        if ns_parser:
            # Only the currently picked target column is plotted.
            qa_view.display_line(
                self.data[self.target],
                title=f"{self.symbol} {self.target}",
                log_y=ns_parser.log,
                markers_lines=ns_parser.ml,
                markers_scatter=ns_parser.ms,
            )
    @log_start_end(log=logger)
    def call_hist(self, other_args: List[str]):
        """Process hist command: histogram of the target with density and rug."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="hist",
            description="""
                Histogram with density and rug
            """,
        )
        parser.add_argument(
            "-b",
            "--bins",
            type=check_positive,
            default=15,
            dest="n_bins",
            # Bin count is restricted to the 10-99 range.
            choices=range(10, 100),
            metavar="N_BINS",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            qa_view.display_hist(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                bins=ns_parser.n_bins,
            )
    @log_start_end(log=logger)
    def call_cdf(self, other_args: List[str]):
        """Process cdf command: cumulative distribution function of the target."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="cdf",
            description="""
                Cumulative distribution function
            """,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            qa_view.display_cdf(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_bw(self, other_args: List[str]):
        """Process bw command: box-and-whisker plot of the target."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False,
            prog="bw",
            description="""
                Box and Whisker plot
            """,
        )
        parser.add_argument(
            "-y",
            "--yearly",
            action="store_true",
            default=False,
            dest="year",
            help="Flag to show yearly bw plot",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            qa_view.display_bw(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                yearly=ns_parser.year,
            )
    @log_start_end(log=logger)
    def call_decompose(self, other_args: List[str]):
        """Process decompose command: seasonal decomposition of the target."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="decompose",
            description="""
                Decompose time series as:
                - Additive Time Series = Level + CyclicTrend + Residual + Seasonality
                - Multiplicative Time Series = Level * CyclicTrend * Residual * Seasonality
            """,
        )
        parser.add_argument(
            "-m",
            "--multiplicative",
            action="store_true",
            default=False,
            dest="multiplicative",
            help="decompose using multiplicative model instead of additive",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            qa_view.display_seasonal(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                multiplicative=ns_parser.multiplicative,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_cusum(self, other_args: List[str]):
        """Process cusum command: detect abrupt changes in the target series."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="cusum",
            description="""
                Cumulative sum algorithm (CUSUM) to detect abrupt changes in data
            """,
        )
        # Default threshold scales with the data: 1/40 of the target's
        # observed range.
        parser.add_argument(
            "-t",
            "--threshold",
            dest="threshold",
            type=float,
            default=(
                max(self.data[self.target].values) - min(self.data[self.target].values)
            )
            / 40,
            help="threshold",
        )
        # Default drift scales with the data: 1/80 of the target's range.
        parser.add_argument(
            "-d",
            "--drift",
            dest="drift",
            type=float,
            default=(
                max(self.data[self.target].values) - min(self.data[self.target].values)
            )
            / 80,
            help="drift",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            qa_view.display_cusum(
                data=self.data,
                target=self.target,
                threshold=ns_parser.threshold,
                drift=ns_parser.drift,
            )
    @log_start_end(log=logger)
    def call_acf(self, other_args: List[str]):
        """Process acf command.

        Plot auto-correlation and partial auto-correlation functions of the
        differenced target series.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="acf",
            description="""
                Auto-Correlation and Partial Auto-Correlation Functions for diff and diff diff crypto data
            """,
        )
        parser.add_argument(
            "-l",
            "--lags",
            dest="lags",
            type=check_positive,
            default=15,
            help="maximum lags to display in plots",
            choices=range(5, 100),
            metavar="LAGS",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Warn (but still plot) when the user picked a non-Close target,
            # since ACF/PACF on other columns is usually less meaningful.
            if self.target != "Close":
                console.print(
                    "Target not Close.  For best results, use `pick Close` first."
                )
            qa_view.display_acf(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                lags=ns_parser.lags,
            )
    @log_start_end(log=logger)
    def call_rolling(self, other_args: List[str]):
        """Process rolling command.

        Plot the rolling mean and standard deviation of the target column.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rolling",
            description="""
                Rolling mean and std deviation
            """,
        )
        parser.add_argument(
            "-w",
            "--window",
            action="store",
            dest="n_window",
            type=check_positive,
            default=14,
            help="Window length",
            choices=range(5, 100),
            metavar="N_WINDOW",
        )
        # Only raw-data export formats are permitted for this command.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            rolling_view.display_mean_std(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                window=ns_parser.n_window,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_spread(self, other_args: List[str]):
        """Process spread command.

        Plot the rolling spread (variance-based) measurement of the target.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="spread",
            description="""Shows rolling spread measurement
            """,
        )
        parser.add_argument(
            "-w",
            "--window",
            action="store",
            dest="n_window",
            type=check_positive,
            default=14,
            help="Window length",
            choices=range(5, 100),
            metavar="N_WINDOW",
        )
        # Only raw-data export formats are permitted for this command.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            rolling_view.display_spread(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                window=ns_parser.n_window,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_quantile(self, other_args: List[str]):
"""Process quantile command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="quantile",
description="""
The quantiles are values which divide the distribution such that
there is a given proportion of observations below the quantile.
For example, the median is a quantile. The median is the central
value of the distribution, such that half the points are less than
or equal to it and half are greater than or equal to it.
By default, q is set at 0.5, which effectively is median. Change q to
get the desired quantile (0<q<1).
""",
)
parser.add_argument(
"-w",
"--window",
action="store",
dest="n_window",
type=check_positive,
default=14,
help="window length",
choices=range(5, 100),
metavar="N_WINDOW",
)
parser.add_argument(
"-q",
"--quantile",
action="store",
dest="f_quantile",
type=check_proportion_range,
default=0.5,
help="quantile",
choices=np.arange(0.0, 1.0, 0.01).tolist(),
metavar="F_QUANTILE",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
rolling_view.display_quantile(
symbol=self.symbol,
data=self.data,
target=self.target,
window=ns_parser.n_window,
quantile=ns_parser.f_quantile,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_skew(self, other_args: List[str]):
        """Process skew command.

        Plot the rolling skewness of the target column.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="skew",
            description="""
                Skewness is a measure of asymmetry or distortion of symmetric
                distribution. It measures the deviation of the given distribution
                of a random variable from a symmetric distribution, such as normal
                distribution. A normal distribution is without any skewness, as it is
                symmetrical on both sides. Hence, a curve is regarded as skewed if
                it is shifted towards the right or the left.
            """,
        )
        parser.add_argument(
            "-w",
            "--window",
            action="store",
            dest="n_window",
            type=check_positive,
            default=14,
            help="window length",
            choices=range(5, 100),
            metavar="N_WINDOW",
        )
        # Only raw-data export formats are permitted for this command.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            rolling_view.display_skew(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                window=ns_parser.n_window,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_kurtosis(self, other_args: List[str]):
        """Process kurtosis command.

        Plot the rolling kurtosis of the target column.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="kurtosis",
            description="""
                Kurtosis is a measure of the "tailedness" of the probability distribution
                of a real-valued random variable. Like skewness, kurtosis describes the shape
                of a probability distribution and there are different ways of quantifying it
                for a theoretical distribution and corresponding ways of estimating it from
                a sample from a population. Different measures of kurtosis may have different
                interpretations.
            """,
        )
        parser.add_argument(
            "-w",
            "--window",
            action="store",
            dest="n_window",
            type=check_positive,
            default=14,
            help="window length",
            choices=range(5, 100),
            metavar="N_WINDOW",
        )
        # Only raw-data export formats are permitted for this command.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            rolling_view.display_kurtosis(
                symbol=self.symbol,
                data=self.data,
                target=self.target,
                window=ns_parser.n_window,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_normality(self, other_args: List[str]):
        """Process normality command.

        Run statistical normality tests against the target column.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="normality",
            description="""
                Normality tests
            """,
        )
        # Only raw-data export formats are permitted for this command.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            qa_view.display_normality(
                data=self.data, target=self.target, export=ns_parser.export
            )
    @log_start_end(log=logger)
    def call_qqplot(self, other_args: List[str]):
        """Process qqplot command.

        Plot quantiles of the target column against normal quantiles.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="qqplot",
            description="""
                Display QQ plot vs normal quantiles
            """,
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            qa_view.display_qqplot(
                symbol=self.symbol, data=self.data, target=self.target
            )
    @log_start_end(log=logger)
    def call_unitroot(self, other_args: List[str]):
        """Process unitroot command.

        Run ADF and KPSS stationarity tests on the target column.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="unitroot",
            description="""
                Unit root test / stationarity (ADF, KPSS)
            """,
        )
        # Regression specification for the Augmented Dickey-Fuller test;
        # allowed values come from the controller's FULLER_REG constant.
        parser.add_argument(
            "-r",
            "--fuller_reg",
            help="Type of regression. Can be ‘c’,’ct’,’ctt’,’nc’ 'c' - Constant and t - trend order",
            choices=self.FULLER_REG,
            default="c",
            type=str,
            dest="fuller_reg",
        )
        # Regression specification for the KPSS test (KPS_REG constant).
        parser.add_argument(
            "-k",
            "--kps_reg",
            help="Type of regression. Can be ‘c’,’ct'",
            choices=self.KPS_REG,
            type=str,
            dest="kpss_reg",
            default="c",
        )
        # Only raw-data export formats are permitted for this command.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            qa_view.display_unitroot(
                data=self.data,
                target=self.target,
                fuller_reg=ns_parser.fuller_reg,
                kpss_reg=ns_parser.kpss_reg,
                export=ns_parser.export,
            )
__docformat__ = "numpy"
import logging
from typing import Tuple
import pandas as pd
from openbb_terminal.cryptocurrency.tools.tools_helpers import (
calculate_hold_value,
calculate_pool_value,
)
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def calculate_apy(apr: float, compounding_times: int) -> Tuple[pd.DataFrame, str]:
    """Converts apr into apy

    Parameters
    ----------
    apr: float
        value in percentage
    compounding_times: int
        number of compounded periods in a year

    Returns
    -------
    Tuple[pd.DataFrame, str]
        - pd.DataFrame: dataframe with results
        - str: narrative version of results
    """
    # Standard APR->APY conversion: compound the per-period rate over a year.
    rate_per_period = (apr / 100) / compounding_times
    apy = ((1 + rate_per_period) ** compounding_times - 1) * 100
    apy_str = f"""
    An APR of {apr}% compounded {compounding_times} times per year equals to an APY of {round(apy,3)}%
    """
    # Build the summary table from (metric, value) pairs.
    rows = [
        ("APR", f"{apr}%"),
        ("Compounding Times", compounding_times),
        ("APY", f"{round(apy, 3)}%"),
    ]
    df = pd.DataFrame(rows, columns=["Metric", "Value"])
    return df, apy_str
@log_start_end(log=logger)
def calculate_il(
    price_changeA: float,
    price_changeB: float,
    proportion: float,
    initial_pool_value: float,
) -> Tuple[pd.DataFrame, str]:
    """Calculates Impermanent Loss in a custom liquidity pool

    Parameters
    ----------
    price_changeA: float
        price change of crypto A in percentage
    price_changeB: float
        price change of crypto B in percentage
    proportion: float
        percentage of first token in pool
    initial_pool_value: float
        initial value that pool contains

    Returns
    -------
    Tuple[pd.DataFrame, str]
        - pd.DataFrame: dataframe with results
        - str: narrative version of results
    """
    # Relative value of the position if kept in the pool vs simply held.
    pool_value = calculate_pool_value(price_changeA, price_changeB, proportion)
    hold_value = calculate_hold_value(price_changeA, price_changeB, proportion)
    # Impermanent loss is the relative shortfall of the pooled position,
    # expressed as a positive percentage.
    il = abs(((pool_value / hold_value) - 1) * 100)
    # Scale relative values by the initial investment to get dollar amounts.
    hold_value = hold_value * initial_pool_value
    pool_value = pool_value * initial_pool_value
    # Fixed user-facing typo: "impermant" -> "impermanent".
    il_str = f"""
    Ignoring fees/rewards and only accounting for impermanent loss:
    A change of {price_changeA}% in token A and {price_changeB}% in token B in
    a pool with a proportion of {proportion}/{100-proportion} and with an initial
    value of ${initial_pool_value} would result in an impermanent loss of {round(il,2)}%
    If you just hold the tokens you would have ${round(hold_value,2)} whereas
    in the pool you would have ${round(pool_value,2)}
    """
    df = pd.DataFrame(
        {
            "Metric": [
                "Price Change Token A",
                "Price Change Token B",
                "Initial Pool Value",
                "Proportion",
                "Impermanent Loss",
                "Hold Value",
                "Pool Value",
            ],
            "Value": [
                f"{price_changeA}%",
                f"{price_changeB}%",
                f"${initial_pool_value}",
                f"{proportion}/{100-proportion}",
                f"{round(il,2)}%",
                f"${round(hold_value,2)}",
                f"${round(pool_value,2)}",
            ],
        }
    )
    return df, il_str
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.cryptocurrency.tools.tools_model import calculate_apy, calculate_il
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_apy(
    apr: float, compounding_times: int, narrative: bool = False, export: str = ""
):
    """Displays APY value converted from APR

    Parameters
    ----------
    apr: float
        value in percentage
    compounding_times: int
        number of compounded periods in a year
    narrative: bool
        display narrative version instead of dataframe
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df, apy_str = calculate_apy(apr, compounding_times)
    if narrative:
        console.print(apy_str)
    else:
        print_rich_table(
            df,
            headers=list(df.columns),
            show_index=False,
            title="APR/APY Calculator",
        )
    # Export the dataframe (not the narrative string): export_data expects
    # tabular data, and this matches the sibling display_il implementation.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "aprtoapy",
        df,
    )
@log_start_end(log=logger)
def display_il(
    price_changeA: float,
    price_changeB: float,
    proportion: float,
    initial_pool_value: float,
    narrative: bool = False,
    export: str = "",
):
    """Displays Impermanent Loss in a custom liquidity pool

    Parameters
    ----------
    price_changeA: float
        price change of crypto A in percentage
    price_changeB: float
        price change of crypto B in percentage
    proportion: float
        percentage of first token in pool
    initial_pool_value: float
        initial value that pool contains
    narrative: bool
        display narrative version instead of dataframe
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    # Annotations corrected to float: the controller validates these with
    # check_positive_float / check_positive, and the docstring says float.
    df, il_str = calculate_il(
        price_changeA=price_changeA,
        price_changeB=price_changeB,
        proportion=proportion,
        initial_pool_value=initial_pool_value,
    )
    if narrative:
        console.print(il_str)
    else:
        print_rich_table(
            df,
            headers=list(df.columns),
            show_index=False,
            title="Impermanent Loss Calculator",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "il",
        df,
    )
__docformat__ = "numpy"
# pylint: disable=C0302
import argparse
import logging
from typing import List
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_non_negative,
check_positive,
check_positive_float,
)
from openbb_terminal.cryptocurrency.tools import tools_view
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class ToolsController(BaseController):
    """Tools Controller class.

    Menu for crypto calculators: impermanent-loss (`il`) and APR-to-APY
    (`aprtoapy`) conversion.
    """
    # Commands exposed by this menu.
    CHOICES_COMMANDS = ["aprtoapy", "il"]
    PATH = "/crypto/tools/"
    CHOICES_GENERATION = True
    def __init__(self, queue: List[str] = None):
        """Constructor.

        queue may be None; the BaseController handles the default.
        """
        super().__init__(queue)
        # Enable tab-completion only when an interactive prompt session exists.
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)
    def print_help(self):
        """Print help"""
        mt = MenuText("crypto/tools/")
        mt.add_cmd("aprtoapy")
        mt.add_cmd("il")
        console.print(text=mt.menu_text, menu="Cryptocurrency - Tools")
    @log_start_end(log=logger)
    def call_il(self, other_args: List[str]):
        """Process il command.

        Impermanent-loss calculator for a two-token liquidity pool.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="il",
            description="""Tool to calculate Impermanent Loss in a custom liquidity pool.
            Users can provide percentages increases for two tokens (and their weight in the liquidity pool)
            and verify the impermanent loss that can occur.""",
        )
        # NOTE(review): default=0 is outside choices=range(1, 101); argparse
        # does not validate defaults against choices, so this works, but an
        # explicit `-a 0` would be rejected — confirm whether that is intended.
        parser.add_argument(
            "-a",
            "--priceChangeA",
            dest="priceChangeA",
            type=check_non_negative,
            help="Token A price change in percentage",
            default=0,
            choices=range(1, 101),
            metavar="PRICECHANGEA",
        )
        parser.add_argument(
            "-b",
            "--priceChangeB",
            dest="priceChangeB",
            type=check_non_negative,
            help="Token B price change in percentage",
            default=100,
            choices=range(1, 101),
            metavar="PRICECHANGEB",
        )
        parser.add_argument(
            "-p",
            "--proportion",
            dest="proportion",
            type=check_positive,
            help="""Pool proportion. E.g., 50 means that pool contains 50%% of token A and 50%% of token B,
            30 means that pool contains 30%% of token A and 70%% of token B""",
            default=50,
            choices=range(1, 101),
            metavar="PROPORTION",
        )
        parser.add_argument(
            "-v",
            "--value",
            dest="value",
            type=check_positive_float,
            help="Initial amount of dollars that user provides to liquidity pool",
            default=1000,
        )
        parser.add_argument(
            "-n",
            "--narrative",
            dest="narrative",
            action="store_true",
            help="Flag to show narrative instead of dataframe",
            default=False,
        )
        # Treat a leading bare value as the -a argument for convenience.
        if other_args and not other_args[0][0] == "-":
            other_args.insert(0, "-a")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            tools_view.display_il(
                price_changeA=ns_parser.priceChangeA,
                price_changeB=ns_parser.priceChangeB,
                proportion=ns_parser.proportion,
                initial_pool_value=ns_parser.value,
                narrative=ns_parser.narrative,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_aprtoapy(self, other_args: List[str]):
        """Process aprtoapy command.

        Convert an annual percentage rate to the equivalent annual
        percentage yield for a chosen compounding frequency.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="aprtoapy",
            description="""
            Tool to calculate APY from APR value.
            Compouding periods, i.e., the number of times compounded per year
            can be defined with -c argument.
            """,
        )
        parser.add_argument(
            "--apr",
            dest="apr",
            type=check_positive,
            help="APR value in percentage to convert",
            default=100,
            choices=range(1, 101),
            metavar="APR",
        )
        parser.add_argument(
            "-c",
            "--compounding",
            dest="compounding",
            type=check_positive,
            help="Number of compounded periods in a year. 12 means compounding monthly",
            default=12,
            choices=range(1, 101),
            metavar="COMPOUNDING",
        )
        parser.add_argument(
            "-n",
            "--narrative",
            dest="narrative",
            action="store_true",
            help="Flag to show narrative instead of dataframe",
            default=False,
        )
        # Treat a leading bare value as the --apr argument for convenience.
        if other_args and not other_args[0][0] == "-":
            other_args.insert(0, "--apr")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        # parse_known_args_and_warn returns None on parse failure or --help.
        if ns_parser:
            tools_view.display_apy(
                apr=ns_parser.apr,
                compounding_times=ns_parser.compounding,
                narrative=ns_parser.narrative,
                export=ns_parser.export,
            )
__docformat__ = "numpy"
import logging
import pandas as pd
from coinmarketcapapi import CoinMarketCapAPI, CoinMarketCapAPIError
import openbb_terminal.config_terminal as cfg
from openbb_terminal.decorators import log_start_end, check_api_key
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
FILTERS = ["Symbol", "CMC_Rank", "LastPrice", "DayPctChange", "MarketCap"]
sort_map = {
"Symbol": "Symbol",
"CMC_Rank": "CMC_Rank",
"LastPrice": "Last Price",
"DayPctChange": "1 Day Pct Change",
"MarketCap": "Market Cap ($B)",
}
@log_start_end(log=logger)
@check_api_key(["API_CMC_KEY"])
def get_cmc_top_n(sortby: str = "CMC_Rank", ascend: bool = True) -> pd.DataFrame:
    """Shows top n coins. [Source: CoinMarketCap]

    Parameters
    ----------
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        Coin Market Cap:s API documentation, see:
        https://coinmarketcap.com/api/documentation/v1/#operation/getV1CryptocurrencyListingsLatest
    ascend: bool
        Whether to sort ascending or descending

    Returns
    -------
    pd.DataFrame
        Top coin on CoinMarketCap. Empty on API failure.
    """
    df = pd.DataFrame()
    try:
        cmc = CoinMarketCapAPI(cfg.API_CMC_KEY)
        ratings = cmc.cryptocurrency_listings_latest().data
        symbol, rank, price, pchange1d, mkt_cap = [], [], [], [], []
        for coin in ratings:
            symbol.append(coin["symbol"])
            rank.append(coin["cmc_rank"])
            price.append(coin["quote"]["USD"]["price"])
            pchange1d.append(coin["quote"]["USD"]["percent_change_24h"])
            # Express market cap in billions of dollars.
            mkt_cap.append(coin["quote"]["USD"]["market_cap"] / (10**9))
        df = pd.DataFrame(data=[symbol, rank, price, pchange1d, mkt_cap]).transpose()
        df.columns = [
            "Symbol",
            "CMC_Rank",
            "Last Price",
            "1 Day Pct Change",
            "Market Cap ($B)",
        ]
    except CoinMarketCapAPIError as e:
        if "API Key" in str(e):
            console.print("[red]Invalid API Key[/red]\n")
        else:
            console.print(e)
    # Only sort when the fetch succeeded: sorting the empty fallback frame by
    # a non-existent column would raise KeyError and mask the error message.
    if not df.empty:
        df = df.sort_values(by=sort_map[sortby], ascending=ascend)
    return df
__docformat__ = "numpy"
import logging
import os
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_very_long_number_formatter,
)
from openbb_terminal.cryptocurrency.discovery import pycoingecko_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# pylint: disable=R0904, C0302
COINS_COLUMNS = [
"Symbol",
"Name",
"Volume [$]",
"Market Cap",
"Market Cap Rank",
"7D Change [%]",
"24H Change [%]",
]
@log_start_end(log=logger)
def display_coins(
    category: str,
    limit: int = 250,
    sortby: str = "Symbol",
    export: str = "",
    ascend: bool = False,
) -> None:
    """Prints table showing top coins [Source: CoinGecko]

    Parameters
    ----------
    category: str
        If no category is passed it will search for all coins. (E.g., smart-contract-platform)
    limit: int
        Number of records to display
    sortby: str
        Key to sort data
    export : str
        Export dataframe data to csv,json,xlsx file
    ascend: bool
        Sort data in ascending order
    """
    df = pycoingecko_model.get_coins(
        limit=limit,
        category=category,
        sortby=sortby,
        ascend=ascend,
    )
    if not df.empty:
        # Keep only the columns shown in the table, in display order.
        df = df[
            [
                "symbol",
                "name",
                "total_volume",
                "market_cap",
                "market_cap_rank",
                "price_change_percentage_7d_in_currency",
                "price_change_percentage_24h_in_currency",
            ]
        ]
        # Rename API columns to the human-readable headers.
        df = df.set_axis(
            COINS_COLUMNS,
            axis=1,
            copy=True,
        )
        for col in ["Volume [$]", "Market Cap"]:
            if col in df.columns:
                # Pass the formatter directly; the lambda wrapper was redundant.
                df[col] = df[col].apply(lambda_very_long_number_formatter)
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "cgtop",
            df,
        )
    else:
        console.print("\nUnable to retrieve data from CoinGecko.\n")
@log_start_end(log=logger)
def display_gainers(
    interval: str = "1h",
    limit: int = 20,
    sortby: str = "market_cap_rank",
    export: str = "",
) -> None:
    """Prints table showing Largest Gainers - coins which gain the most in given period. [Source: CoinGecko]

    Parameters
    ----------
    interval: str
        Time period by which data is displayed. One from [1h, 24h, 7d, 14d, 30d, 60d, 1y]
    limit: int
        Number of records to display
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = pycoingecko_model.get_gainers(limit=limit, interval=interval, sortby=sortby)
    if not df.empty:
        for col in ["Volume [$]", "Market Cap"]:
            if col in df.columns:
                # Pass the formatter directly; the lambda wrapper was redundant.
                df[col] = df[col].apply(lambda_very_long_number_formatter)
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "gainers",
            df,
        )
    else:
        console.print("\nUnable to retrieve data from CoinGecko.\n")
@log_start_end(log=logger)
def display_losers(
    interval: str = "1h",
    limit: int = 20,
    export: str = "",
    sortby: str = "Market Cap Rank",
) -> None:
    """Prints table showing Largest Losers - coins which lost the most in given period of time. [Source: CoinGecko]

    Parameters
    ----------
    interval: str
        Time period by which data is displayed. One from [1h, 24h, 7d, 14d, 30d, 60d, 1y]
    limit: int
        Number of records to display
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = pycoingecko_model.get_losers(limit=limit, interval=interval, sortby=sortby)
    if not df.empty:
        for col in ["Volume [$]", "Market Cap"]:
            if col in df.columns:
                # Pass the formatter directly; the lambda wrapper was redundant.
                df[col] = df[col].apply(lambda_very_long_number_formatter)
        print_rich_table(
            df.head(limit),
            headers=list(df.columns),
            show_index=False,
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "cglosers",
            df,
        )
    else:
        console.print("\nUnable to retrieve data from CoinGecko.\n")
    @log_start_end(log=logger)
__docformat__ = "numpy"
import logging
from typing import Any, Optional
import pandas as pd
from openbb_terminal.cryptocurrency.coinpaprika_helpers import PaprikaSession
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
CATEGORIES = [
"currencies",
"exchanges",
"icos",
"people",
"tags",
"all",
]
FILTERS = ["category", "id", "name"]
@log_start_end(log=logger)
def get_search_results(
    query: str,
    category: Optional[Any] = None,
    modifier: Optional[Any] = None,
    sortby: str = "id",
    ascend: bool = True,
) -> pd.DataFrame:
    """Search CoinPaprika. [Source: CoinPaprika]

    Parameters
    ----------
    query: str
        phrase for search
    category: Optional[Any]
        one or more categories (comma separated) to search.
        Available options: currencies|exchanges|icos|people|tags
        Default: currencies,exchanges,icos,people,tags
    modifier: Optional[Any]
        set modifier for search results. Available options: symbol_search -
        search only by symbol (works for currencies only)
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see https://api.coinpaprika.com/docs#tag/Tools/paths/~1search/get)
    ascend: bool
        Flag to sort data descending

    Returns
    -------
    pd.DataFrame
        Search Results
        Columns: id, name, category
    """
    session = PaprikaSession()
    if category is None:
        category = "currencies,exchanges,icos,people,tags"
    data = session.make_request(
        session.ENDPOINTS["search"], q=query, c=category, modifier=modifier, limit=100
    )
    # The response maps each category name to a list of matches.  Use distinct
    # loop variables: the previous code rebound the `category` parameter
    # inside the loop, which was confusing and error-prone.
    results = [
        {
            "id": match.get("id"),
            "name": match.get("name"),
            "category": category_name,
        }
        for category_name, matches in data.items()
        for match in matches
    ]
    df = pd.DataFrame(results)
    df = df.sort_values(by=sortby, ascending=ascend)
    return df
__docformat__ = "numpy"
import logging
import os
import numpy as np
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_very_long_number_formatter,
)
from openbb_terminal.cryptocurrency.discovery import dappradar_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_top_nfts(limit: int = 10, sortby: str = "", export: str = "") -> None:
    """Prints table showing top nft collections [Source: https://dappradar.com/]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = dappradar_model.get_top_nfts(sortby, limit)
    if df.empty:
        console.print("[red]Failed to fetch data from DappRadar[/red]\n")
        return
    for col in ["Floor Price [$]", "Avg Price [$]", "Market Cap [$]", "Volume [$]"]:
        if col in df.columns:
            # fillna(-1)/replace(-1, ...) round-trip keeps the formatter away
            # from NaNs and then restores them.
            # NOTE(review): the formatter may return strings, in which case
            # replace(-1, np.nan) on the result would not match — confirm
            # lambda_very_long_number_formatter's behavior for -1.
            df[col] = (
                df[col]
                .fillna(-1)
                .apply(lambda x: lambda_very_long_number_formatter(x))
                .replace(-1, np.nan)
            )
    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title="Top NFT collections",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "drnft",
        df,
    )
@log_start_end(log=logger)
def display_top_games(limit: int = 10, export: str = "", sortby: str = "") -> None:
    """Prints table showing top blockchain games [Source: https://dappradar.com/]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = dappradar_model.get_top_games(sortby, limit)
    if df.empty:
        console.print("[red]Failed to fetch data from DappRadar[/red]\n")
        return
    # Humanize the large numeric columns for display.
    for col in ["Daily Users", "Daily Volume [$]"]:
        if col in df.columns:
            df[col] = df[col].apply(lambda x: lambda_very_long_number_formatter(x))
    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title="Top Blockchain Games",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "drgames",
        df,
    )
@log_start_end(log=logger)
def display_top_dexes(limit: int = 10, export: str = "", sortby: str = "") -> None:
    """Prints table showing top decentralized exchanges [Source: https://dappradar.com/]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = dappradar_model.get_top_dexes(sortby, limit)
    if df.empty:
        console.print("[red]Failed to fetch data from DappRadar[/red]\n")
        return
    # Humanize the large numeric columns for display.
    for col in ["Daily Users", "Daily Volume [$]"]:
        if col in df.columns:
            df[col] = df[col].apply(lambda x: lambda_very_long_number_formatter(x))
    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title="Top Decentralized Exchanges",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "drdex",
        df,
    )
@log_start_end(log=logger)
def display_top_dapps(limit: int = 10, export: str = "", sortby: str = "") -> None:
    """Prints table showing top decentralized applications [Source: https://dappradar.com/]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = dappradar_model.get_top_dapps(sortby, limit)
    if df.empty:
        console.print("[red]Failed to fetch data from DappRadar[/red]\n")
        return
    # Humanize the large numeric columns for display.
    for col in ["Daily Users", "Daily Volume [$]"]:
        if col in df.columns:
            df[col] = df[col].apply(lambda x: lambda_very_long_number_formatter(x))
    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title="Top Decentralized Applications",
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "drdapps",
        df,
    )
__docformat__ = "numpy"
# pylint: disable=C0301,E1137
from typing import Optional
import logging
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
NFT_COLUMNS = [
"Name",
"Protocols",
"Floor Price [$]",
"Avg Price [$]",
"Market Cap [$]",
"Volume [$]",
]
DAPPS_COLUMNS = [
"Name",
"Category",
"Protocols",
"Daily Users",
"Daily Volume [$]",
]
DEX_COLUMNS = [
"Name",
"Category",
"Daily Users",
"Daily Volume [$]",
]
@log_start_end(log=logger)
def _make_request(url: str, verbose: bool = True, timeout: int = 10) -> Optional[dict]:
    """Helper method handles dappradar api requests. [Source: https://dappradar.com/]

    Parameters
    ----------
    url: str
        endpoint url
    verbose: bool
        whether to print the text from the response
    timeout: int
        seconds to wait for the server before giving up

    Returns
    -------
    Optional[dict]:
        dictionary with response data, or None on any failure
    """
    headers = {
        "Accept": "application/json",
        "User-Agent": get_user_agent(),
        "referer": "https://dappradar.com/",
    }
    try:
        # Without an explicit timeout a stalled connection would hang forever.
        response = requests.get(url, headers=headers, timeout=timeout)
    except requests.exceptions.RequestException as e:
        logger.exception("dappradar request failed: %s", str(e))
        if verbose:
            console.print(f"[red]dappradar request failed: {e}[/red]")
        return None
    if not 200 <= response.status_code < 300:
        if verbose:
            console.print(f"[red]dappradar api exception: {response.text}[/red]")
        return None
    try:
        return response.json()
    except Exception as e:  # noqa: F841
        logger.exception("Invalid Response: %s", str(e))
        if verbose:
            console.print(f"[red]Invalid Response:: {response.text}[/red]")
        return None
@log_start_end(log=logger)
def get_top_nfts(sortby: str = "", limit: int = 10) -> pd.DataFrame:
    """Get top nft collections [Source: https://dappradar.com/]

    Parameters
    ----------
    sortby: str
        Key by which to sort data (one of NFT_COLUMNS)
    limit: int
        Number of records to return

    Returns
    -------
    pd.DataFrame
        NFTs Columns: Name, Protocols, Floor Price [$], Avg Price [$], Market Cap [$], Volume [$]
    """
    # Fix: "&currency" in the query string had been mangled into the single
    # character "¤cy" (HTML-entity corruption of "&curren"), producing an
    # invalid currency parameter.
    response = _make_request(
        "https://nft-sales-service.dappradar.com/v2/collection/day?limit=20&p"
        "age=1&currency=USD&sort=marketCapInFiat&order=desc"
    )
    if not response:
        return pd.DataFrame()
    data = response.get("results")
    df = pd.DataFrame(
        data,
        columns=[
            "name",
            "activeProtocols",
            "floorPriceInFiat",
            "avgPriceInFiat",
            "marketCapInFiat",
            "volumeInFiat",
        ],
    )
    # Rename raw API fields to the display column names.
    df = df.set_axis(
        NFT_COLUMNS,
        axis=1,
        copy=False,
    )
    # activeProtocols is a list of protocol names; flatten for display.
    df["Protocols"] = df["Protocols"].apply(lambda x: ",".join(x))
    if sortby in NFT_COLUMNS:
        df = df.sort_values(by=sortby, ascending=False)
    return df.head(limit)
@log_start_end(log=logger)
def get_top_dexes(sortby: str = "", limit: int = 10) -> pd.DataFrame:
    """Get top dexes by daily volume and users [Source: https://dappradar.com/]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    limit: int
        Number of records to return

    Returns
    -------
    pd.DataFrame
        Top decentralized exchanges. Columns: Name, Daily Users, Daily Volume [$]
    """
    data = _make_request(
        "https://dappradar.com/v2/api/dapps?params=WkdGd2NISmhaR0Z5Y0dGblpUMHhKbk5uY205MWNEMXR"
        "ZWGdtWTNWeWNtVnVZM2s5VlZORUptWmxZWFIxY21Wa1BURW1jbUZ1WjJVOVpHRjVKbU5oZEdWbmIzSjVQV1Y0WTJ"
        "oaGJtZGxjeVp6YjNKMFBYUnZkR0ZzVm05c2RXMWxTVzVHYVdGMEptOXlaR1Z5UFdSbGMyTW1iR2x0YVhROU1qWT0="
    )
    if not data:
        return pd.DataFrame()
    rows = []
    for entry in data["dapps"]:
        stats = entry["statistic"]
        rows.append(
            [
                entry["name"],
                entry["category"],
                stats["userActivity"],
                stats["totalVolumeInFiat"],
            ]
        )
    df = pd.DataFrame(rows, columns=DEX_COLUMNS)
    # Sort first (user-selected key may be any DEX column), then narrow to
    # exchanges and drop the helper Category column.
    if sortby in DEX_COLUMNS:
        df = df.sort_values(by=sortby, ascending=False)
    df = df[df["Category"] == "exchanges"]
    if df.empty:
        return pd.DataFrame()
    df = df.drop(columns="Category")
    return df.head(limit)
@log_start_end(log=logger)
def get_top_games(sortby: str = "", limit: int = 10) -> pd.DataFrame:
    """Get top blockchain games by daily volume and users [Source: https://dappradar.com/]

    Parameters
    ----------
    limit: int
        Number of records to display
    sortby: str
        Key by which to sort data

    Returns
    -------
    pd.DataFrame
        Top blockchain games. Columns: Name, Daily Users, Daily Volume [$]
    """
    data = _make_request(
        "https://dappradar.com/v2/api/dapps?params=WkdGd2NISmhaR0Z5Y0dGblpUMHhKbk5uY205MWNEMX"
        "RZWGdtWTNWeWNtVnVZM2s5VlZORUptWmxZWFIxY21Wa1BURW1jbUZ1WjJVOVpHRjVKbU5oZEdWbmIzSjVQV2R"
        "oYldWekpuTnZjblE5ZFhObGNpWnZjbVJsY2oxa1pYTmpKbXhwYldsMFBUSTI="
    )
    if not data:
        return pd.DataFrame()
    rows = [
        [
            dapp["name"],
            dapp["category"],
            dapp["statistic"]["userActivity"],
            dapp["statistic"]["totalVolumeInFiat"],
        ]
        for dapp in data["dapps"]
    ]
    df = pd.DataFrame(rows, columns=DEX_COLUMNS).sort_values(
        "Daily Users", ascending=False
    )
    if sortby in df.columns:
        df = df.sort_values(by=sortby, ascending=False)
    df = df[df["Category"] == "games"]
    # Consistency fix: like get_top_dexes, return a plain empty frame when no
    # games survive the filter, instead of an empty frame with stale columns.
    if df.empty:
        return pd.DataFrame()
    df = df.drop(columns="Category")
    return df.head(limit)
@log_start_end(log=logger)
def get_top_dapps(sortby: str = "", limit: int = 10) -> pd.DataFrame:
    """Get top decentralized applications by daily volume and users [Source: https://dappradar.com/]

    Parameters
    ----------
    sortby: str
        Key by which to sort data
    limit: int
        Number of records to return

    Returns
    -------
    pd.DataFrame
        Top decentralized exchanges.
        Columns: Name, Category, Protocols, Daily Users, Daily Volume [$]
    """
    data = _make_request(
        "https://dappradar.com/v2/api/dapps?params=WkdGd2NISmhaR0Z5Y0dGblpUMHhKbk5uY205MWNEMX"
        "RZWGdtWTNWeWNtVnVZM2s5VlZORUptWmxZWFIxY21Wa1BURW1jbUZ1WjJVOVpHRjVKbk52Y25ROWRYTmxjaVp"
        "2Y21SbGNqMWtaWE5qSm14cGJXbDBQVEky",
        False,
    )
    # Fix: the original trailing "return pd.DataFrame()" line was fused with
    # extraction metadata ("... | /repl_openbb-2.1.7..."), a syntax error.
    if not data:
        return pd.DataFrame()
    rows = [
        [
            dapp["name"],
            dapp["category"],
            dapp["activeProtocols"],
            dapp["statistic"]["userActivity"],
            dapp["statistic"]["totalVolumeInFiat"],
        ]
        for dapp in data["dapps"]
    ]
    df = pd.DataFrame(
        rows,
        columns=DAPPS_COLUMNS,
    ).sort_values("Daily Users", ascending=False)
    # activeProtocols is a list of protocol names; flatten for display.
    df["Protocols"] = df["Protocols"].apply(lambda x: ",".join(x))
    if sortby in DAPPS_COLUMNS:
        df = df.sort_values(by=sortby, ascending=False)
    return df.head(limit)
__docformat__ = "numpy"
import json
import logging
import os
from typing import List
import pandas as pd
from pycoingecko import CoinGeckoAPI
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# Display column name -> raw field name from CoinGecko's /coins/markets payload.
COINS_COLUMNS_MAP = {
    "Symbol": "symbol",
    "Name": "name",
    "Volume [$]": "total_volume",
    "Market Cap": "market_cap",
    "Market Cap Rank": "market_cap_rank",
    "7D Change [%]": "price_change_percentage_7d_in_currency",
    "24H Change [%]": "price_change_percentage_24h_in_currency",
}
# Human-readable period -> query-string suffix for CoinGecko web endpoints.
PERIODS = {
    "1h": "?time=h1",
    "24h": "?time=h24",
    "7d": "?time=d7",
    "14d": "?time=d14",
    "30d": "?time=d30",
    "60d": "?time=d60",
    "1y": "?time=y1",
}
# Intervals accepted by the REST API's price_change_percentage parameter.
API_PERIODS = ["14d", "1h", "1y", "200d", "24h", "30d", "7d"]
# Discovery category name -> tab index (presumably on the CoinGecko site;
# TODO confirm against the consumer of this mapping).
CATEGORIES = {
    "trending": 0,
    "most_voted": 1,
    "positive_sentiment": 2,
    "recently_added": 3,
    "most_visited": 4,
}
# Sortable-column whitelists for the various discovery tables.
GAINERS_FILTERS = ["Rank", "Symbol", "Name", "Volume", "Price", "Change"]
TRENDING_FILTERS = [
    "Rank",
    "Name",
    "Price_BTC",
    "Price_USD",
]
RECENTLY_FILTERS = [
    "Rank",
    "Name",
    "Symbol",
    "Price",
    "Change_1h",
    "Change_24h",
    "Added",
    "Url",
]
YFARMS_FILTERS = [
    "Rank",
    "Name",
    "Value_Locked",
    "Return_Year",
]
CAP_FILTERS = [
    "Rank",
    "Name",
    "Symbol",
    "Price",
    "Change_1h",
    "Change_24h",
    "Change_7d",
    "Volume_24h",
    "Market_Cap",
]
DEX_FILTERS = [
    "Name",
    "Rank",
    "Volume_24h",
    "Coins",
    "Pairs",
    "Visits",
    "Most_Traded",
    "Market_Share",
]
# Display columns produced by get_gainers_or_losers, before the
# interval-specific "Change <interval> [%]" column is appended.
GAINERS_LOSERS_COLUMNS = [
    "Symbol",
    "Name",
    "Price [$]",
    "Market Cap",
    "Market Cap Rank",
    "Volume [$]",
]
@log_start_end(log=logger)
def read_file_data(file_name: str) -> dict:
    """Load a JSON data file shipped in the package ``data`` directory.

    Parameters
    ----------
    file_name: str
        Name of the file; must carry a ``.json`` extension.

    Returns
    -------
    dict
        Parsed JSON content.

    Raises
    ------
    TypeError
        If the file does not have a ``.json`` extension.
    """
    # os.path.splitext is robust to names with no dot or several dots
    # (e.g. "a.b.json"), unlike the previous file_name.split(".")[1],
    # which raised IndexError on dot-less names.
    if os.path.splitext(file_name)[1] != ".json":
        raise TypeError("Please load json file")
    par_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    path = os.path.join(par_dir, "data", file_name)
    with open(path, encoding="utf8") as f:
        return json.load(f)
@log_start_end(log=logger)
def get_categories_keys() -> List[str]:
    """Get list of categories keys

    Returns
    -------
    List[str]
        List of categories keys
    """
    # The JSON file maps category key -> metadata; only the keys are needed.
    data = read_file_data("coingecko_categories.json")
    return list(data)
@log_start_end(log=logger)
def get_coins(
    limit: int = 250,
    category: str = "",
    sortby: str = "Symbol",
    ascend: bool = False,
) -> pd.DataFrame:
    """Get N coins from CoinGecko [Source: CoinGecko]

    Parameters
    ----------
    limit: int
        Number of top coins to grab from CoinGecko
    category: str
        Category of the coins we want to retrieve
    sortby: str
        Key to sort data; must be a display name in COINS_COLUMNS_MAP,
        otherwise no sorting is applied
    ascend: bool
        Sort data in ascending order

    Returns
    -------
    pd.DataFrame
        N coins (raw /coins/markets columns)
    """
    client = CoinGeckoAPI()
    df = pd.DataFrame()
    # Remember the requested size: `limit` is consumed by the pagination loop.
    table_size = limit
    # CoinGecko's API returns all coins without limit when the category is set.
    if category:
        kwargs = {
            "vs_currency": "usd",
            "order": "market_cap_desc",
            "per_page": limit,
            "sparkline": False,
            "price_change_percentage": "1h,24h,7d,14d,30d,200d,1y",
            "category": category,
        }
        data = client.get_coins_markets(**kwargs)
        df = pd.concat([df, pd.DataFrame(data)], ignore_index=True)
    else:
        # The API caps per_page at 250, so larger requests are paginated.
        page_size = min(limit, 250)
        page = 1
        while limit > 0:
            kwargs = {
                "vs_currency": "usd",
                "order": "market_cap_desc",
                "per_page": page_size,
                "sparkline": False,
                "price_change_percentage": "1h,24h,7d,14d,30d,200d,1y",
                "page": page,
            }
            data = client.get_coins_markets(**kwargs)
            df = pd.concat([df, pd.DataFrame(data)], ignore_index=True)
            # NOTE(review): the last page may over-fetch when limit is not a
            # multiple of page_size; head(table_size) below trims the excess.
            limit -= page_size
            page += 1
    if sortby in COINS_COLUMNS_MAP:
        # Drop rows lacking volume/market cap before sorting on those fields.
        df = df[(df["total_volume"].notna()) & (df["market_cap"].notna())]
        df = df.sort_values(by=COINS_COLUMNS_MAP[sortby], ascending=ascend)
    # Nullable Int64 keeps missing ranks representable without float coercion.
    df = df.astype({"market_cap_rank": "Int64"})
    return df.head(table_size)
@log_start_end(log=logger)
def get_gainers_or_losers(
    limit: int = 20,
    interval: str = "1h",
    typ: str = "gainers",
    sortby: str = "market_cap",
) -> pd.DataFrame:
    """Returns data about top gainers - coins which gain the most in given period and
    top losers - coins that lost the most in given period of time. [Source: CoinGecko]

    Parameters
    ----------
    limit: int
        Num of coins to get
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)
    interval: str
        One from {14d,1h,1y,200d,24h,30d,7d}
    typ: str
        Either "gainers" or "losers"; any other value is treated like "losers"

    Returns
    -------
    pd.DataFrame
        Top Gainers / Top Losers - coins which gain/lost most in price in given period of time.
        Columns: Symbol, Name, Price [$], Market Cap, Market Cap Rank, Volume [$], Change {interval} [%]
    """
    if interval not in API_PERIODS:
        raise ValueError(
            f"Wrong time period\nPlease chose one from list: {API_PERIODS}"
        )
    df = get_coins(limit)
    # Gainers: largest change first (descending); losers: ascending.
    sorted_df = df.sort_values(
        by=[f"price_change_percentage_{interval}_in_currency"],
        ascending=typ != "gainers",
    )
    sorted_df = sorted_df[
        [
            "symbol",
            "name",
            "current_price",
            "market_cap",
            "market_cap_rank",
            "total_volume",
            f"price_change_percentage_{interval}_in_currency",
        ]
    ]
    # Rename raw fields to display names and label the change column by interval.
    sorted_df = sorted_df.set_axis(
        GAINERS_LOSERS_COLUMNS + [f"Change {interval} [%]"],
        axis=1,
        copy=True,
    )
    # NOTE(review): the default sortby ("market_cap") is a raw field name and is
    # never in GAINERS_LOSERS_COLUMNS (display names), so this branch is skipped
    # for default calls and the gainers/losers ordering above is preserved —
    # confirm whether that is intentional before changing.
    if sortby in GAINERS_LOSERS_COLUMNS:
        sorted_df = sorted_df[
            (sorted_df["Volume [$]"].notna()) & (sorted_df["Market Cap"].notna())
        ]
        sorted_df = sorted_df.sort_values(by=sortby, ascending=True)
    return sorted_df
def get_gainers(
    interval: str = "1h",
    limit: int = 50,
    sortby: str = "market_cap_rank",
) -> pd.DataFrame:
    """Shows Largest Gainers - coins which gain the most in given period. [Source: CoinGecko]

    Parameters
    ----------
    interval: str
        Time interval by which data is displayed. One from [1h, 24h, 7d, 14d, 30d, 60d, 1y]
    limit: int
        Number of records to display
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)

    Returns
    -------
    pd.DataFrame
        Top Gainers - coins which gain most in price in given period of time.
        Columns: Symbol, Name, Volume, Price, %Change_{interval}, Url
    """
    # Thin convenience wrapper: delegate with typ fixed to "gainers".
    params = {"limit": limit, "interval": interval, "sortby": sortby}
    return get_gainers_or_losers(typ="gainers", **params)
def get_losers(
    interval: str = "1h",
    limit: int = 50,
    sortby: str = "market_cap_rank",
) -> pd.DataFrame:
    """Shows Largest Losers - coins which lose the most in given period. [Source: CoinGecko]

    Parameters
    ----------
    interval: str
        Time interval by which data is displayed. One from [1h, 24h, 7d, 14d, 30d, 60d, 1y]
    limit: int
        Number of records to display
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)

    Returns
    -------
    pd.DataFrame
        Top Losers - coins which lost most in price in given period of time.
        Columns: Symbol, Name, Volume, Price, %Change_{interval}, Url
    """
    # Thin convenience wrapper: delegate with typ fixed to "losers".
    params = {"limit": limit, "interval": interval, "sortby": sortby}
    return get_gainers_or_losers(typ="losers", **params)
@log_start_end(log=logger)
def get_trending_coins() -> pd.DataFrame:
    """Returns trending coins [Source: CoinGecko]

    Returns
    -------
    pd.DataFrame
        Trending Coins. Columns: Symbol, Name, Market Cap Rank
    """
    client = CoinGeckoAPI()
    data = client.get_search_trending()
    # Each trending entry is wrapped in an "item" dict by the CoinGecko API.
    rows = [
        [coin["item"]["id"], coin["item"]["name"], coin["item"]["market_cap_rank"]]
        for coin in data["coins"]
    ]
    # Fix: the rank column was previously misnamed "market_cap Cap Rank";
    # building all rows at once also avoids per-row df.loc assignments.
    return pd.DataFrame(rows, columns=["Symbol", "Name", "Market Cap Rank"])
@log_start_end(log=logger)
def get_coin_list() -> pd.DataFrame:
    """Get list of coins available on CoinGecko [Source: CoinGecko]

    Returns
    -------
    pd.DataFrame
        Coins available on CoinGecko
        Columns: id, symbol, name
    """
    raw = CoinGeckoAPI().get_coins_list()
    coins = pd.DataFrame(raw, columns=["id", "symbol", "name"])
    # reset_index materialises the positional index as an "index" column.
    return coins.reset_index()
@log_start_end(log=logger)
def get_coins_for_given_exchange(exchange_id: str = "binance", page: int = 1) -> dict:
    """Helper method to get all coins available on binance exchange [Source: CoinGecko]

    Parameters
    ----------
    exchange_id: str
        id of exchange
    page: int
        number of page. One page contains 100 records

    Returns
    -------
    dict
        dictionary with all trading pairs on binance
    """
    # Only the "tickers" list of the exchange payload is of interest here.
    payload = CoinGeckoAPI().get_exchanges_tickers_by_id(id=exchange_id, page=page)
    return payload["tickers"]
@log_start_end(log=logger)
def get_mapping_matrix_for_exchange(exchange_id: str, pages: int = 12) -> dict:
    """Creates a matrix with all coins available on Binance with corresponding coingecko coin_id. [Source: CoinGecko]

    Parameters
    ----------
    exchange_id: str
        id of exchange: binance
    pages: int
        number of pages. One page contains 100 records

    Returns
    -------
    dict
        dictionary with all coins: {"ETH" : "ethereum"}
    """
    # NOTE: the original trailing "return coins_dct" line was fused with
    # extraction metadata, a syntax error — rewritten cleanly.
    coins_dct: dict = {}
    # CoinGecko pagination is 1-based; iterating from 1 avoids requesting a
    # page 0 (which mirrors page 1) and ensures the final page is fetched —
    # TODO confirm against the pycoingecko client behavior.
    for page in range(1, pages + 1):
        for coin in get_coins_for_given_exchange(exchange_id=exchange_id, page=page):
            bin_symbol, gecko_id = coin["base"], coin["coin_id"]
            # Keep the first mapping seen for each exchange symbol.
            coins_dct.setdefault(bin_symbol, gecko_id)
    return coins_dct
__docformat__ = "numpy"
# pylint:disable=too-many-lines
import argparse
import logging
import webbrowser
from datetime import datetime
from typing import List
import numpy as np
import pandas as pd
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common.technical_analysis import (
custom_indicators_view,
momentum_view,
overlap_model,
overlap_view,
trend_indicators_view,
volatility_model,
volatility_view,
volume_view,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
check_non_negative,
check_positive,
check_positive_list,
check_positive_float,
valid_date,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import CryptoBaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class TechnicalAnalysisController(CryptoBaseController):
"""Technical Analysis Controller class"""
CHOICES_COMMANDS = [
"load",
"ema",
"sma",
"wma",
"hma",
"vwap",
"zlma",
"cci",
"macd",
"rsi",
"stoch",
"fisher",
"cg",
"adx",
"aroon",
"bbands",
"donchian",
"kc",
"ad",
"adosc",
"obv",
"fib",
"tv",
]
PATH = "/crypto/ta/"
CHOICES_GENERATION = True
def __init__(
self,
coin: str,
start: datetime,
interval: str,
stock: pd.DataFrame,
queue: List[str] = None,
):
"""Constructor"""
super().__init__(queue)
self.coin = coin
self.start = start
self.interval = interval
self.stock = stock
self.stock["Adj Close"] = stock["Close"]
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = self.choices_default
choices["load"] = {
"--interval": {
c: {}
for c in [
"1",
"5",
"15",
"30",
"60",
"240",
"1440",
"10080",
"43200",
]
},
"-i": "--interval",
"--exchange": {c: {} for c in self.exchanges},
"--source": {c: {} for c in ["CCXT", "YahooFinance", "CoingGecko"]},
"--vs": {c: {} for c in ["usd", "eur"]},
"--start": None,
"-s": "--start",
"--end": None,
"-e": "--end",
}
self.completer = NestedCompleter.from_nested_dict(choices)
    def print_help(self):
        """Print the crypto TA menu, grouped by indicator family."""
        # Header line shows the loaded coin and the start of its data window.
        crypto_str = f" {self.coin} (from {self.start.strftime('%Y-%m-%d')})"
        mt = MenuText("crypto/ta/", 90)
        mt.add_param("_ticker", crypto_str)
        mt.add_raw("\n")
        mt.add_cmd("tv")
        mt.add_raw("\n")
        mt.add_info("_overlap_")
        mt.add_cmd("ema")
        mt.add_cmd("sma")
        mt.add_cmd("wma")
        mt.add_cmd("hma")
        mt.add_cmd("zlma")
        mt.add_cmd("vwap")
        mt.add_info("_momentum_")
        mt.add_cmd("cci")
        mt.add_cmd("macd")
        mt.add_cmd("rsi")
        mt.add_cmd("stoch")
        mt.add_cmd("fisher")
        mt.add_cmd("cg")
        mt.add_info("_trend_")
        mt.add_cmd("adx")
        mt.add_cmd("aroon")
        mt.add_info("_volatility_")
        mt.add_cmd("bbands")
        mt.add_cmd("donchian")
        mt.add_cmd("kc")
        mt.add_info("_volume_")
        mt.add_cmd("ad")
        mt.add_cmd("adosc")
        mt.add_cmd("obv")
        mt.add_info("_custom_")
        mt.add_cmd("fib")
        console.print(text=mt.menu_text, menu="Cryptocurrency - Technical Analysis")
def custom_reset(self):
"""Class specific component of reset command"""
if self.coin:
return ["crypto", f"load {self.coin}", "ta"]
return []
    @log_start_end(log=logger)
    def call_tv(self, other_args):
        """Process tv command

        Opens the TradingView chart for the loaded coin in the default
        web browser.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="tv",
            description="""View TradingView for technical analysis. [Source: TradingView]""",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # temp USDT before we make changes to crypto ta_controller
            webbrowser.open(
                f"https://www.tradingview.com/chart/?symbol={self.coin}usdt"
            )
# COMMON
# TODO: Go through all models and make sure all needed columns are in dfs
@log_start_end(log=logger)
def call_ema(self, other_args: List[str]):
"""Process ema command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="ema",
description="""
The Exponential Moving Average is a staple of technical
analysis and is used in countless technical indicators. In a Simple Moving
Average, each value in the time period carries equal weight, and values outside
of the time period are not included in the average. However, the Exponential
Moving Average is a cumulative calculation, including all data. Past values have
a diminishing contribution to the average, while more recent values have a greater
contribution. This method allows the moving average to be more responsive to changes
in the data.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive_list,
default=overlap_model.WINDOW_LENGTHS,
help="Window lengths. Multiple values indicated as comma separated values.",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=check_non_negative,
default=0,
help="offset",
choices=range(0, 100),
metavar="N_OFFSET",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
overlap_view.view_ma(
ma_type="EMA",
symbol=self.coin,
data=self.stock["Adj Close"],
window=ns_parser.n_length,
offset=ns_parser.n_offset,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_sma(self, other_args: List[str]):
"""Process sma command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="sma",
description="""
Moving Averages are used to smooth the data in an array to
help eliminate noise and identify trends. The Simple Moving Average is literally
the simplest form of a moving average. Each output value is the average of the
previous n values. In a Simple Moving Average, each value in the time period carries
equal weight, and values outside of the time period are not included in the average.
This makes it less responsive to recent changes in the data, which can be useful for
filtering out those changes.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive_list,
default=overlap_model.WINDOW_LENGTHS,
help="Window lengths. Multiple values indicated as comma separated values. ",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=check_non_negative,
default=0,
help="offset",
choices=range(0, 100),
metavar="N_OFFSET",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
overlap_view.view_ma(
ma_type="SMA",
symbol=self.coin,
data=self.stock["Adj Close"],
window=ns_parser.n_length,
offset=ns_parser.n_offset,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_wma(self, other_args: List[str]):
"""Process wma command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="wma",
description="""
A Weighted Moving Average puts more weight on recent data and less on past data.
This is done by multiplying each bar’s price by a weighting factor. Because of its
unique calculation, WMA will follow prices more closely than a corresponding Simple
Moving Average.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive_list,
default=overlap_model.WINDOW_LENGTHS,
help="Window lengths. Multiple values indicated as comma separated values. ",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=check_non_negative,
default=0,
help="offset",
choices=range(0, 100),
metavar="N_OFFSET",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
overlap_view.view_ma(
ma_type="WMA",
symbol=self.coin,
data=self.stock["Adj Close"],
window=ns_parser.n_length,
offset=ns_parser.n_offset,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_hma(self, other_args: List[str]):
"""Process hma command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="hma",
description="""
The Hull Moving Average solves the age old dilemma of making a moving average
more responsive to current price activity whilst maintaining curve smoothness.
In fact the HMA almost eliminates lag altogether and manages to improve smoothing
at the same time.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive_list,
default=overlap_model.WINDOW_LENGTHS2,
help="Window lengths. Multiple values indicated as comma separated values. ",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=check_non_negative,
default=0,
help="offset",
choices=range(0, 100),
metavar="N_OFFSET",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
overlap_view.view_ma(
ma_type="HMA",
symbol=self.coin,
data=self.stock["Adj Close"],
window=ns_parser.n_length,
offset=ns_parser.n_offset,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_zlma(self, other_args: List[str]):
"""Process zlma command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="zlma",
description="""
The zero lag exponential moving average (ZLEMA) indicator
was created by John Ehlers and Ric Way. The idea is do a
regular exponential moving average (EMA) calculation but
on a de-lagged data instead of doing it on the regular data.
Data is de-lagged by removing the data from "lag" days ago
thus removing (or attempting to) the cumulative effect of
the moving average.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive_list,
default=[20],
help="Window lengths. Multiple values indicated as comma separated values.",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=check_non_negative,
default=0,
help="offset",
choices=range(0, 100),
metavar="N_OFFSET",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
overlap_view.view_ma(
ma_type="ZLMA",
symbol=self.coin,
data=self.stock["Adj Close"],
window=ns_parser.n_length,
offset=ns_parser.n_offset,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_vwap(self, other_args: List[str]):
"""Process vwap command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="vwap",
description="""
The Volume Weighted Average Price that measures the average typical price
by volume. It is typically used with intraday charts to identify general direction.
""",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=check_non_negative,
default=0,
help="offset",
choices=range(0, 100),
metavar="N_OFFSET",
)
parser.add_argument(
"--start",
dest="start",
type=valid_date,
help="Starting date to select",
required="--end" in other_args,
)
parser.add_argument(
"--end",
dest="end",
type=valid_date,
help="Ending date to select",
required="--start" in other_args,
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
# Daily
if self.interval == "1440min":
if not ns_parser.start:
console.print(
"If no date conditions, VWAP should be used with intraday data. \n"
)
return
interval_text = "Daily"
else:
interval_text = self.interval
overlap_view.view_vwap(
symbol=self.coin,
interval=interval_text,
data=self.stock,
start_date=ns_parser.start,
end_date=ns_parser.end,
offset=ns_parser.n_offset,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_cci(self, other_args: List[str]):
"""Process cci command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="cci",
description="""
The CCI is designed to detect beginning and ending market trends.
The range of 100 to -100 is the normal trading range. CCI values outside of this
range indicate overbought or oversold conditions. You can also look for price
divergence in the CCI. If the price is making new highs, and the CCI is not,
then a price correction is likely.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive,
default=14,
help="length",
choices=range(1, 100),
metavar="N_LENGTH",
)
parser.add_argument(
"-s",
"--scalar",
action="store",
dest="n_scalar",
type=check_positive,
default=0.015,
help="scalar",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
momentum_view.display_cci(
symbol=self.coin,
data=self.stock,
window=ns_parser.n_length,
scalar=ns_parser.n_scalar,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_macd(self, other_args: List[str]):
"""Process macd command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="macd",
description="""
The Moving Average Convergence Divergence (MACD) is the difference
between two Exponential Moving Averages. The Signal line is an Exponential Moving
Average of the MACD. \n \n The MACD signals trend changes and indicates the start
of new trend direction. High values indicate overbought conditions, low values
indicate oversold conditions. Divergence with the price indicates an end to the
current trend, especially if the MACD is at extreme high or low values. When the MACD
line crosses above the signal line a buy signal is generated. When the MACD crosses
below the signal line a sell signal is generated. To confirm the signal, the MACD
should be above zero for a buy, and below zero for a sell.
""",
)
parser.add_argument(
"--fast",
action="store",
dest="n_fast",
type=check_positive,
default=12,
help="The short period.",
choices=range(1, 100),
metavar="N_FAST",
)
parser.add_argument(
"--slow",
action="store",
dest="n_slow",
type=check_positive,
default=26,
help="The long period.",
)
parser.add_argument(
"--signal",
action="store",
dest="n_signal",
type=check_positive,
default=9,
help="The signal period.",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
momentum_view.display_macd(
symbol=self.coin,
data=self.stock["Adj Close"],
n_fast=ns_parser.n_fast,
n_slow=ns_parser.n_slow,
n_signal=ns_parser.n_signal,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_rsi(self, other_args: List[str]):
"""Process rsi command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="rsi",
description="""
The Relative Strength Index (RSI) calculates a ratio of the
recent upward price movements to the absolute price movement. The RSI ranges
from 0 to 100. The RSI is interpreted as an overbought/oversold indicator when
the value is over 70/below 30. You can also look for divergence with price. If
the price is making new highs/lows, and the RSI is not, it indicates a reversal.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive,
default=14,
help="length",
choices=range(1, 100),
metavar="N_LENGTH",
)
parser.add_argument(
"-s",
"--scalar",
action="store",
dest="n_scalar",
type=check_positive,
default=100,
help="scalar",
)
parser.add_argument(
"-d",
"--drift",
action="store",
dest="n_drift",
type=check_positive,
default=1,
help="drift",
choices=range(1, 100),
metavar="N_DRIFT",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
momentum_view.display_rsi(
symbol=self.coin,
data=self.stock["Adj Close"],
window=ns_parser.n_length,
scalar=ns_parser.n_scalar,
drift=ns_parser.n_drift,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_stoch(self, other_args: List[str]):
"""Process stoch command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="stoch",
description="""
The Stochastic Oscillator measures where the close is in relation
to the recent trading range. The values range from zero to 100. %D values over 75
indicate an overbought condition; values under 25 indicate an oversold condition.
When the Fast %D crosses above the Slow %D, it is a buy signal; when it crosses
below, it is a sell signal. The Raw %K is generally considered too erratic to use
for crossover signals.
""",
)
parser.add_argument(
"-k",
"--fastkperiod",
action="store",
dest="n_fastkperiod",
type=check_positive,
default=14,
help="The time period of the fastk moving average",
choices=range(1, 100),
metavar="N_FASTKPERIOD",
)
parser.add_argument(
"-d",
"--slowdperiod",
action="store",
dest="n_slowdperiod",
type=check_positive,
default=3,
help="The time period of the slowd moving average",
choices=range(1, 100),
metavar="N_SLOWDPERIOD",
)
parser.add_argument(
"--slowkperiod",
action="store",
dest="n_slowkperiod",
type=check_positive,
default=3,
help="The time period of the slowk moving average",
choices=range(1, 100),
metavar="N_SLOWKPERIOD",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
momentum_view.display_stoch(
symbol=self.coin,
data=self.stock,
fastkperiod=ns_parser.n_fastkperiod,
slowdperiod=ns_parser.n_slowdperiod,
slowkperiod=ns_parser.n_slowkperiod,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_fisher(self, other_args: List[str]):
"""Process fisher command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="fisher",
description="""
The Fisher Transform is a technical indicator created by John F. Ehlers
that converts prices into a Gaussian normal distribution.1 The indicator
highlights when prices have moved to an extreme, based on recent prices.
This may help in spotting turning points in the price of an asset. It also
helps show the trend and isolate the price waves within a trend.
""",
)
parser.add_argument(
"-l",
"--length",
action="store",
dest="n_length",
type=check_positive,
default=14,
help="length",
choices=range(1, 100),
metavar="N_LENGTH",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
momentum_view.display_fisher(
symbol=self.coin,
data=self.stock,
window=ns_parser.n_length,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_cg(self, other_args: List[str]):
        """Process cg command.

        Parses CLI arguments and renders the Center of Gravity oscillator
        for the currently loaded coin via momentum_view.display_cg.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="cg",
            description="""
                The Center of Gravity indicator, in short, is used to anticipate future price movements
                and to trade on price reversals as soon as they happen. However, just like other oscillators,
                the COG indicator returns the best results in range-bound markets and should be avoided when
                the price is trending. Traders who use it will be able to closely speculate the upcoming
                price change of the asset.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        # Shorthand: `cg 20` — a bare leading value is treated as the -l argument.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            momentum_view.display_cg(
                symbol=self.coin,
                # Unlike the other TA commands here, the COG view receives a single
                # price series rather than the full OHLC dataframe.
                data=self.stock["Adj Close"],
                window=ns_parser.n_length,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_adx(self, other_args: List[str]):
        """Process adx command.

        Parses CLI arguments and renders the Average Directional Index (ADX)
        for the currently loaded coin via trend_indicators_view.display_adx.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="adx",
            description="""
                The ADX is a Welles Wilder style moving average of the Directional Movement Index (DX).
                The values range from 0 to 100, but rarely get above 60. To interpret the ADX, consider
                a high number to be a strong trend, and a low number, a weak trend.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=14,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        parser.add_argument(
            "-d",
            "--drift",
            action="store",
            dest="n_drift",
            type=check_positive,
            default=1,
            help="drift",
            choices=range(1, 100),
            metavar="N_DRIFT",
        )
        # Shorthand: `adx 20` — a bare leading value is treated as the -l argument.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            trend_indicators_view.display_adx(
                symbol=self.coin,
                data=self.stock,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                drift=ns_parser.n_drift,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_aroon(self, other_args: List[str]):
        """Process aroon command.

        Parses CLI arguments and renders the Aroon indicator for the
        currently loaded coin via trend_indicators_view.display_aroon.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="aroon",
            description="""
                The word aroon is Sanskrit for "dawn's early light." The Aroon
                indicator attempts to show when a new trend is dawning. The indicator consists
                of two lines (Up and Down) that measure how long it has been since the highest
                high/lowest low has occurred within an n period range. \n \n When the Aroon Up is
                staying between 70 and 100 then it indicates an upward trend. When the Aroon Down
                is staying between 70 and 100 then it indicates an downward trend. A strong upward
                trend is indicated when the Aroon Up is above 70 while the Aroon Down is below 30.
                Likewise, a strong downward trend is indicated when the Aroon Down is above 70 while
                the Aroon Up is below 30. Also look for crossovers. When the Aroon Down crosses above
                the Aroon Up, it indicates a weakening of the upward trend (and vice versa).
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=25,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=100,
            help="scalar",
        )
        # Shorthand: `aroon 20` — a bare leading value is treated as the -l argument.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            trend_indicators_view.display_aroon(
                symbol=self.coin,
                data=self.stock,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_bbands(self, other_args: List[str]):
        """Process bbands command.

        Parses CLI arguments and renders Bollinger Bands for the currently
        loaded coin via volatility_view.display_bbands.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="bbands",
            description="""
                Bollinger Bands consist of three lines. The middle band is a simple
                moving average (generally 20 periods) of the typical price (TP). The upper and lower
                bands are F standard deviations (generally 2) above and below the middle band.
                The bands widen and narrow when the volatility of the price is higher or lower,
                respectively. \n \nBollinger Bands do not, in themselves, generate buy or sell signals;
                they are an indicator of overbought or oversold conditions. When the price is near the
                upper or lower band it indicates that a reversal may be imminent. The middle band
                becomes a support or resistance level. The upper and lower bands can also be
                interpreted as price targets. When the price bounces off of the lower band and crosses
                the middle band, then the upper band becomes the price target.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=15,
            help="length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--std",
            action="store",
            dest="n_std",
            type=check_positive_float,
            default=2,
            help="std",
            # Standard deviations restricted to quarter steps in [0, 10).
            choices=np.arange(0.0, 10, 0.25).tolist(),
            metavar="N_STD",
        )
        parser.add_argument(
            "-m",
            "--mamode",
            action="store",
            dest="s_mamode",
            default="sma",
            choices=volatility_model.MAMODES,
            help="mamode",
        )
        # Shorthand: `bbands 20` — a bare leading value is treated as the -l argument.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volatility_view.display_bbands(
                symbol=self.coin,
                data=self.stock,
                window=ns_parser.n_length,
                n_std=ns_parser.n_std,
                mamode=ns_parser.s_mamode,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_donchian(self, other_args: List[str]):
"""Process donchian command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="donchian",
description="""
Donchian Channels are three lines generated by moving average
calculations that comprise an indicator formed by upper and lower
bands around a midrange or median band. The upper band marks the
highest price of a security over N periods while the lower band
marks the lowest price of a security over N periods. The area
between the upper and lower bands represents the Donchian Channel.
""",
)
parser.add_argument(
"-u",
"--length_upper",
action="store",
dest="n_length_upper",
type=check_positive,
default=20,
help="length",
choices=range(1, 100),
metavar="N_LENGTH_UPPER",
)
parser.add_argument(
"-l",
"--length_lower",
action="store",
dest="n_length_lower",
type=check_positive,
default=20,
help="length",
choices=range(1, 100),
metavar="N_LENGTH_LOWER",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
volatility_view.display_donchian(
symbol=self.coin,
data=self.stock,
upper_length=ns_parser.n_length_upper,
lower_length=ns_parser.n_length_lower,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_kc(self, other_args: List[str]):
        """Process kc command.

        Parses CLI arguments and renders Keltner Channels for the currently
        loaded coin via volatility_view.view_kc.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="kc",
            description="""
                Keltner Channels are volatility-based bands that are placed
                on either side of an asset's price and can aid in determining
                the direction of a trend.The Keltner channel uses the average
                true range (ATR) or volatility, with breaks above or below the top
                and bottom barriers signaling a continuation.
            """,
        )
        parser.add_argument(
            "-l",
            "--length",
            action="store",
            dest="n_length",
            type=check_positive,
            default=20,
            help="Window length",
            choices=range(1, 100),
            metavar="N_LENGTH",
        )
        parser.add_argument(
            "-s",
            "--scalar",
            action="store",
            dest="n_scalar",
            type=check_positive,
            default=2,
            help="scalar",
        )
        parser.add_argument(
            "-m",
            "--mamode",
            action="store",
            dest="s_mamode",
            default="ema",
            choices=volatility_model.MAMODES,
            help="mamode",
        )
        parser.add_argument(
            "-o",
            "--offset",
            action="store",
            dest="n_offset",
            # Offset may legitimately be zero, hence check_non_negative
            # rather than the check_positive used by the other arguments.
            type=check_non_negative,
            default=0,
            help="offset",
            choices=range(0, 100),
            metavar="N_OFFSET",
        )
        # Shorthand: `kc 20` — a bare leading value is treated as the -l argument.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volatility_view.view_kc(
                symbol=self.coin,
                data=self.stock,
                window=ns_parser.n_length,
                scalar=ns_parser.n_scalar,
                mamode=ns_parser.s_mamode,
                offset=ns_parser.n_offset,
                export=ns_parser.export,
            )
    @log_start_end(log=logger)
    def call_ad(self, other_args: List[str]):
        """Process ad command.

        Parses CLI arguments and renders the Accumulation/Distribution Line
        for the currently loaded coin via volume_view.display_ad.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="ad",
            description="""
                The Accumulation/Distribution Line is similar to the On Balance
                Volume (OBV), which sums the volume times +1/-1 based on whether the close is
                higher than the previous close. The Accumulation/Distribution indicator, however
                multiplies the volume by the close location value (CLV). The CLV is based on the
                movement of the issue within a single bar and can be +1, -1 or zero. \n \n
                The Accumulation/Distribution Line is interpreted by looking for a divergence in
                the direction of the indicator relative to price. If the Accumulation/Distribution
                Line is trending upward it indicates that the price may follow. Also, if the
                Accumulation/Distribution Line becomes flat while the price is still rising (or falling)
                then it signals an impending flattening of the price.
            """,
        )
        parser.add_argument(
            "--open",
            action="store_true",
            default=False,
            dest="b_use_open",
            help="uses open value of stock",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volume_view.display_ad(
                symbol=self.coin,
                data=self.stock,
                use_open=ns_parser.b_use_open,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_adosc(self, other_args: List[str]):
"""Process adosc command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="adosc",
description="""
Accumulation/Distribution Oscillator, also known as the Chaikin Oscillator
is essentially a momentum indicator, but of the Accumulation-Distribution line
rather than merely price. It looks at both the strength of price moves and the
underlying buying and selling pressure during a given time period. The oscillator
reading above zero indicates net buying pressure, while one below zero registers
net selling pressure. Divergence between the indicator and pure price moves are
the most common signals from the indicator, and often flag market turning points.
""",
)
parser.add_argument(
"--open",
action="store_true",
default=False,
dest="b_use_open",
help="uses open value of stock",
)
parser.add_argument(
"--fast",
action="store",
dest="n_length_fast",
type=check_positive,
default=3,
help="fast length",
choices=range(1, 100),
metavar="N_LENGTH_FAST",
)
parser.add_argument(
"--slow",
action="store",
dest="n_length_slow",
type=check_positive,
default=10,
help="slow length",
)
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
volume_view.display_adosc(
symbol=self.coin,
data=self.stock,
use_open=ns_parser.b_use_open,
fast=ns_parser.n_length_fast,
slow=ns_parser.n_length_slow,
export=ns_parser.export,
)
    @log_start_end(log=logger)
    def call_obv(self, other_args: List[str]):
        """Process obv command.

        Parses CLI arguments and renders the On Balance Volume indicator
        for the currently loaded coin via volume_view.display_obv.
        """
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="obv",
            description="""
                The On Balance Volume (OBV) is a cumulative total of the up and
                down volume. When the close is higher than the previous close, the volume is added
                to the running total, and when the close is lower than the previous close, the volume
                is subtracted from the running total. \n \n To interpret the OBV, look for the OBV
                to move with the price or precede price moves. If the price moves before the OBV,
                then it is a non-confirmed move. A series of rising peaks, or falling troughs, in the
                OBV indicates a strong trend. If the OBV is flat, then the market is not trending.
            """,
        )
        # OBV takes no tunable parameters; only export flags are parsed.
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            volume_view.display_obv(
                symbol=self.coin,
                data=self.stock,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_fib(self, other_args: List[str]):
"""Process fib command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="fib",
description="Calculates the fibonacci retracement levels",
)
parser.add_argument(
"-p",
"--period",
dest="period",
type=int,
help="Days to look back for retracement",
default=120,
choices=range(1, 960),
metavar="PERIOD",
)
parser.add_argument(
"--start",
dest="start",
type=valid_date,
help="Starting date to select",
required="--end" in other_args,
)
parser.add_argument(
"--end",
dest="end",
type=valid_date,
help="Ending date to select",
required="--start" in other_args,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
custom_indicators_view.fibonacci_retracement(
symbol=self.coin,
data=self.stock,
limit=ns_parser.period,
start_date=ns_parser.start,
end_date=ns_parser.end,
export=ns_parser.export,
) | /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/cryptocurrency/technical_analysis/ta_controller.py | 0.62223 | 0.182717 | ta_controller.py | pypi |
# Supported intraday intervals, in minutes, for loading stock data.
INTERVALS = [1, 5, 15, 30, 60]
# Data providers that can serve historical stock prices.
SOURCES = ["YahooFinance", "AlphaVantage", "IEXCloud", "EODHD"]
# Columns by which the candle table view can be sorted.
CANDLE_SORT = [
    "adjclose",
    "open",
    "close",
    "high",
    "low",
    "volume",
    "returns",
    "logret",
]
# Yahoo Finance ticker-suffix coverage per exchange country, e.g. "BNS.TO"
# carries the Toronto suffix "TO". An empty-string suffix (united_states)
# matches tickers with no suffix at all.
market_coverage_suffix = {
    "united_states": ["CBT", "CME", "NYB", "CMX", "NYM", "US", ""],
    "argentina": ["BA"],
    "austria": ["VI"],
    "australia": ["AX"],
    "belgium": ["BR"],
    "brazil": ["SA"],
    "canada": ["CN", "NE", "TO", "V"],
    "chile": ["SN"],
    "china": ["SS", "SZ"],
    "czech_republic": ["PR"],
    "denmark": ["CO"],
    "egypt": ["CA"],
    "estonia": ["TL"],
    "europe": ["NX"],
    "finland": ["HE"],
    "france": ["PA"],
    "germany": ["BE", "BM", "DU", "F", "HM", "HA", "MU", "SG", "DE"],
    "greece": ["AT"],
    "hong_kong": ["HK"],
    "hungary": ["BD"],
    "iceland": ["IC"],
    "india": ["BO", "NS"],
    "indonesia": ["JK"],
    "ireland": ["IR"],
    "israel": ["TA"],
    "italy": ["MI"],
    "japan": ["T", "S"],
    "latvia": ["RG"],
    "lithuania": ["VS"],
    "malaysia": ["KL"],
    "mexico": ["MX"],
    "netherlands": ["AS"],
    "new_zealand": ["NZ"],
    "norway": ["OL"],
    "portugal": ["LS"],
    "qatar": ["QA"],
    "russia": ["ME"],
    "singapore": ["SI"],
    "south_africa": ["JO"],
    "south_korea": ["KS", "KQ"],
    "spain": ["MC"],
    "saudi_arabia": ["SAU"],
    "sweden": ["ST"],
    "switzerland": ["SW"],
    "taiwan": ["TWO", "TW"],
    "thailand": ["BK"],
    "turkey": ["IS"],
    "united_kingdom": ["L", "IL"],
    "venezuela": ["CR"],
}
# Income-statement line items available for plotting, keyed by data source.
# The snake_case names mirror each provider's statement columns after
# normalization and must match them exactly — do not "fix" spellings here.
INCOME_PLOT = {
    "AlphaVantage": [
        "reported_currency",
        "gross_profit",
        "total_revenue",
        "cost_of_revenue",
        "cost_of_goods_and_services_sold",
        "operating_income",
        "selling_general_and_administrative",
        "research_and_development",
        "operating_expenses",
        "investment_income_net",
        "net_interest_income",
        "interest_income",
        "interest_expense",
        "non_interest_income",
        "other_non_operating_income",
        "depreciation",
        "depreciation_and_amortization",
        "income_before_tax",
        "income_tax_expense",
        "interest_and_debt_expense",
        "net_income_from_continuing_operations",
        "comprehensive_income_net_of_tax",
        "ebit",
        "ebitda",
        "net_income",
    ],
    "Polygon": [
        "cost_of_revenue",
        "diluted_earnings_per_share",
        "costs_and_expenses",
        "gross_profit",
        "non_operating_income_loss",
        "operating_income_loss",
        "participating_securities_distributed_and_undistributed_earnings_loss_basic",
        "income_tax_expense_benefit",
        "net_income_loss_attributable_to_parent",
        "net_income_loss",
        "income_tax_expense_benefit_deferred",
        "preferred_stock_dividends_and_other_adjustments",
        "operating_expenses",
        "income_loss_from_continuing_operations_before_tax",
        "net_income_loss_attributable_to_non_controlling_interest",
        "income_loss_from_continuing_operations_after_tax",
        "revenues",
        "net_income_loss_available_to_common_stockholders_basic",
        "benefits_costs_expenses",
        "basic_earnings_per_share",
        "interest_expense_operating",
        "income_loss_before_equity_method_investments",
    ],
    "YahooFinance": [
        "total_revenue",
        "cost_of_revenue",
        "gross_profit",
        "research_development",
        "selling_general_and_administrative",
        "total_operating_expenses",
        "operating_income_or_loss",
        "interest_expense",
        "total_other_income/expenses_net",
        "income_before_tax",
        "income_tax_expense",
        "income_from_continuing_operations",
        "net_income",
        "net_income_available_to_common_shareholders",
        "basic_eps",
        "diluted_eps",
        "basic_average_shares",
        "diluted_average_shares",
        "ebitda",
    ],
    "FinancialModelingPrep": [
        "reported_currency",
        "cik",
        "filling_date",
        "accepted_date",
        "calendar_year",
        "period",
        "revenue",
        "cost_of_revenue",
        "gross_profit",
        "gross_profit_ratio",
        "research_and_development_expenses",
        "general_and_administrative_expenses",
        "selling_and_marketing_expenses",
        "selling_general_and_administrative_expenses",
        "other_expenses",
        "operating_expenses",
        "cost_and_expenses",
        "interest_income",
        "interest_expense",
        "depreciation_and_amortization",
        "ebitda",
        "ebitda_ratio",
        "operating_income",
        "operating_income_ratio",
        "total_other_income_expenses_net",
        "income_before_tax",
        "income_before_tax_ratio",
        "income_tax_expense",
        "net_income",
        "net_income_ratio",
        "eps",
        "eps_diluted",
        "weighted_average_shs_out",
        "weighted_average_shs_out_dil",
        "link",
        "final_link",
    ],
}
# Balance-sheet line items available for plotting, keyed by data source.
# Names mirror each provider's statement columns after normalization and
# must match them exactly — do not "fix" spellings here.
BALANCE_PLOT = {
    "AlphaVantage": [
        "reported_currency",
        "total_assets",
        "total_current_assets",
        "cash_and_cash_equivalents_at_carrying_value",
        "cash_and_short_term_investments",
        "inventory",
        "current_net_receivables",
        "total_non_current_assets",
        "property_plant_equipment",
        "accumulated_depreciation_amortization_ppe",
        "intangible_assets",
        "intangible_assets_excluding_goodwill",
        "goodwill",
        "investments",
        "long_term_investments",
        "short_term_investments",
        "other_current_assets",
        "other_non_currrent_assets",
        "total_liabilities",
        "total_current_liabilities",
        "current_accounts_payable",
        "deferred_revenue",
        "current_debt",
        "short_term_debt",
        "total_non_current_liabilities",
        "capital_lease_obligations",
        "long_term_debt",
        "current_long_term_debt",
        "long_term_debt_non_current",
        "short_long_term_debt_total",
        "other_current_liabilities",
        "other_non_current_liabilities",
        "total_shareholder_equity",
        "treasury_stock",
        "retained_earnings",
        "common_stock",
        "common_stock_shares_outstanding",
    ],
    "Polygon": [
        "equity_attributable_to_non_controlling_interest",
        "liabilities",
        "non_current_assets",
        "equity",
        "assets",
        "current_assets",
        "equity_attributable_to_parent",
        "current_liabilities",
        "non_current_liabilities",
        "fixed_assets",
        "other_than_fixed_non_current_assets",
        "liabilities_and_equity",
    ],
    "YahooFinance": [
        "cash_and_cash_equivalents",
        "other_short-term_investments",
        "total_cash",
        "net_receivables",
        "inventory",
        "other_current_assets",
        "total_current_assets",
        "gross_property, plant_and_equipment",
        "accumulated_depreciation",
        "net_property, plant_and_equipment",
        "equity_and_other_investments",
        "other_long-term_assets",
        "total_non-current_assets",
        "total_assets",
        "current_debt",
        "accounts_payable",
        "deferred_revenues",
        "other_current_liabilities",
        "total_current_liabilities",
        "long-term_debt",
        "deferred_tax_liabilities",
        # NOTE(review): "deferred_revenues" appears twice in this list (also
        # above under current liabilities) — Yahoo statements show the item in
        # both the current and non-current sections; confirm the duplication
        # is intentional before removing it.
        "deferred_revenues",
        "other_long-term_liabilities",
        "total_non-current_liabilities",
        "total_liabilities",
        "common_stock",
        "retained_earnings",
        "accumulated_other_comprehensive_income",
        "total_stockholders'_equity",
        "total_liabilities_and_stockholders'_equity",
    ],
    "FinancialModelingPrep": [
        "reported_currency",
        "cik",
        "filling_date",
        "accepted_date",
        "calendar_year",
        "period",
        "cash_and_cash_equivalents",
        "short_term_investments",
        "cash_and_short_term_investments",
        "net_receivables",
        "inventory",
        "other_current_assets",
        "total_current_assets",
        "property_plant_equipment_net",
        "goodwill",
        "intangible_assets",
        "goodwill_and_intangible_assets",
        "long_term_investments",
        "tax_assets",
        "other_non_current_assets",
        "total_non_current_assets",
        "other_assets",
        "total_assets",
        "account_payables",
        "short_term_debt",
        "tax_payables",
        "deferred_revenue",
        "other_current_liabilities",
        "total_current_liabilities",
        "long_term_debt",
        "deferred_revenue_non_current",
        "deferred_tax_liabilities_non_current",
        "other_non_current_liabilities",
        "total_non_current_liabilities",
        "other_liabilities",
        "capital_lease_obligations",
        "total_liabilities",
        "preferred_stock",
        "common_stock",
        "retained_earnings",
        "accumulated_other_comprehensive_income_loss",
        "other_total_stockholders_equity",
        "total_stockholders_equity",
        "total_liabilities_and_stockholders_equity",
        "minority_interest",
        "total_equity",
        "total_liabilities_and_total_equity",
        "total_investments",
        "total_debt",
        "net_debt",
        "link",
        "final_link",
    ],
}
# Cash-flow-statement line items available for plotting, keyed by data source.
# Names mirror each provider's statement columns after normalization and must
# match them exactly — do not "fix" spellings (e.g. FMP's "activites") here.
# Fix applied: dataset-extraction residue fused onto the closing brace has
# been removed so the module parses.
CASH_PLOT = {
    "AlphaVantage": [
        "reported_currency",
        "operating_cash_flow",
        "payments_for_operating_activities",
        "proceeds_from_operating_activities",
        "change_in_operating_liabilities",
        "change_in_operating_assets",
        "depreciation_depletion_and_amortization",
        "capital_expenditures",
        "change_in_receivables",
        "change_in_inventory",
        "profit_loss",
        "cash_flow_from_investment",
        "cash_flow_from_financing",
        "proceeds_from_repayments_of_short_term_debt",
        "payments_for_repurchase_of_common_stock",
        "payments_for_repurchase_of_equity",
        "payments_for_repurchase_of_preferred_stock",
        "dividend_payout",
        "dividend_payout_common_stock",
        "dividend_payout_preferred_stock",
        "proceeds_from_issuance_of_common_stock",
        "proceeds_from_issuance_of_long_term_debt_and_capital_securities_net",
        "proceeds_from_issuance_of_preferred_stock",
        "proceeds_from_repurchase_of_equity",
        "proceeds_from_sale_of_treasury_stock",
        "change_in_cash_and_cash_equivalents",
        "change_in_exchange_rate",
        "net_income",
    ],
    "Polygon": [
        "net_cash_flow_from_financing_activities_continuing",
        "net_cash_flow_continuing",
        "net_cash_flow_from_investing_activities",
        "net_cash_flow",
        "net_cash_flow_from_operating_activities",
        "net_cash_flow_from_financing_activities",
        "net_cash_flow_from_operating_activities_continuing",
        "net_cash_flow_from_investing_activities_continuing",
    ],
    "YahooFinance": [
        "net_income",
        "depreciation_&_amortisation",
        "deferred_income_taxes",
        "stock-based_compensation",
        "change_in working_capital",
        "accounts_receivable",
        "inventory",
        "accounts_payable",
        "other_working_capital",
        "other_non-cash_items",
        "net_cash_provided_by_operating_activities",
        "investments_in_property, plant_and_equipment",
        "acquisitions, net",
        "purchases_of_investments",
        "sales/maturities_of_investments",
        "other_investing_activities",
        "net_cash_used_for_investing_activities",
        "debt_repayment",
        "common_stock_issued",
        "common_stock_repurchased",
        "dividends_paid",
        "other_financing_activities",
        "net_cash_used_provided_by_(used_for)_financing_activities",
        "net_change_in_cash",
        "cash_at_beginning_of_period",
        "cash_at_end_of_period",
        "operating_cash_flow",
        "capital_expenditure",
        "free_cash_flow",
    ],
    "FinancialModelingPrep": [
        "reported_currency",
        "cik",
        "filling_date",
        "accepted_date",
        "calendar_year",
        "period",
        "net_income",
        "depreciation_and_amortization",
        "deferred_income_tax",
        "stock_based_compensation",
        "change_in_working_capital",
        "accounts_receivables",
        "inventory",
        "accounts_payables",
        "other_working_capital",
        "other_non_cash_items",
        "net_cash_provided_by_operating_activities",
        "investments_in_property_plant_and_equipment",
        "acquisitions_net",
        "purchases_of_investments",
        "sales_maturities_of_investments",
        "other_investing_activites",
        "net_cash_used_for_investing_activites",
        "debt_repayment",
        "common_stock_issued",
        "common_stock_repurchased",
        "dividends_paid",
        "other_financing_activites",
        "net_cash_used_provided_by_financing_activities",
        "effect_of_forex_changes_on_cash",
        "net_change_in_cash",
        "cash_at_end_of_period",
        "cash_at_beginning_of_period",
        "operating_cash_flow",
        "capital_expenditure",
        "free_cash_flow",
        "link",
        "final_link",
    ],
}
__docformat__ = "numpy"
# pylint: disable=unsupported-assignment-operation,too-many-lines
# pylint: disable=no-member,too-many-branches,too-many-arguments
# pylint: disable=inconsistent-return-statements
import logging
import os
from datetime import datetime, timedelta, date
from typing import Any, Union, Optional, Iterable, List, Dict
import financedatabase as fd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import LogLocator, ScalarFormatter
import mplfinance as mpf
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import pytz
import requests
from requests.exceptions import ReadTimeout
import yfinance as yf
from plotly.subplots import make_subplots
from scipy import stats
from openbb_terminal import config_terminal as cfg
# pylint: disable=unused-import
from openbb_terminal.stocks.stock_statics import market_coverage_suffix
from openbb_terminal.stocks.stock_statics import INTERVALS # noqa: F401
from openbb_terminal.stocks.stock_statics import SOURCES # noqa: F401
from openbb_terminal.stocks.stock_statics import INCOME_PLOT # noqa: F401
from openbb_terminal.stocks.stock_statics import BALANCE_PLOT # noqa: F401
from openbb_terminal.stocks.stock_statics import CASH_PLOT # noqa: F401
from openbb_terminal.stocks.stock_statics import CANDLE_SORT # noqa: F401
from openbb_terminal.stocks.stocks_model import (
load_stock_av,
load_stock_yf,
load_stock_eodhd,
load_stock_iex_cloud,
load_stock_polygon,
)
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
lambda_long_number_format_y_axis,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)

# Load the MIC (Market Identifier Code) mapping shipped alongside this module.
# Mic_Codes.csv has no header row: column 0 (the index) holds the code, the
# remaining column its mapped value; squeeze() turns it into a flat dict.
exch_file_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "mappings", "Mic_Codes.csv"
)
exchange_df = pd.read_csv(exch_file_path, index_col=0, header=None)
exchange_mappings = exchange_df.squeeze("columns").to_dict()
def check_datetime(
    ck_date: Optional[Union[datetime, str]] = None, start: bool = True
) -> datetime:
    """Checks if given argument is string and attempts to convert to datetime.

    Parameters
    ----------
    ck_date : Optional[Union[datetime, str]], optional
        Date to check, by default None
    start : bool, optional
        If True and the input is missing/invalid, will return 1100 days ago
        If False and the input is missing/invalid, will return today, by default True

    Returns
    -------
    datetime
        Datetime object
    """
    # Fallback used for None, unparsable strings, and unsupported types.
    error_catch = (datetime.now() - timedelta(days=1100)) if start else datetime.now()
    if ck_date is None:
        return error_catch
    if isinstance(ck_date, datetime):
        return ck_date
    if isinstance(ck_date, str):
        try:
            return datetime.strptime(ck_date, "%Y-%m-%d")
        # strptime signals a bad format with ValueError; no need to catch broader.
        except ValueError:
            console.print(
                f"Invalid date format (YYYY-MM-DD), "
                f"Using {error_catch.strftime('%Y-%m-%d')} for {ck_date}"
            )
            return error_catch
    # Fix: previously any other type fell through and implicitly returned None
    # (the "inconsistent-return-statements" pylint disable); return the
    # fallback so callers always get a datetime.
    return error_catch
def search(
    query: str = "",
    country: str = "",
    sector: str = "",
    industry: str = "",
    exchange_country: str = "",
    limit: int = 0,
    export: str = "",
) -> None:
    """Search selected query for tickers.

    Parameters
    ----------
    query : str
        The search term used to find company tickers
    country: str
        Search by country to find stocks matching the criteria
    sector : str
        Search by sector to find stocks matching the criteria
    industry : str
        Search by industry to find stocks matching the criteria
    exchange_country: str
        Search by exchange country to find stock matching
    limit : int
        The limit of companies shown.
    export : str
        Export data

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.search(country="united states", exchange_country="Germany")
    """
    kwargs: Dict[str, Any] = {"exclude_exchanges": False}
    if country:
        kwargs["country"] = country.replace("_", " ").title()
    if sector:
        kwargs["sector"] = sector
    if industry:
        kwargs["industry"] = industry
    try:
        data = fd.select_equities(**kwargs)
    except ReadTimeout:
        console.print(
            "[red]Unable to retrieve company data from GitHub which limits the search"
            " capabilities. This tends to be due to access restrictions for GitHub.com,"
            " please check if you can access this website without a VPN.[/red]\n"
        )
        data = {}
    except ValueError:
        console.print(
            "[red]No companies were found that match the given criteria.[/red]\n"
        )
        return
    if not data:
        console.print("No companies found.\n")
        return
    if query:
        d = fd.search_products(
            data, query, search="long_name", case_sensitive=False, new_database=None
        )
    else:
        d = data
    if not d:
        console.print("No companies found.\n")
        return
    df = pd.DataFrame.from_dict(d).T[["long_name", "country", "sector", "industry"]]
    if exchange_country:
        if exchange_country in market_coverage_suffix:
            # Keep only tickers whose Yahoo suffix (the part after ".") belongs
            # to the requested exchange country; suffix-less tickers map to "".
            suffix_tickers = [
                ticker.split(".")[1] if "." in ticker else ""
                for ticker in list(df.index)
            ]
            df = df[
                [
                    val in market_coverage_suffix[exchange_country]
                    for val in suffix_tickers
                ]
            ]
    # Invert the country -> suffixes map to look up a country per ticker suffix.
    exchange_suffix = {}
    for k, v in market_coverage_suffix.items():
        for x in v:
            exchange_suffix[x] = k
    df["exchange"] = [
        exchange_suffix.get(ticker.split(".")[1]) if "." in ticker else "USA"
        for ticker in list(df.index)
    ]
    title = "Companies found"
    if query:
        title += f" on term {query}"
    if exchange_country:
        title += f" on an exchange in {exchange_country.replace('_', ' ').title()}"
    if country:
        title += f" in {country.replace('_', ' ').title()}"
    # Fix: with industry alone the old logic appended both " and {industry}"
    # and " within {industry}", duplicating the industry in the title.
    if sector:
        title += f" within {sector}"
        if industry:
            title += f" and {industry}"
    elif industry:
        title += f" within {industry}"
    df["exchange"] = df["exchange"].apply(
        lambda x: x.replace("_", " ").title() if x else None
    )
    # Fix: "USA".title() yields "Usa"; normalize it to the full country name.
    # The old `else None` branch wiped every non-US exchange name to None.
    df["exchange"] = df["exchange"].apply(
        lambda x: "United States" if x == "Usa" else x
    )
    print_rich_table(
        df.iloc[:limit] if limit else df,
        show_index=True,
        headers=["Name", "Country", "Sector", "Industry", "Exchange"],
        title=title,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "search", df)
def load(
    symbol: str,
    start_date: Optional[Union[datetime, str]] = None,
    interval: int = 1440,
    end_date: Optional[Union[datetime, str]] = None,
    prepost: bool = False,
    source: str = "YahooFinance",
    iexrange: str = "ytd",
    weekly: bool = False,
    monthly: bool = False,
    verbose: bool = True,
):
    """Load a symbol to perform analysis using the string above as a template.

    Optional arguments and their descriptions are listed above.
    The default source is, yFinance (https://pypi.org/project/yfinance/).
    Other sources:
    -   AlphaVantage (https://www.alphavantage.co/documentation/)
    -   IEX Cloud (https://iexcloud.io/docs/api/)
    -   Eod Historical Data (https://eodhistoricaldata.com/financial-apis/)
    Please note that certain analytical features are exclusive to the specific source.

    To load a symbol from an exchange outside of the NYSE/NASDAQ default, use yFinance as the source and
    add the corresponding exchange to the end of the symbol. i.e. `BNS.TO`.  Note this may be possible with
    other paid sources check their docs.

    Certain analytical features, such as VWAP, require the ticker to be loaded as intraday
    using the `-i x` argument.  When encountering this error, simply reload the symbol using
    the interval argument. i.e. `load -t BNS -s YYYY-MM-DD -i 1 -p` loads one-minute intervals,
    including Pre/After Market data, using the default source, yFinance.

    Certain features, such as the Prediction menu, require the symbol to be loaded as daily and not intraday.

    Parameters
    ----------
    symbol: str
        Ticker to get data
    start_date: str or datetime, optional
        Start date to get data from with. - datetime or string format (YYYY-MM-DD)
    interval: int
        Interval (in minutes) to get data 1, 5, 15, 30, 60 or 1440
    end_date: str or datetime, optional
        End date to get data from with. - datetime or string format (YYYY-MM-DD)
    prepost: bool
        Pre and After hours data
    source: str
        Source of data extracted
    iexrange: str
        Timeframe to get IEX data.
    weekly: bool
        Flag to get weekly data
    monthly: bool
        Flag to get monthly data
    verbose: bool
        Display verbose information on what was the symbol that was loaded

    Returns
    -------
    df_stock_candidate: pd.DataFrame
        Dataframe of data
    """
    # Default window: roughly the last three years.
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    start_date = check_datetime(start_date)
    end_date = check_datetime(end_date, start=False)
    # Daily (or coarser: weekly/monthly) data
    if int(interval) == 1440:
        int_string = "Daily"
        if weekly:
            int_string = "Weekly"
        if monthly:
            int_string = "Monthly"
        if source == "AlphaVantage":
            df_stock_candidate = load_stock_av(symbol, start_date, end_date)
        elif source == "YahooFinance":
            df_stock_candidate = load_stock_yf(
                symbol, start_date, end_date, weekly, monthly
            )
        elif source == "EODHD":
            df_stock_candidate = load_stock_eodhd(
                symbol, start_date, end_date, weekly, monthly
            )
        elif source == "IEXCloud":
            df_stock_candidate = load_stock_iex_cloud(symbol, iexrange)
        elif source == "Polygon":
            df_stock_candidate = load_stock_polygon(
                symbol, start_date, end_date, weekly, monthly
            )
        else:
            console.print("[red]Invalid source for stock[/red]\n")
            return
        if df_stock_candidate.empty:
            return df_stock_candidate
        df_stock_candidate.index.name = "date"
        s_start = df_stock_candidate.index[0]
        s_interval = f"{interval}min"
        # BUG FIX: int_string used to be reset to "Daily" here unconditionally,
        # clobbering the "Weekly"/"Monthly" label chosen above.
    else:
        # Intraday data
        if source == "YahooFinance":
            s_int = str(interval) + "m"
            s_interval = s_int + "in"
            # yfinance limits how far back each intraday granularity reaches.
            d_granularity = {"1m": 6, "5m": 59, "15m": 59, "30m": 59, "60m": 729}
            s_start_dt = datetime.utcnow() - timedelta(days=d_granularity[s_int])
            s_date_start = s_start_dt.strftime("%Y-%m-%d")
            df_stock_candidate = yf.download(
                symbol,
                start=s_date_start
                if s_start_dt > start_date
                else start_date.strftime("%Y-%m-%d"),
                progress=False,
                interval=s_int,
                prepost=prepost,
            )
            # Check that loading a stock was not successful
            if df_stock_candidate.empty:
                return pd.DataFrame()
            df_stock_candidate.index = df_stock_candidate.index.tz_localize(None)
            if s_start_dt > start_date:
                s_start = pytz.utc.localize(s_start_dt)
            else:
                s_start = start_date
            df_stock_candidate.index.name = "date"
        elif source == "Polygon":
            request_url = (
                f"https://api.polygon.io/v2/aggs/ticker/"
                f"{symbol.upper()}/range/{interval}/minute/{start_date.strftime('%Y-%m-%d')}"
                f"/{end_date.strftime('%Y-%m-%d')}"
                f"?adjusted=true&sort=desc&limit=49999&apiKey={cfg.API_POLYGON_KEY}"
            )
            r = requests.get(request_url)
            if r.status_code != 200:
                console.print("[red]Error in polygon request[/red]")
                return pd.DataFrame()
            r_json = r.json()
            if "results" not in r_json.keys():
                console.print("[red]No results found in polygon reply.[/red]")
                return pd.DataFrame()
            df_stock_candidate = pd.DataFrame(r_json["results"])
            df_stock_candidate = df_stock_candidate.rename(
                columns={
                    "o": "Open",
                    "c": "Close",
                    "h": "High",
                    "l": "Low",
                    "t": "date",
                    "v": "Volume",
                    "n": "Transactions",
                }
            )
            # pylint: disable=unsupported-assignment-operation
            df_stock_candidate["date"] = pd.to_datetime(
                df_stock_candidate.date, unit="ms"
            )
            # Polygon has no adjusted close; mirror Close so downstream code works.
            df_stock_candidate["Adj Close"] = df_stock_candidate.Close
            df_stock_candidate = df_stock_candidate.sort_values(by="date")
            df_stock_candidate = df_stock_candidate.set_index("date")
            # Check that loading a stock was not successful
            if df_stock_candidate.empty:
                return pd.DataFrame()
            # Polygon returns UTC timestamps; convert to US/Eastern, drop tzinfo.
            df_stock_candidate.index = (
                df_stock_candidate.index.tz_localize(tz="UTC")
                .tz_convert("US/Eastern")
                .tz_localize(None)
            )
            s_start_dt = df_stock_candidate.index[0]
            if s_start_dt > start_date:
                s_start = pytz.utc.localize(s_start_dt)
            else:
                s_start = start_date
        else:
            # ROBUSTNESS: previously an unknown intraday source fell through and
            # crashed later on unbound locals.
            console.print("[red]Invalid intraday source for stock[/red]\n")
            return pd.DataFrame()
        s_interval = f"{interval}min"
        int_string = "Intraday"
    s_intraday = (f"Intraday {s_interval}", int_string)[int(interval) == 1440]
    if verbose:
        console.print(
            f"Loading {s_intraday} data for {symbol.upper()} "
            f"with starting period {s_start.strftime('%Y-%m-%d')}.",
        )
    return df_stock_candidate
def display_candle(
    symbol: str,
    data: pd.DataFrame = None,
    use_matplotlib: bool = True,
    intraday: bool = False,
    add_trend: bool = False,
    ma: Optional[Iterable[int]] = None,
    asset_type: str = "",
    start_date: Optional[Union[datetime, str]] = None,
    interval: int = 1440,
    end_date: Optional[Union[datetime, str]] = None,
    prepost: bool = False,
    source: str = "YahooFinance",
    iexrange: str = "ytd",
    weekly: bool = False,
    monthly: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
    raw: bool = False,
    yscale: str = "linear",
):
    """Show candle plot of loaded ticker.

    [Source: Yahoo Finance, IEX Cloud or Alpha Vantage]

    Parameters
    ----------
    symbol: str
        Ticker name
    data: pd.DataFrame
        Stock dataframe; loaded via `load` when None
    use_matplotlib: bool
        Flag to use matplotlib instead of interactive plotly chart
    intraday: bool
        Flag for intraday data for plotly range breaks
    add_trend: bool
        Flag to add high and low trends to chart
    ma: Tuple[int]
        Moving averages to add to the candle
    asset_type: str
        String to include in title
    start_date: str or datetime, optional
        Start date to get data from with. - datetime or string format (YYYY-MM-DD)
    interval: int
        Interval (in minutes) to get data 1, 5, 15, 30, 60 or 1440
    end_date: str or datetime, optional
        End date to get data from with. - datetime or string format (YYYY-MM-DD)
    prepost: bool
        Pre and After hours data
    source: str
        Source of data extracted
    iexrange: str
        Timeframe to get IEX data.
    weekly: bool
        Flag to get weekly data
    monthly: bool
        Flag to get monthly data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    raw : bool, optional
        Flag to display raw data, by default False
    yscale: str
        Linear or log for yscale

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.candle("AAPL")
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    start_date = check_datetime(start_date)
    end_date = check_datetime(end_date, start=False)
    if data is None:
        data = load(
            symbol,
            start_date,
            interval,
            end_date,
            prepost,
            source,
            iexrange,
            weekly,
            monthly,
        )
    # Always (re)derive OC_High/OC_Low/date_id columns used below.
    data = process_candle(data)
    if add_trend:
        # Trend lines only make sense for daily-or-coarser data.
        # ROBUSTNESS: guard index[1] access against frames with < 2 rows.
        if (
            len(data.index) > 1
            and (data.index[1] - data.index[0]).total_seconds() >= 86400
        ):
            data = find_trendline(data, "OC_High", "high")
            data = find_trendline(data, "OC_Low", "low")
    if not raw:
        if use_matplotlib:
            ap0 = []
            if add_trend:
                if "OC_High_trend" in data.columns:
                    ap0.append(
                        mpf.make_addplot(
                            data["OC_High_trend"],
                            color=cfg.theme.up_color,
                            secondary_y=False,
                        ),
                    )
                if "OC_Low_trend" in data.columns:
                    ap0.append(
                        mpf.make_addplot(
                            data["OC_Low_trend"],
                            color=cfg.theme.down_color,
                            secondary_y=False,
                        ),
                    )
            candle_chart_kwargs = {
                "type": "candle",
                "style": cfg.theme.mpf_style,
                "volume": True,
                "addplot": ap0,
                "xrotation": cfg.theme.xticks_rotation,
                "scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
                "update_width_config": {
                    "candle_linewidth": 0.6,
                    "candle_width": 0.8,
                    "volume_linewidth": 0.8,
                    "volume_width": 0.8,
                },
                "warn_too_much_data": 10000,
                "yscale": yscale,
            }
            kwargs = {"mav": ma} if ma else {}
            if external_axes is None:
                candle_chart_kwargs["returnfig"] = True
                candle_chart_kwargs["figratio"] = (10, 7)
                candle_chart_kwargs["figscale"] = 1.10
                candle_chart_kwargs["figsize"] = plot_autoscale()
                candle_chart_kwargs["warn_too_much_data"] = 100_000
                fig, ax = mpf.plot(data, **candle_chart_kwargs, **kwargs)
                lambda_long_number_format_y_axis(data, "Volume", ax)
                fig.suptitle(
                    f"{asset_type} {symbol}",
                    x=0.055,
                    y=0.965,
                    horizontalalignment="left",
                )
                if ma:
                    # Manually construct the chart legend
                    colors = [cfg.theme.get_colors()[i] for i, _ in enumerate(ma)]
                    lines = [Line2D([0], [0], color=c) for c in colors]
                    labels = ["MA " + str(label) for label in ma]
                    ax[0].legend(lines, labels)
                if yscale == "log":
                    ax[0].yaxis.set_major_formatter(ScalarFormatter())
                    ax[0].yaxis.set_major_locator(
                        LogLocator(base=100, subs=[1.0, 2.0, 5.0, 10.0])
                    )
                    ax[0].ticklabel_format(style="plain", axis="y")
                cfg.theme.visualize_output(force_tight_layout=False)
            else:
                if len(external_axes) != 2:
                    # BUG FIX: log message used to say "one axis item" while the
                    # check (and the console message) requires two.
                    logger.error("Expected list of 2 axis items.")
                    console.print("[red]Expected list of 2 axis items.\n[/red]")
                    return pd.DataFrame()
                ax1, ax2 = external_axes
                candle_chart_kwargs["ax"] = ax1
                candle_chart_kwargs["volume"] = ax2
                mpf.plot(data, **candle_chart_kwargs)
        else:
            # Interactive plotly chart: price + volume subplots.
            fig = make_subplots(
                rows=2,
                cols=1,
                shared_xaxes=True,
                vertical_spacing=0.06,
                subplot_titles=(f"{symbol}", "Volume"),
                row_width=[0.2, 0.7],
            )
            fig.add_trace(
                go.Candlestick(
                    x=data.index,
                    open=data.Open,
                    high=data.High,
                    low=data.Low,
                    close=data.Close,
                    name="OHLC",
                ),
                row=1,
                col=1,
            )
            if ma:
                plotly_colors = [
                    "black",
                    "teal",
                    "blue",
                    "purple",
                    "orange",
                    "gray",
                    "deepskyblue",
                ]
                for idx, ma_val in enumerate(ma):
                    temp = data["Adj Close"].copy()
                    temp[f"ma{ma_val}"] = data["Adj Close"].rolling(ma_val).mean()
                    temp = temp.dropna()
                    fig.add_trace(
                        go.Scatter(
                            x=temp.index,
                            y=temp[f"ma{ma_val}"],
                            name=f"MA{ma_val}",
                            mode="lines",
                            line=go.scatter.Line(
                                color=plotly_colors[np.mod(idx, len(plotly_colors))]
                            ),
                        ),
                        row=1,
                        col=1,
                    )
            if add_trend:
                if "OC_High_trend" in data.columns:
                    fig.add_trace(
                        go.Scatter(
                            x=data.index,
                            y=data["OC_High_trend"],
                            name="High Trend",
                            mode="lines",
                            line=go.scatter.Line(color="green"),
                        ),
                        row=1,
                        col=1,
                    )
                if "OC_Low_trend" in data.columns:
                    fig.add_trace(
                        go.Scatter(
                            x=data.index,
                            y=data["OC_Low_trend"],
                            name="Low Trend",
                            mode="lines",
                            line=go.scatter.Line(color="red"),
                        ),
                        row=1,
                        col=1,
                    )
            # Color volume bars by whether the session closed above its open.
            colors = [
                "red" if row.Open < row["Adj Close"] else "green"
                for _, row in data.iterrows()
            ]
            fig.add_trace(
                go.Bar(
                    x=data.index,
                    y=data.Volume,
                    name="Volume",
                    marker_color=colors,
                ),
                row=2,
                col=1,
            )
            fig.update_layout(
                yaxis_title="Stock Price ($)",
                xaxis=dict(
                    rangeselector=dict(
                        buttons=list(
                            [
                                dict(
                                    count=1,
                                    label="1m",
                                    step="month",
                                    stepmode="backward",
                                ),
                                dict(
                                    count=3,
                                    label="3m",
                                    step="month",
                                    stepmode="backward",
                                ),
                                dict(
                                    count=1, label="YTD", step="year", stepmode="todate"
                                ),
                                dict(
                                    count=1,
                                    label="1y",
                                    step="year",
                                    stepmode="backward",
                                ),
                                dict(step="all"),
                            ]
                        )
                    ),
                    rangeslider=dict(visible=False),
                    type="date",
                ),
            )
            fig.update_layout(
                updatemenus=[
                    dict(
                        buttons=[
                            dict(
                                label="linear",
                                method="relayout",
                                args=[{"yaxis.type": "linear"}],
                            ),
                            dict(
                                label="log",
                                method="relayout",
                                args=[{"yaxis.type": "log"}],
                            ),
                        ]
                    )
                ]
            )
            if intraday:
                # Hide weekends and (20:00-09:00) overnight gaps.
                fig.update_xaxes(
                    rangebreaks=[
                        dict(bounds=["sat", "mon"]),
                        dict(bounds=[20, 9], pattern="hour"),
                    ]
                )
            fig.show(config=dict({"scrollZoom": True}))
    else:
        return data
def load_ticker(
    ticker: str,
    start_date: Union[str, datetime],
    end_date: Optional[Union[str, datetime]] = None,
) -> pd.DataFrame:
    """Load a ticker data from Yahoo Finance.

    Adds a data index column date_id and Open-Close High/Low columns after loading.

    Parameters
    ----------
    ticker : str
        The stock ticker.
    start_date : Union[str,datetime]
        Start date to load stock ticker data formatted YYYY-MM-DD.
    end_date : Union[str,datetime]
        End date to load stock ticker data formatted YYYY-MM-DD.

    Returns
    -------
    DataFrame
        A Panda's data frame with columns Open, High, Low, Close, Adj Close, Volume,
        date_id, OC-High, OC-Low.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> msft_df = openbb.stocks.load("MSFT")
    """
    df_data = yf.download(ticker, start=start_date, end=end_date, progress=False)
    df_data.index = pd.to_datetime(df_data.index)
    # 1-based calendar-day offset from the first row.  COMPAT FIX: pandas 2.x
    # removed .astype("timedelta64[D]"); pd.to_timedelta(...).days is equivalent
    # and works on both 1.x and 2.x.
    df_data["date_id"] = (
        pd.to_timedelta(df_data.index.date - df_data.index.date.min()).days + 1
    )
    df_data["OC_High"] = df_data[["Open", "Close"]].max(axis=1)
    df_data["OC_Low"] = df_data[["Open", "Close"]].min(axis=1)
    return df_data
def process_candle(data: pd.DataFrame) -> pd.DataFrame:
    """Process DataFrame into candle style plot.

    Parameters
    ----------
    data : DataFrame
        Stock dataframe with a DatetimeIndex and Open/Close columns.

    Returns
    -------
    DataFrame
        A Panda's data frame with columns Open, High, Low, Close, Adj Close, Volume,
        date_id, OC-High, OC-Low, ma20, ma50.
    """
    df_data = data.copy()
    # 1-based calendar-day offset from the first row.  COMPAT FIX: pandas 2.x
    # removed .astype("timedelta64[D]"); pd.to_timedelta(...).days is equivalent
    # and works on both 1.x and 2.x.
    df_data["date_id"] = (
        pd.to_timedelta(df_data.index.date - df_data.index.date.min()).days + 1
    )
    df_data["OC_High"] = df_data[["Open", "Close"]].max(axis=1)
    df_data["OC_Low"] = df_data[["Open", "Close"]].min(axis=1)
    # Moving averages, back-filled so the leading window is not NaN
    # (.bfill() replaces the deprecated fillna(method="bfill")).
    df_data["ma20"] = df_data["Close"].rolling(20).mean().bfill()
    df_data["ma50"] = df_data["Close"].rolling(50).mean().bfill()
    return df_data
def find_trendline(
    df_data: pd.DataFrame, y_key: str, high_low: str = "high"
) -> pd.DataFrame:
    """Attempt to find a trend line based on the y_key column of a stock dataframe.

    Repeatedly regresses y_key against date_id and keeps only the points strictly
    above (high) or below (low) the fitted line, then extrapolates the final fit
    across the whole frame.

    Parameters
    ----------
    df_data : DataFrame
        The stock ticker data frame with at least date_id, y_key columns.
    y_key : str
        Column name to base the trend line on.
    high_low: str, optional
        Either "high" or "low". High is the default.

    Returns
    -------
    DataFrame
        The input frame with a {y_key}_trend column added when a trend was
        found, otherwise the original frame unchanged.
    """
    for min_points in [3, 4, 5, 6, 7]:
        candidates = df_data.copy()
        while len(candidates) > min_points:
            fit = stats.linregress(
                x=candidates["date_id"],
                y=candidates[y_key],
            )
            fitted_line = fit[0] * candidates["date_id"] + fit[1]
            if high_low == "high":
                candidates = candidates.loc[candidates[y_key] > fitted_line]
            else:
                candidates = candidates.loc[candidates[y_key] < fitted_line]
            if len(candidates) > 1:
                break
        # A single surviving point cannot define a line: give up.
        if len(candidates) == 1:
            return df_data
        fit = stats.linregress(
            x=candidates["date_id"],
            y=candidates[y_key],
        )
        df_data[f"{y_key}_trend"] = fit[0] * df_data["date_id"] + fit[1]
        return df_data
def additional_info_about_ticker(ticker: str) -> str:
    """Get information about trading the ticker.

    Includes exchange, currency, timezone and market status.

    Parameters
    ----------
    ticker : str
        The stock ticker to extract if stock market is open or not

    Returns
    -------
    str
        Additional information about trading the ticker
    """
    extra_info = ""
    if ticker:
        # Drop a trailing ".US"/".us" suffix.  BUG FIX: str.rstrip(".US") strips
        # a *character set*, not a suffix (e.g. "SU.US".rstrip(".US") == "").
        if ticker.upper().endswith(".US"):
            ticker = ticker[: -len(".US")]
        ticker_stats = yf.Ticker(ticker).stats()
        extra_info += "\n[param]Company:  [/param]"
        extra_info += ticker_stats.get("quoteType", {}).get("shortName")
        extra_info += "\n[param]Exchange: [/param]"
        exchange_name = ticker_stats.get("quoteType", {}).get("exchange")
        # Map Yahoo exchange codes (prefixed with "X") to readable names when known.
        extra_info += (
            exchange_mappings["X" + exchange_name]
            if "X" + exchange_name in exchange_mappings
            else exchange_name
        )
        extra_info += "\n[param]Currency: [/param]"
        extra_info += ticker_stats.get("summaryDetail", {}).get("currency")
    else:
        extra_info += "\n[param]Company: [/param]"
        extra_info += "\n[param]Exchange: [/param]"
        extra_info += "\n[param]Currency: [/param]"
    return extra_info + "\n"
def clean_fraction(num, denom):
    """Return the decimal value or "N/A" if the operation cannot be performed.

    Parameters
    ----------
    num : Any
        The numerator for the fraction
    denom : Any
        The denominator for the fraction

    Returns
    -------
    val : Any
        The value of the fraction, or the string "N/A" when the operands are
        not numeric or the denominator is zero.
    """
    try:
        return num / denom
    # ROBUSTNESS: also catch division by zero, which previously propagated.
    except (TypeError, ZeroDivisionError):
        return "N/A"
def load_custom(file_path: str) -> pd.DataFrame:
    """Load in a custom csv file.

    Parameters
    ----------
    file_path: str
        Path to file

    Returns
    -------
    pd.DataFrame
        Dataframe of stock data (empty frame when the path does not exist)
    """
    # Double check that the file exists
    if not os.path.exists(file_path):
        console.print("[red]File path does not exist.[/red]\n")
        return pd.DataFrame()
    df = pd.read_csv(file_path)
    console.print(f"Loaded data has columns: {', '.join(df.columns.to_list())}\n")
    # Nasdaq specific
    if "Close/Last" in df.columns:
        df = df.rename(columns={"Close/Last": "Close"})
    if "Last" in df.columns:
        df = df.rename(columns={"Last": "Close"})
    # Normalize headers (strip() replaces the old rstrip().lstrip() pair).
    df.columns = [col.lower().strip() for col in df.columns]
    for col in df.columns:
        if col in ["date", "time", "timestamp", "datetime"]:
            df[col] = pd.to_datetime(df[col])
            df = df.set_index(col)
            console.print(f"Column [blue]{col.title()}[/blue] set as index.")
    df.columns = [col.title() for col in df.columns]
    # ROBUSTNESS: when no date-like column was found the index has no name and
    # .title() used to raise AttributeError.
    if df.index.name:
        df.index.name = df.index.name.title()
    df = df.applymap(
        lambda x: clean_function(x) if not isinstance(x, (int, float)) else x
    )
    if "Adj Close" not in df.columns:
        df["Adj Close"] = df.Close.copy()
    return df
def clean_function(entry: str) -> Union[str, float]:
    """Clean stock data from csv.

    This can be customized for csvs.  Cells containing digits are assumed to be
    numeric: "$" and "," are stripped and the value converted to float.  Cells
    that still fail to parse (e.g. dates such as "2020-01-01") are returned
    unchanged instead of raising.
    """
    # If there is a digit, get rid of common characters and return float
    if any(char.isdigit() for char in entry):
        try:
            return float(entry.replace("$", "").replace(",", ""))
        # BUG FIX: digit-containing non-numbers ("2020-01-01") used to crash here.
        except ValueError:
            return entry
    return entry
def show_quick_performance(stock_df: pd.DataFrame, ticker: str):
    """Show quick performance stats of stock prices.

    Daily prices expected.

    Parameters
    ----------
    stock_df : pd.DataFrame
        Daily price dataframe with "Adj Close" and "Volume" columns.
    ticker : str
        Ticker symbol, used only for the table title.
    """
    closes = stock_df["Adj Close"]
    volumes = stock_df["Volume"]
    # NOTE(review): "1 Day" uses pct_change(2) (two rows back) — presumably
    # deliberate, but confirm against upstream before changing.
    perfs = {
        "1 Day": 100 * closes.pct_change(2)[-1],
        "1 Week": 100 * closes.pct_change(5)[-1],
        "1 Month": 100 * closes.pct_change(21)[-1],
        "1 Year": 100 * closes.pct_change(252)[-1],
    }
    # Show YTD when the history reaches back before Jan 1 of the current year.
    # BUG FIX: this used to test for the hard-coded literal "2022-01-03" in the
    # index, which silently broke in any other year.
    closes_ytd = closes[closes.index > f"{date.today().year}-01-01"]
    if closes.index[0] < pd.Timestamp(date.today().year, 1, 1) and not closes_ytd.empty:
        perfs["YTD"] = 100 * (closes_ytd[-1] - closes_ytd[0]) / closes_ytd[0]
    else:
        perfs["Period"] = 100 * (closes[-1] - closes[0]) / closes[0]
    perf_df = pd.DataFrame.from_dict(perfs, orient="index").dropna().T
    perf_df = perf_df.applymap(lambda x: str(round(x, 2)) + " %")
    perf_df = perf_df.applymap(
        lambda x: f"[red]{x}[/red]" if "-" in x else f"[green]{x}[/green]"
    )
    if len(closes) > 252:
        perf_df["Volatility (1Y)"] = (
            str(round(100 * np.sqrt(252) * closes[-252:].pct_change().std(), 2)) + " %"
        )
    else:
        perf_df["Volatility (Ann)"] = (
            str(round(100 * np.sqrt(252) * closes.pct_change().std(), 2)) + " %"
        )
    if len(volumes) > 10:
        # NOTE(review): averages rows [-12:-2], skipping the last two sessions.
        perf_df["Volume (10D avg)"] = (
            str(round(np.mean(volumes[-12:-2]) / 1_000_000, 2)) + " M"
        )
    perf_df["Last Price"] = str(round(closes[-1], 2))
    print_rich_table(
        perf_df,
        show_index=False,
        headers=perf_df.columns,
        title=f"{ticker.upper()} Performance",
    )
def show_codes_polygon(ticker: str):
    """Show FIGI, SIC and SIK codes for ticker.

    Parameters
    ----------
    ticker: str
        Stock ticker
    """
    link = f"https://api.polygon.io/v3/reference/tickers/{ticker.upper()}?apiKey={cfg.API_POLYGON_KEY}"
    if cfg.API_POLYGON_KEY == "REPLACE_ME":
        console.print("[red]Polygon API key missing[/red]\n")
        return
    r = requests.get(link)
    if r.status_code != 200:
        console.print("[red]Error in polygon request[/red]\n")
        return
    r_json = r.json()
    if "results" not in r_json.keys():
        console.print("[red]Results not found in polygon request[/red]")
        return
    r_json = r_json["results"]
    cols = ["cik", "composite_figi", "share_class_figi", "sic_code"]
    # ROBUSTNESS: Polygon omits codes it does not have; .get avoids a KeyError
    # that previously crashed the command for such tickers.
    vals = [r_json.get(col, "N/A") for col in cols]
    polygon_df = pd.DataFrame({"codes": [c.upper() for c in cols], "vals": vals})
    polygon_df.codes = polygon_df.codes.apply(lambda x: x.replace("_", " "))
    print_rich_table(
        polygon_df, show_index=False, headers=["", ""], title=f"{ticker.upper()} Codes"
    )
def format_parse_choices(choices: List[str]) -> List[str]:
    """Normalize a list of option strings for argparse.

    Each option is lowercased and its spaces are replaced with underscores.

    Parameters
    ----------
    choices: List[str]
        The options to be formatted

    Returns
    -------
    List[str]
        The cleaned options
    """
    return [option.replace(" ", "_").lower() for option in choices]
def map_parse_choices(choices: List[str]) -> Dict[str, str]:
    """Create a mapping of clean arguments (keys) to original arguments (values).

    Parameters
    ----------
    choices: List[str]
        The options to be formatted

    Returns
    -------
    Dict[str, str]
        The mapping, with an extra ""->"" entry so empty input round-trips.
    """
    mapping: Dict[str, str] = {}
    for original in choices:
        mapping[original.lower().replace(" ", "_")] = original
    mapping[""] = ""
    return mapping
__docformat__ = "numpy"
import argparse
import logging
import os
from datetime import datetime, timedelta
from typing import List
import financedatabase
import yfinance as yf
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common import feedparser_view, newsapi_view
from openbb_terminal.common.quantitative_analysis import qa_view
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks import cboe_view
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
export_data,
valid_date,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import StockBaseController
from openbb_terminal.rich_config import (
MenuText,
console,
translate,
)
from openbb_terminal.stocks import stocks_helper
from openbb_terminal.stocks import stocks_view
# pylint: disable=R1710,import-outside-toplevel,R0913,R1702,no-member
logger = logging.getLogger(__name__)
class StocksController(StockBaseController):
    """Stocks Controller class."""
    # Commands available directly in this menu.
    CHOICES_COMMANDS = [
        "search",
        "load",
        "quote",
        "tob",
        "candle",
        "news",
        "resources",
        "codes",
    ]
    # Sub-menus reachable from this menu.
    CHOICES_MENUS = [
        "ta",
        "ba",
        "qa",
        "disc",
        "dps",
        "scr",
        "sia",
        "ins",
        "gov",
        "res",
        "fa",
        "bt",
        "dd",
        "ca",
        "options",
        "th",
        "forecast",
    ]
    PATH = "/stocks/"
    FILE_PATH = os.path.join(os.path.dirname(__file__), "README.md")
    # These lookups run at class-definition time and hit GitHub-hosted datasets;
    # fall back to empty dicts (disabling `search` and /stocks/sia) when offline.
    try:
        country = financedatabase.show_options("equities", "countries")
        sector = financedatabase.show_options("equities", "sectors")
        industry = financedatabase.show_options("equities", "industries")
    except Exception:
        country, sector, industry = {}, {}, {}
        console.print(
            "[red]Note: Some datasets from GitHub failed to load. This means that the `search` command and "
            "the /stocks/sia menu will not work. If other commands are failing please check your internet connection or "
            "communicate with your IT department that certain websites are blocked.[/red] \n"
        )
    # Exchanges supported by the `tob` (top-of-book) command.
    TOB_EXCHANGES = ["BZX", "EDGX", "BYX", "EDGA"]
    CHOICES_GENERATION = True
    def __init__(self, queue: List[str] = None):
        """Construct stocks controller."""
        super().__init__(queue)
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)
    def print_help(self):
        """Print help for the stocks menu, reflecting the currently loaded ticker."""
        stock_text = ""
        if self.ticker:
            # Label data as Intraday unless loaded at the daily (1440min) interval.
            s_intraday = (f"Intraday {self.interval}", "Daily")[
                self.interval == "1440min"
            ]
            stock_text += f"{s_intraday} {self.ticker}"
            if self.start:
                stock_text += f" (from {self.start.strftime('%Y-%m-%d')})"
        mt = MenuText("stocks/", 100)
        mt.add_cmd("search")
        mt.add_cmd("load")
        mt.add_raw("\n")
        mt.add_param("_ticker", stock_text)
        mt.add_raw(self.add_info)
        mt.add_raw("\n")
        # Ticker-dependent commands are greyed out until a ticker is loaded.
        mt.add_cmd("quote", self.ticker)
        mt.add_cmd("tob", self.ticker)
        mt.add_cmd("candle", self.ticker)
        mt.add_cmd("codes", self.ticker)
        mt.add_cmd("news", self.ticker)
        mt.add_raw("\n")
        mt.add_menu("th")
        mt.add_menu("options")
        mt.add_menu("disc")
        mt.add_menu("sia")
        mt.add_menu("dps")
        mt.add_menu("scr")
        mt.add_menu("ins")
        mt.add_menu("gov")
        mt.add_menu("ba")
        mt.add_menu("ca")
        mt.add_menu("fa", self.ticker)
        mt.add_menu("res", self.ticker)
        mt.add_menu("dd", self.ticker)
        mt.add_menu("bt", self.ticker)
        mt.add_menu("ta", self.ticker)
        mt.add_menu("qa", self.ticker)
        mt.add_menu("forecast", self.ticker)
        console.print(text=mt.menu_text, menu="Stocks")
def custom_reset(self):
"""Class specific component of reset command."""
if self.ticker:
return [
"stocks",
f"load {self.ticker}.{self.suffix}"
if self.suffix
else f"load {self.ticker}",
]
return []
    @log_start_end(log=logger)
    def call_search(self, other_args: List[str]):
        """Process search command: find tickers by query/country/sector/industry/exchange."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="search",
            description="Show companies matching the search query",
        )
        parser.add_argument(
            "-q",
            "--query",
            action="store",
            dest="query",
            type=str.lower,
            default="",
            nargs="+",
            help="The search term used to find company tickers",
        )
        # Argparse choices use the normalized (lowercase, underscored) spellings;
        # they are mapped back to the original names after parsing below.
        clean_countries = [x.lower().replace(" ", "_") for x in self.country]
        parser.add_argument(
            "-c",
            "--country",
            default="",
            choices=clean_countries,
            dest="country",
            metavar="country",
            type=str.lower,
            help="Search by country to find stocks matching the criteria",
        )
        parser.add_argument(
            "-s",
            "--sector",
            default="",
            choices=stocks_helper.format_parse_choices(self.sector),
            type=str.lower,
            metavar="sector",
            dest="sector",
            help="Search by sector to find stocks matching the criteria",
        )
        parser.add_argument(
            "-i",
            "--industry",
            default="",
            choices=stocks_helper.format_parse_choices(self.industry),
            type=str.lower,
            metavar="industry",
            dest="industry",
            help="Search by industry to find stocks matching the criteria",
        )
        parser.add_argument(
            "-e",
            "--exchange",
            default="",
            choices=stocks_helper.format_parse_choices(
                list(stocks_helper.market_coverage_suffix.keys())
            ),
            type=str.lower,
            metavar="exchange",
            dest="exchange_country",
            help="Search by a specific exchange country to find stocks matching the criteria",
        )
        # Allow `search apple` as shorthand for `search -q apple`.
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-q")
        ns_parser = self.parse_known_args_and_warn(
            parser,
            other_args,
            EXPORT_ONLY_RAW_DATA_ALLOWED,
            limit=10,
        )
        if ns_parser:
            # Map the normalized argparse values back to the original spellings
            # expected by stocks_helper.search.
            sector = stocks_helper.map_parse_choices(self.sector)[ns_parser.sector]
            industry = stocks_helper.map_parse_choices(self.industry)[
                ns_parser.industry
            ]
            exchange = stocks_helper.map_parse_choices(
                list(stocks_helper.market_coverage_suffix.keys())
            )[ns_parser.exchange_country]
            stocks_helper.search(
                query=" ".join(ns_parser.query),
                country=ns_parser.country,
                sector=sector,
                industry=industry,
                exchange_country=exchange,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )
@log_start_end(log=logger)
def call_tob(self, other_args: List[str]):
"""Process quote command."""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="quote",
description="Get top of book for loaded ticker from selected exchange",
)
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
required="-h" not in other_args and not self.ticker,
help="Ticker to get data for",
)
parser.add_argument(
"-e",
"--exchange",
default="BZX",
choices=self.TOB_EXCHANGES,
type=str,
dest="exchange",
)
if not self.ticker:
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-t")
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
ticker = ns_parser.s_ticker if ns_parser.s_ticker else self.ticker
cboe_view.display_top_of_book(ticker, ns_parser.exchange)
    @log_start_end(log=logger)
    def call_quote(self, other_args: List[str]):
        """Process quote command."""
        # Default to the loaded ticker (with exchange suffix, e.g. BNS.TO).
        ticker = self.ticker + "." + self.suffix if self.suffix else self.ticker
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="quote",
            description="Current quote for stock ticker",
        )
        # -t is only required when no ticker has been loaded yet.
        if self.ticker:
            parser.add_argument(
                "-t",
                "--ticker",
                action="store",
                dest="s_ticker",
                default=ticker,
                help="Stock ticker",
            )
        else:
            parser.add_argument(
                "-t",
                "--ticker",
                action="store",
                dest="s_ticker",
                required="-h" not in other_args,
                help=translate("stocks/QUOTE_ticker"),
            )
        # For the case where a user uses: 'quote BB'
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-t")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            stocks_view.display_quote(ns_parser.s_ticker)
@log_start_end(log=logger)
def call_codes(self, _):
"""Process codes command."""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="codes",
description="Show CIK, FIGI and SCI code from polygon for loaded ticker.",
)
ns_parser = self.parse_known_args_and_warn(parser, _)
if ns_parser:
if self.ticker:
stocks_helper.show_codes_polygon(self.ticker)
else:
console.print("No ticker loaded. First use `load {ticker}`\n")
    @log_start_end(log=logger)
    def call_candle(self, other_args: List[str]):
        """Process candle command: chart (or print raw) historic data for the loaded ticker."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="candle",
            description="Shows historic data for a stock",
        )
        # NOTE: store_false — passing -p selects the interactive plotly chart
        # (plotly=False is forwarded as use_matplotlib=False below).
        parser.add_argument(
            "-p",
            "--plotly",
            dest="plotly",
            action="store_false",
            default=True,
            help="Flag to show interactive plotly chart",
        )
        parser.add_argument(
            "--sort",
            choices=stocks_helper.CANDLE_SORT,
            default="",
            type=str.lower,
            dest="sort",
            help="Choose a column to sort by. Only works when raw data is displayed.",
        )
        parser.add_argument(
            "-r",
            "--reverse",
            action="store_true",
            dest="reverse",
            default=False,
            help=(
                "Data is sorted in descending order by default. "
                "Reverse flag will sort it in an ascending way. "
                "Only works when raw data is displayed."
            ),
        )
        parser.add_argument(
            "--raw",
            action="store_true",
            dest="raw",
            default=False,
            help="Shows raw data instead of chart.",
        )
        parser.add_argument(
            "-t",
            "--trend",
            action="store_true",
            default=False,
            help="Flag to add high and low trends to candle",
            dest="trendlines",
        )
        parser.add_argument(
            "--ma",
            dest="mov_avg",
            type=str,
            help=(
                "Add moving average in number of days to plot and separate by a comma. "
                "Value for ma (moving average) keyword needs to be greater than 1."
            ),
            default=None,
        )
        parser.add_argument(
            "--log",
            help="Plot with y axis on log scale",
            action="store_true",
            default=False,
            dest="logy",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser,
            other_args,
            EXPORT_BOTH_RAW_DATA_AND_FIGURES,
            limit=20,
        )
        if ns_parser:
            if self.ticker:
                if ns_parser.raw:
                    qa_view.display_raw(
                        data=self.stock,
                        sortby=ns_parser.sort,
                        ascend=ns_parser.reverse,
                        limit=ns_parser.limit,
                    )
                else:
                    data = stocks_helper.process_candle(self.stock)
                    # Parse the comma-separated --ma list; invalid entries are
                    # reported and skipped rather than aborting the command.
                    mov_avgs = []
                    if ns_parser.mov_avg:
                        mov_list = (num for num in ns_parser.mov_avg.split(","))
                        for num in mov_list:
                            try:
                                num = int(num)
                                if num <= 1:
                                    raise ValueError
                                mov_avgs.append(num)
                            except ValueError:
                                console.print(
                                    f"[red]{num} is not a valid moving average, must be an integer greater than 1."
                                )
                    stocks_helper.display_candle(
                        symbol=self.ticker,
                        data=data,
                        use_matplotlib=ns_parser.plotly,
                        intraday=self.interval != "1440min",
                        add_trend=ns_parser.trendlines,
                        ma=mov_avgs,
                        yscale="log" if ns_parser.logy else "linear",
                    )
                export_data(
                    ns_parser.export,
                    os.path.dirname(os.path.abspath(__file__)),
                    f"{self.ticker}",
                    self.stock,
                )
            else:
                console.print("No ticker loaded. First use 'load <ticker>'")
    @log_start_end(log=logger)
    def call_news(self, other_args: List[str]):
        """Process news command: latest news of the loaded company."""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="news",
            description="latest news of the company",
        )
        parser.add_argument(
            "-d",
            "--date",
            action="store",
            dest="n_start_date",
            type=valid_date,
            default=datetime.now() - timedelta(days=7),
            help="The starting date (format YYYY-MM-DD) to search articles from",
        )
        # NOTE(review): store_false means -o flips n_oldest to False; n_oldest is
        # then forwarded as show_newest — the inverted naming looks deliberate
        # but confirm against newsapi_view.display_news before changing.
        parser.add_argument(
            "-o",
            "--oldest",
            action="store_false",
            dest="n_oldest",
            default=True,
            help="Show oldest articles first",
        )
        parser.add_argument(
            "-s",
            "--sources",
            dest="sources",
            type=str,
            default="",
            help="Show news only from the sources specified (e.g bloomberg,reuters)",
        )
        # Allow `news 5` shorthand for `news -l 5` (-l / --source are added by
        # parse_known_args_and_warn).
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED, limit=3
        )
        if ns_parser:
            if self.ticker:
                # Query by company short name when available, else the raw ticker.
                if ns_parser.source == "NewsApi":
                    d_stock = yf.Ticker(self.ticker).info
                    newsapi_view.display_news(
                        query=d_stock["shortName"].replace(" ", "+")
                        if "shortName" in d_stock
                        else self.ticker,
                        limit=ns_parser.limit,
                        start_date=ns_parser.n_start_date.strftime("%Y-%m-%d"),
                        show_newest=ns_parser.n_oldest,
                        sources=ns_parser.sources,
                    )
                elif ns_parser.source == "Feedparser":
                    d_stock = yf.Ticker(self.ticker).info
                    feedparser_view.display_news(
                        term=d_stock["shortName"].replace(" ", "+")
                        if "shortName" in d_stock
                        else self.ticker,
                        sources=ns_parser.sources,
                        limit=ns_parser.limit,
                        export=ns_parser.export,
                    )
            else:
                console.print("Use 'load <ticker>' prior to this command!")
@log_start_end(log=logger)
def call_disc(self, _):
    """Route into the stock discovery submenu."""
    from openbb_terminal.stocks.discovery.disc_controller import (
        DiscoveryController as _Disc,
    )

    self.queue = self.load_class(_Disc, self.queue)
@log_start_end(log=logger)
def call_dps(self, _):
    """Route into the dark pool shorts submenu."""
    from openbb_terminal.stocks.dark_pool_shorts.dps_controller import (
        DarkPoolShortsController,
    )

    controller_args = (self.ticker, self.start, self.stock, self.queue)
    self.queue = self.load_class(DarkPoolShortsController, *controller_args)
@log_start_end(log=logger)
def call_scr(self, _):
    """Route into the stock screener submenu."""
    from openbb_terminal.stocks.screener.screener_controller import (
        ScreenerController as _Screener,
    )

    self.queue = self.load_class(_Screener, self.queue)
@log_start_end(log=logger)
def call_sia(self, _):
    """Process sia command."""
    # NOTE(review): docstring previously said "Process ins command." —
    # copy-paste from call_ins; corrected to match the sia command.
    from openbb_terminal.stocks.sector_industry_analysis.sia_controller import (
        SectorIndustryAnalysisController,
    )

    self.queue = self.load_class(
        SectorIndustryAnalysisController, self.ticker, self.queue
    )
@log_start_end(log=logger)
def call_ins(self, _):
    """Route into the insider trading submenu."""
    from openbb_terminal.stocks.insider.insider_controller import InsiderController

    controller_args = (
        self.ticker,
        self.start,
        self.interval,
        self.stock,
        self.queue,
    )
    self.queue = self.load_class(InsiderController, *controller_args)
@log_start_end(log=logger)
def call_gov(self, _):
    """Route into the government trading submenu."""
    from openbb_terminal.stocks.government.gov_controller import (
        GovController as _Gov,
    )

    self.queue = self.load_class(_Gov, self.ticker, self.queue)
@log_start_end(log=logger)
def call_options(self, _):
    """Route into the options submenu."""
    from openbb_terminal.stocks.options.options_controller import (
        OptionsController as _Options,
    )

    self.queue = self.load_class(_Options, self.ticker, self.queue)
@log_start_end(log=logger)
def call_th(self, _):
    """Route into the trading hours submenu."""
    from openbb_terminal.stocks.tradinghours import tradinghours_controller as _th

    self.queue = self.load_class(
        _th.TradingHoursController,
        self.ticker,
        self.queue,
    )
@log_start_end(log=logger)
def call_res(self, _):
    """Route into the research submenu (requires a loaded ticker)."""
    # Guard clause: nothing to research without a loaded ticker
    if not self.ticker:
        console.print("Use 'load <ticker>' prior to this command!")
        return
    from openbb_terminal.stocks.research.res_controller import (
        ResearchController,
    )

    self.queue = self.load_class(
        ResearchController, self.ticker, self.start, self.interval, self.queue
    )
@log_start_end(log=logger)
def call_dd(self, _):
    """Route into the due diligence submenu (requires a loaded ticker)."""
    if not self.ticker:
        console.print("Use 'load <ticker>' prior to this command!")
        return
    from openbb_terminal.stocks.due_diligence import dd_controller

    self.queue = self.load_class(
        dd_controller.DueDiligenceController,
        self.ticker,
        self.start,
        self.interval,
        self.stock,
        self.queue,
    )
@log_start_end(log=logger)
def call_ca(self, _):
    """Route into the comparison analysis submenu."""
    from openbb_terminal.stocks.comparison_analysis import ca_controller

    if self.ticker:
        # Append the exchange suffix (e.g. ".TO") when one is set
        full_symbol = f"{self.ticker}.{self.suffix}" if self.suffix else self.ticker
        initial_tickers: object = [full_symbol]
    else:
        initial_tickers = ""
    self.queue = self.load_class(
        ca_controller.ComparisonAnalysisController, initial_tickers, self.queue
    )
@log_start_end(log=logger)
def call_fa(self, _):
    """Route into the fundamental analysis submenu (requires a loaded ticker)."""
    if not self.ticker:
        console.print("Use 'load <ticker>' prior to this command!")
        return
    from openbb_terminal.stocks.fundamental_analysis import fa_controller

    self.queue = self.load_class(
        fa_controller.FundamentalAnalysisController,
        self.ticker,
        self.start,
        self.interval,
        self.suffix,
        self.queue,
    )
@log_start_end(log=logger)
def call_bt(self, _):
    """Route into the backtesting submenu (requires a loaded ticker)."""
    if not self.ticker:
        console.print("Use 'load <ticker>' prior to this command!")
        return
    from openbb_terminal.stocks.backtesting import bt_controller

    self.queue = self.load_class(
        bt_controller.BacktestingController, self.ticker, self.stock, self.queue
    )
@log_start_end(log=logger)
def call_ta(self, _):
    """Route into the technical analysis submenu (requires a loaded ticker)."""
    if not self.ticker:
        console.print("Use 'load <ticker>' prior to this command!")
        return
    from openbb_terminal.stocks.technical_analysis import ta_controller

    self.queue = self.load_class(
        ta_controller.TechnicalAnalysisController,
        self.ticker,
        self.start,
        self.interval,
        self.stock,
        self.queue,
    )
@log_start_end(log=logger)
def call_ba(self, _):
    """Route into the behavioural analysis submenu."""
    from openbb_terminal.stocks.behavioural_analysis import ba_controller

    controller_args = (self.ticker, self.start, self.queue)
    self.queue = self.load_class(
        ba_controller.BehaviouralAnalysisController, *controller_args
    )
@log_start_end(log=logger)
def call_qa(self, _):
    """Route into the quantitative analysis submenu (requires a loaded ticker)."""
    if not self.ticker:
        console.print("Use 'load <ticker>' prior to this command!")
        return
    from openbb_terminal.stocks.quantitative_analysis import qa_controller

    self.queue = self.load_class(
        qa_controller.QaController,
        self.ticker,
        self.start,
        self.interval,
        self.stock,
        self.queue,
    )
@log_start_end(log=logger)
def call_forecast(self, _):
    """Route into the forecasting submenu."""
    from openbb_terminal.forecast import forecast_controller as _fc

    self.queue = self.load_class(
        _fc.ForecastController,
        self.ticker,
        self.stock,
        self.queue,
    )
import os
from datetime import datetime
import logging
import pyEX
import requests
import pandas as pd
import yfinance as yf
from alpha_vantage.timeseries import TimeSeries
from openbb_terminal.decorators import check_api_key
from openbb_terminal.rich_config import console
from openbb_terminal import config_terminal as cfg
# pylint: disable=unsupported-assignment-operation,no-member
logger = logging.getLogger(__name__)
def load_stock_av(
    symbol: str, start_date: datetime, end_date: datetime
) -> pd.DataFrame:
    """Load daily adjusted OHLCV candles from AlphaVantage.

    Parameters
    ----------
    symbol : str
        Ticker to load
    start_date : datetime
        First date to keep in the result
    end_date : datetime
        Last date to keep in the result

    Returns
    -------
    pd.DataFrame
        Daily candles indexed by date; empty on request error or no data
    """
    try:
        ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
        candles: pd.DataFrame = ts.get_daily_adjusted(
            symbol=symbol, outputsize="full"
        )[0]
    except Exception as e:
        console.print(e)
        return pd.DataFrame()

    # AlphaVantage columns look like "1. open" — keep only the name part
    candles.columns = [col.split(". ")[1].capitalize() for col in candles.columns]
    candles = candles.rename(columns={"Adjusted close": "Adj Close"})

    # Check that loading a stock was not successful
    if candles.empty:
        console.print("No data found.")
        return pd.DataFrame()

    candles.index = candles.index.tz_localize(None)
    candles.sort_index(ascending=True, inplace=True)

    # Keep only rows inside the requested [start_date, end_date] window
    in_window = (candles.index >= start_date.strftime("%Y-%m-%d")) & (
        candles.index <= end_date.strftime("%Y-%m-%d")
    )
    return candles[in_window]
def load_stock_yf(
    symbol: str, start_date: datetime, end_date: datetime, weekly: bool, monthly: bool
) -> pd.DataFrame:
    """Load OHLCV candles from Yahoo Finance.

    Parameters
    ----------
    symbol : str
        Ticker to load
    start_date : datetime
        First date of data to request
    end_date : datetime
        Last date of data to request
    weekly : bool
        Request weekly candles
    monthly : bool
        Request monthly candles (takes precedence over weekly)

    Returns
    -------
    pd.DataFrame
        Candles indexed by date (index named "date"); empty when no data
    """
    # TODO: Better handling of interval with week/month
    int_ = "1d"
    if weekly:
        int_ = "1wk"
    if monthly:
        int_ = "1mo"

    # Win10 version of mktime cannot cope with dates before 1970
    if os.name == "nt" and start_date < datetime(1970, 1, 1):
        start_date = datetime(
            1970, 1, 2
        )  # 1 day buffer in case of timezone adjustments

    # Adding a dropna for weekly and monthly because these include weird NaN columns.
    df_stock_candidate = yf.download(
        symbol, start=start_date, end=end_date, progress=False, interval=int_
    ).dropna(axis=0)

    # Check that loading a stock was not successful
    if df_stock_candidate.empty:
        return pd.DataFrame()

    # BUG FIX: the index name was previously assigned the tuple
    # ("date", int_string); pandas expects a plain string here.
    df_stock_candidate.index.name = "date"
    return df_stock_candidate
def load_stock_eodhd(
    symbol: str, start_date: datetime, end_date: datetime, weekly: bool, monthly: bool
) -> pd.DataFrame:
    """Load OHLCV candles from End Of Day Historical Data.

    Parameters
    ----------
    symbol : str
        Ticker to load
    start_date : datetime
        First date of data to request
    end_date : datetime
        Last date of data to request
    weekly : bool
        Request weekly candles
    monthly : bool
        Request monthly candles (takes precedence over weekly)

    Returns
    -------
    pd.DataFrame
        Candles indexed by date; empty on API error or no data
    """
    int_ = "d"
    if weekly:
        int_ = "w"
    elif monthly:
        int_ = "m"

    # BUG FIX: the start date was previously appended without its "from="
    # query key, so the API silently ignored the requested start date.
    request_url = (
        f"https://eodhistoricaldata.com/api/eod/"
        f"{symbol.upper()}?"
        f"from={start_date.strftime('%Y-%m-%d')}&"
        f"to={end_date.strftime('%Y-%m-%d')}&"
        f"period={int_}&"
        f"api_token={cfg.API_EODHD_KEY}&"
        f"fmt=json&"
        f"order=d"
    )
    r = requests.get(request_url)
    if r.status_code != 200:
        console.print("[red]Invalid API Key for eodhistoricaldata [/red]")
        console.print(
            "Get your Key here: https://eodhistoricaldata.com/r/?ref=869U7F4J"
        )
        return pd.DataFrame()

    r_json = r.json()
    df_stock_candidate = pd.DataFrame(r_json).dropna(axis=0)

    # Check that loading a stock was not successful
    if df_stock_candidate.empty:
        console.print("No data found from End Of Day Historical Data.")
        return df_stock_candidate

    df_stock_candidate = df_stock_candidate[
        ["date", "open", "high", "low", "close", "adjusted_close", "volume"]
    ]
    df_stock_candidate = df_stock_candidate.rename(
        columns={
            "date": "Date",
            "close": "Close",
            "high": "High",
            "low": "Low",
            "open": "Open",
            "adjusted_close": "Adj Close",
            "volume": "Volume",
        }
    )
    df_stock_candidate["Date"] = pd.to_datetime(df_stock_candidate.Date)
    df_stock_candidate.set_index("Date", inplace=True)
    df_stock_candidate.sort_index(ascending=True, inplace=True)
    return df_stock_candidate
@check_api_key(["API_IEX_TOKEN"])
def load_stock_iex_cloud(symbol: str, iexrange: str) -> pd.DataFrame:
    """Load historical candles from IEX Cloud.

    Parameters
    ----------
    symbol : str
        Ticker to load
    iexrange : str
        Timeframe string accepted by the IEX chart endpoint

    Returns
    -------
    pd.DataFrame
        OHLCV candles; empty on any API error
    """
    candles = pd.DataFrame()
    try:
        client = pyEX.Client(api_token=cfg.API_IEX_TOKEN, version="v1")
        candles = client.chartDF(symbol, timeframe=iexrange)
        # Check that loading a stock was not successful
        if candles.empty:
            console.print("No data found.")
            return candles
    except Exception as e:
        if "The API key provided is not valid" in str(e):
            console.print("[red]Invalid API Key[/red]")
        else:
            console.print(e)
        return candles

    # Keep the fully-adjusted ("f"-prefixed) fields and map them to OHLCV names
    column_map = {
        "close": "Close",
        "fHigh": "High",
        "fLow": "Low",
        "fOpen": "Open",
        "fClose": "Adj Close",
        "volume": "Volume",
    }
    candles = candles[list(column_map)].rename(columns=column_map)
    candles.sort_index(ascending=True, inplace=True)
    return candles
@check_api_key(["API_POLYGON_KEY"])
def load_stock_polygon(
    symbol: str, start_date: datetime, end_date: datetime, weekly: bool, monthly: bool
) -> pd.DataFrame:
    """Load aggregate candles from the Polygon API.

    Parameters
    ----------
    symbol : str
        Ticker to load
    start_date : datetime
        First date of data to request
    end_date : datetime
        Last date of data to request
    weekly : bool
        Request weekly aggregates
    monthly : bool
        Request monthly aggregates

    Returns
    -------
    pd.DataFrame
        Candles indexed by date; empty on request error or empty reply
    """
    # Polygon allows: day, minute, hour, day, week, month, quarter, year
    if weekly:
        timespan = "week"
    elif monthly:
        timespan = "month"
    else:
        timespan = "day"

    request_url = (
        f"https://api.polygon.io/v2/aggs/ticker/"
        f"{symbol.upper()}/range/1/{timespan}/"
        f"{start_date.strftime('%Y-%m-%d')}/{end_date.strftime('%Y-%m-%d')}?adjusted=true"
        f"&sort=desc&limit=49999&apiKey={cfg.API_POLYGON_KEY}"
    )
    r = requests.get(request_url)
    if r.status_code != 200:
        console.print("[red]Error in polygon request[/red]")
        return pd.DataFrame()

    r_json = r.json()
    if "results" not in r_json.keys():
        console.print("[red]No results found in polygon reply.[/red]")
        return pd.DataFrame()

    candles = pd.DataFrame(r_json["results"]).rename(
        columns={
            "o": "Open",
            "c": "Adj Close",
            "h": "High",
            "l": "Low",
            "t": "date",
            "v": "Volume",
            "n": "Transactions",
        }
    )
    # Polygon timestamps arrive as epoch milliseconds
    candles["date"] = pd.to_datetime(candles.date, unit="ms")
    # TODO: Clean up Close vs Adj Close throughout
    candles["Close"] = candles["Adj Close"]
    return candles.sort_values(by="date").set_index("date")
def load_quote(symbol: str) -> pd.DataFrame:
    """Ticker quote. [Source: YahooFinance]

    Parameters
    ----------
    symbol : str
        Ticker

    Returns
    -------
    pd.DataFrame
        Formatted one-column quote table; empty when the ticker is invalid
    """
    ticker = yf.Ticker(symbol)
    try:
        info = ticker.info
        quote_df = pd.DataFrame(
            [
                {
                    "Symbol": info["symbol"],
                    "Name": info["shortName"],
                    "Price": info["regularMarketPrice"],
                    "Open": info["regularMarketOpen"],
                    "High": info["dayHigh"],
                    "Low": info["dayLow"],
                    "Previous Close": info["previousClose"],
                    "Volume": info["volume"],
                    "52 Week High": info["fiftyTwoWeekHigh"],
                    "52 Week Low": info["fiftyTwoWeekLow"],
                }
            ]
        )
        quote_df["Change"] = quote_df["Price"] - quote_df["Previous Close"]
        quote_df["Change %"] = quote_df.apply(
            lambda x: f'{((x["Change"] / x["Previous Close"]) * 100):.2f}%',
            axis="columns",
        )
        # Render prices with two decimals and volume with thousand separators
        price_columns = [
            "Price",
            "Open",
            "High",
            "Low",
            "Previous Close",
            "52 Week High",
            "52 Week Low",
            "Change",
        ]
        for col in price_columns:
            quote_df[col] = quote_df[col].apply(lambda x: f"{x:.2f}")
        quote_df["Volume"] = quote_df["Volume"].apply(lambda x: f"{x:,}")
        # Transpose so the fields become the row index
        return quote_df.set_index("Symbol").T
    except KeyError:
        logger.exception("Invalid stock ticker")
        console.print(f"Invalid stock ticker: {symbol}")
        return pd.DataFrame()
__docformat__ = "numpy"
import logging
from typing import List, Optional
import pandas as pd
from finvizfinance.screener import (
financial,
overview,
ownership,
performance,
technical,
valuation,
)
from finvizfinance.screener.overview import Overview
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_similar_companies(
    symbol: str, compare_list: Optional[List[str]] = None
) -> List[str]:
    """Get similar companies from Finviz.

    Parameters
    ----------
    symbol : str
        Ticker to find comparisons for
    compare_list : Optional[List[str]]
        List of fields to compare, ["Sector", "Industry", "Country"];
        defaults to ["Sector", "Industry"] when None
        (annotation fixed: None is an accepted and default value)

    Returns
    -------
    List[str]
        List of similar companies; [""] on any Finviz error
    """
    try:
        compare_list = ["Sector", "Industry"] if compare_list is None else compare_list
        similar = (
            Overview().compare(symbol, compare_list, verbose=0)["Ticker"].to_list()
        )
    except Exception as e:
        logger.exception(str(e))
        console.print(e)
        similar = [""]
    return similar
@log_start_end(log=logger)
def get_comparison_data(similar: List[str], data_type: str = "overview"):
    """Screener Overview.

    Parameters
    ----------
    similar:
        List of similar companies.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    data_type : str
        Data type between: overview, valuation, financial, ownership, performance, technical

    Returns
    -------
    pd.DataFrame
        Dataframe with overview, valuation, financial, ownership, performance or technical
    """
    # Dispatch table instead of an if/elif chain
    screener_classes = {
        "overview": overview.Overview,
        "valuation": valuation.Valuation,
        "financial": financial.Financial,
        "ownership": ownership.Ownership,
        "performance": performance.Performance,
        "technical": technical.Technical,
    }
    if data_type not in screener_classes:
        console.print("Invalid selected screener type")
        return pd.DataFrame()

    screen = screener_classes[data_type]()
    screen.set_filter(ticker=",".join(similar))
    try:
        return screen.screener_view(verbose=0)
    except (IndexError, AttributeError):
        console.print("[red]Invalid data from website[red]\n")
        return pd.DataFrame()
__docformat__ = "numpy"
import logging
from datetime import datetime, timedelta
from typing import List, Optional, Tuple
import warnings
import numpy as np
import pandas as pd
import yfinance as yf
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Maps the single-letter candle selector used by callers to the matching
# yfinance download column name.
d_candle_types = {
    "o": "Open",
    "h": "High",
    "l": "Low",
    "c": "Close",
    "a": "Adj Close",
    "v": "Volume",
}
@log_start_end(log=logger)
def get_historical(
    similar: List[str],
    start_date: Optional[str] = None,
    candle_type: str = "a",
) -> pd.DataFrame:
    """Get historical prices for all comparison stocks.

    Parameters
    ----------
    similar: List[str]
        List of similar tickers.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    start_date: Optional[str], optional
        Initial date (e.g., 2021-10-01). Defaults to 1 year back
    candle_type: str, optional
        Candle variable to compare; one of o, h, l, c, a, v, or r
        (r = daily returns computed from adjusted close). Default "a".

    Returns
    -------
    pd.DataFrame
        One column of the selected candle variable per ticker
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d")

    candle_type = candle_type.lower()
    use_returns = candle_type == "r"
    if use_returns:
        # Returns are derived from the adjusted close column
        candle_type = "a"

    # One yfinance call for all tickers: each ticker becomes a column
    prices = yf.download(similar, start=start_date, progress=False, threads=False)[
        d_candle_types[candle_type]
    ]
    returnable = prices if prices.empty else prices[similar]

    if use_returns:
        # Period-over-period return: divide by the shifted series, minus 1
        previous = returnable.shift(1)[1:]
        returnable = returnable.div(previous) - 1

    df_similar = returnable[similar]
    if np.any(df_similar.isna()):
        nan_tickers = df_similar.columns[df_similar.isna().sum() >= 1].to_list()
        console.print(
            f"NaN values found in: {', '.join(nan_tickers)}. Backfilling data"
        )
        df_similar = df_similar.fillna(method="bfill")

    return df_similar.dropna(axis=1, how="all")
@log_start_end(log=logger)
def get_correlation(
    similar: List[str],
    start_date: Optional[str] = None,
    candle_type: str = "a",
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get historical price correlation. [Source: Yahoo Finance]

    Parameters
    ----------
    similar : List[str]
        List of similar tickers.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    start_date : Optional[str], optional
        Initial date (e.g., 2021-10-01). Defaults to 1 year back
    candle_type : str, optional
        OHLCA column to use for candles or R for returns, by default "a"

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        (correlation matrix, historical prices used to compute it)
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d")

    df_similar = get_historical(similar, start_date, candle_type)
    return df_similar.corr(), df_similar
@log_start_end(log=logger)
def get_volume(
    similar: List[str],
    start_date: Optional[str] = None,
) -> pd.DataFrame:
    """Get stock volume. [Source: Yahoo Finance]

    Parameters
    ----------
    similar : List[str]
        List of similar tickers.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    start_date : Optional[str], optional
        Initial date (e.g., 2021-10-01). Defaults to 1 year back

    Returns
    -------
    pd.DataFrame
        Dataframe with volume for stock
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d")

    # Removed a redundant `df[df.columns]` self-selection (a no-op)
    return get_historical(similar, start_date, "v")
@log_start_end(log=logger)
def get_1y_sp500() -> pd.DataFrame:
    """Get the last year of Adj Close prices for all current SP500 stocks.

    They are scraped daily using yfinance at
    https://github.com/jmaslek/daily_sp_500

    Returns
    -------
    pd.DataFrame
        Last 1 year of closes for all SP500 stocks, indexed by date
    """
    csv_url = (
        "https://raw.githubusercontent.com/jmaslek/daily_sp_500/main/"
        "SP500_prices_1yr.csv"
    )
    return pd.read_csv(csv_url, index_col=0)
# pylint:disable=E1137,E1101
@log_start_end(log=logger)
def get_sp500_comps_tsne(
    symbol: str,
    lr: int = 200,
) -> pd.DataFrame:
    """Run TSNE on SP500 tickers (along with ticker if not in SP500).

    TSNE is a method of visualising higher dimensional data:
    https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
    Note that the TSNE numbers are meaningless and will be arbitrary if run again.

    Parameters
    ----------
    symbol: str
        Ticker to get comparisons to
    lr: int
        Learning rate for TSNE

    Returns
    -------
    pd.DataFrame
        Tickers sorted by squared distance to the selected ticker in TSNE space
    """
    close_vals: pd.DataFrame = get_1y_sp500()
    if symbol not in close_vals.columns:
        # Ticker is not in the SP500 snapshot: fetch its last year and join it
        df_symbol = yf.download(symbol, start=close_vals.index[0], progress=False)[
            "Adj Close"
        ].to_frame()
        df_symbol.columns = [symbol]
        df_symbol.index = df_symbol.index.astype(str)
        close_vals = close_vals.join(df_symbol)

    close_vals = close_vals.fillna(method="bfill")
    rets = close_vals.pct_change()[1:].T

    # Future warning from sklearn. Think 1.2 will stop printing it
    warnings.filterwarnings("ignore", category=FutureWarning)
    model = TSNE(learning_rate=lr, init="pca")
    tsne_features = model.fit_transform(normalize(rets))
    warnings.resetwarnings()

    embedded = pd.DataFrame(
        {"X": tsne_features[:, 0], "Y": tsne_features[:, 1]}, index=rets.index
    )
    # Squared Euclidean distance to the selected symbol in the 2-D embedding
    x0, y0 = embedded.loc[symbol]
    embedded["dist"] = (embedded.X - x0) ** 2 + (embedded.Y - y0) ** 2
    return embedded.sort_values(by="dist")
__docformat__ = "numpy"
import logging
from typing import List, Tuple
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def find_smallest_num_data_point(results_list: List[dict]) -> int:
    """Find the smallest number of sentiment data points across tickers.

    This is necessary because if one ticker has more or fewer data points
    than another, the plot would hit an indexing error; every ticker is
    later trimmed to this common minimum length.

    Parameters
    ----------
    results_list : List[dict]
        List of dicts storing ticker data

    Returns
    -------
    int
        Smallest total number of sentiment data points across valid
        tickers, or 0 when no ticker has usable sentiment data
    """
    counts = [
        len(result_json["sentimentAnalysis"].values())
        for result_json in results_list
        if "ticker" in result_json
        and "sentimentAnalysis" in result_json
        and len(result_json["sentimentAnalysis"].values()) > 0
    ]
    # Guard: min() on an empty sequence raises ValueError; return 0 so
    # callers see "no data" instead of crashing.
    return min(counts) if counts else 0
@log_start_end(log=logger)
def get_sentiments(symbols: List[str]) -> pd.DataFrame:
    """Get sentiment analysis for several symbols from FinBrain's API.

    NOTE: this function mutates the caller's `symbols` list in place,
    removing tickers for which no data could be retrieved (side effect
    kept for backwards compatibility).

    Parameters
    ----------
    symbols : List[str]
        List of tickers to get sentiment.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().

    Returns
    -------
    pd.DataFrame
        Sentiment analysis per ticker, one column per symbol
    """
    df_sentiment = pd.DataFrame()
    dates_sentiment: List[str] = []
    symbols_to_remove = []
    # BUG FIX: keep (ticker, payload) pairs. Previously results were
    # matched back to `symbols` by position, so a single failed request
    # shifted the alignment and attributed data to the wrong tickers.
    results_list = []
    for ticker in symbols:
        result = requests.get(f"https://api.finbrain.tech/v0/sentiments/{ticker}")
        # Check status code, if its correct then convert to dict using .json()
        if result.status_code == 200:
            results_list.append((ticker, result.json()))
        else:
            console.print(
                f"Request error in retrieving {ticker} sentiment from FinBrain API"
            )
            symbols_to_remove.append(ticker)

    # Finds the smallest amount of data points from any of the tickers as to
    # not run into an indexing error when graphing
    smallest_num_data_point = find_smallest_num_data_point(
        [payload for _, payload in results_list]
    )

    for ticker, result_json in results_list:
        # Checks to see if sentiment data is present in the payload
        if (
            "ticker" in result_json
            and "sentimentAnalysis" in result_json
            and len(result_json["sentimentAnalysis"].values()) > 0
        ):
            sentiments = list(result_json["sentimentAnalysis"].values())
            dates_sentiment = list(result_json["sentimentAnalysis"].keys())
            # Trim every ticker to the common minimum so all columns share
            # the same index length
            if len(sentiments) > smallest_num_data_point:
                sentiments = sentiments[0:smallest_num_data_point]
                dates_sentiment = dates_sentiment[0:smallest_num_data_point]
            df_sentiment[ticker] = [float(val) for val in sentiments]
        else:
            console.print(
                f"Unexpected data format or no data from FinBrain API for {ticker}"
            )
            symbols_to_remove.append(ticker)

    for ticker in symbols_to_remove:
        symbols.remove(ticker)

    if not df_sentiment.empty:
        df_sentiment.index = dates_sentiment
        df_sentiment.sort_index(ascending=True, inplace=True)
    return df_sentiment
@log_start_end(log=logger)
def get_sentiment_correlation(
    similar: List[str],
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get correlation of sentiments across similar companies. [Source: FinBrain]

    Parameters
    ----------
    similar : List[str]
        Similar companies to compare sentiment for.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        (sentiment correlation matrix, raw sentiment per ticker)
    """
    df_sentiment = get_sentiments(similar)
    return df_sentiment.corr(), df_sentiment
__docformat__ = "numpy"
import logging
import os
from datetime import datetime, timedelta
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from sklearn.preprocessing import MinMaxScaler
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
print_rich_table,
)
from openbb_terminal.stocks.comparison_analysis import yahoo_finance_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# Maps the single-letter candle selector to a human-readable column name;
# "r" marks daily returns derived from adjusted close.
d_candle_types = {
    "o": "Open",
    "h": "High",
    "l": "Low",
    "c": "Close",
    "a": "Adj Close",
    "v": "Volume",
    "r": "Returns",
}
@log_start_end(log=logger)
def display_historical(
    similar: List[str],
    start_date: Optional[str] = None,
    candle_type: str = "a",
    normalize: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display historical stock prices. [Source: Yahoo Finance]

    Parameters
    ----------
    similar: List[str]
        List of similar tickers.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    start_date: Optional[str], optional
        Initial date (e.g., 2021-10-01). Defaults to 1 year back
    candle_type: str, optional
        OHLCA column to use, or R for daily returns from Adjusted Close
    normalize: bool, optional
        MinMax-normalize all series to [0, 1], defaults True
    export: str, optional
        Format to export historical prices, by default ""
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_similar = yahoo_finance_model.get_historical(similar, start_date, candle_type)

    if normalize:
        # Rescale each series onto [0, 1] so they are visually comparable
        scaler = MinMaxScaler()
        df_similar = pd.DataFrame(
            scaler.fit_transform(df_similar),
            columns=df_similar.columns,
            index=df_similar.index,
        )

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ax.plot(df_similar, label=df_similar.columns.to_list())
    ax.set_title("Historical price of similar companies")
    ax.set_ylabel(f"{['','Normalized'][normalize]} Share Price {['($)',''][normalize]}")
    # ensures that the historical data starts from same datapoint
    ax.set_xlim([df_similar.index[0], df_similar.index[-1]])
    ax.legend()
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "historical", df_similar
    )
@log_start_end(log=logger)
def display_volume(
    similar: List[str],
    start_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display stock volume. [Source: Yahoo Finance]

    Parameters
    ----------
    similar : List[str]
        List of similar tickers.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    start_date : Optional[str], optional
        Initial date (e.g., 2021-10-01). Defaults to 1 year back
    export : str, optional
        Format to export historical prices, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_similar = yahoo_finance_model.get_volume(similar, start_date)

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    # Scale to millions of shares for a readable axis
    df_similar = df_similar.div(1_000_000)
    ax.plot(df_similar, label=df_similar.columns.to_list())
    ax.set_title("Historical volume of similar companies")
    ax.set_ylabel("Volume [M]")
    # ensures that the historical data starts from same datapoint
    ax.set_xlim([df_similar.index[0], df_similar.index[-1]])
    ax.legend()
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "volume", df_similar
    )
@log_start_end(log=logger)
def display_correlation(
    similar: List[str],
    start_date: Optional[str] = None,
    candle_type: str = "a",
    display_full_matrix: bool = False,
    raw: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
    export: str = "",
):
    """Plot a correlation heatmap of similar companies' prices. [Source: Yahoo Finance]

    Parameters
    ----------
    similar : List[str]
        List of similar tickers.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    start_date : Optional[str], optional
        Initial date (e.g., 2021-10-01). Defaults to 1 year back
    candle_type : str, optional
        OHLCA column to use for candles or R for returns, by default "a"
    display_full_matrix : bool, optional
        Show the full matrix instead of masking the upper triangle
    raw: bool, optional
        Whether to also print the raw correlation table
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    export : str, optional
        Format to export correlation prices, by default ""
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d")

    correlations, df_similar = yahoo_finance_model.get_correlation(
        similar, start_date, candle_type
    )

    # Mask the redundant upper triangle unless the full matrix was requested
    mask = None
    if not display_full_matrix:
        mask = np.zeros((df_similar.shape[1], df_similar.shape[1]), dtype=bool)
        mask[np.triu_indices(len(mask))] = True

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    if raw:
        print_rich_table(
            correlations,
            headers=[x.title().upper() for x in correlations.columns],
            show_index=True,
        )

    sns.heatmap(
        correlations,
        cbar_kws={"ticks": [-1.0, -0.5, 0.0, 0.5, 1.0]},
        cmap="RdYlGn",
        linewidths=1,
        annot=True,
        annot_kws={"fontsize": 10},
        vmin=-1,
        vmax=1,
        mask=mask,
        ax=ax,
    )
    ax.set_title(f"Correlation Heatmap of similar companies from {start_date}")

    if not external_axes:
        theme.visualize_output()

    export_data(export, os.path.dirname(os.path.abspath(__file__)), "hcorr", df_similar)
@log_start_end(log=logger)
def display_sp500_comps_tsne(
    symbol: str,
    lr: int = 200,
    no_plot: bool = False,
    limit: int = 10,
    external_axes: Optional[List[plt.Axes]] = None,
) -> List[str]:
    """Runs TSNE on SP500 tickers (along with ticker if not in SP500).

    TSNE is a method of visualing higher dimensional data
    https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
    Note that the TSNE numbers are meaningless and will be arbitrary if run again.

    Parameters
    ----------
    symbol: str
        Ticker to get comparisons to
    lr: int
        Learning rate for TSNE
    no_plot: bool
        Flag to hold off on plotting
    limit: int
        Number of tickers to return
    external_axes : Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list), by default None

    Returns
    -------
    List[str]
        List of the `limit` closest stocks according to TSNE distance
        (returned even when plotting is skipped or the axes are invalid)
    """
    # Model returns tickers ordered by TSNE distance; row 0 is `symbol` itself,
    # so the closest peers start at row 1 -- TODO confirm against the model.
    data = yahoo_finance_model.get_sp500_comps_tsne(symbol=symbol, lr=lr)
    top_n = data.iloc[1 : (limit + 1)]
    top_n_name = top_n.index.to_list()
    if not no_plot:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return []
        # Next 100 closest tickers after the top-n band (greyed out)
        top_100 = data[(limit + 1) : 101]
        symbol_df = data[data.index == symbol]
        ax.scatter(
            top_n.X,
            top_n.Y,
            alpha=0.8,
            c=theme.up_color,
            label=f"Top {limit} closest tickers",
        )
        ax.scatter(
            top_100.X, top_100.Y, alpha=0.5, c="grey", label="Top 100 closest tickers"
        )
        # Label every plotted point with its ticker symbol
        for x, y, company in zip(top_n.X, top_n.Y, top_n.index):
            ax.annotate(company, (x, y), fontsize=9, alpha=0.9)
        for x, y, company in zip(top_100.X, top_100.Y, top_100.index):
            ax.annotate(company, (x, y), fontsize=9, alpha=0.75)
        # The requested symbol itself, highlighted in the "down" color
        ax.scatter(
            symbol_df.X,
            symbol_df.Y,
            s=50,
            c=theme.down_color,
        )
        ax.annotate(symbol, (symbol_df.X, symbol_df.Y), fontsize=9, alpha=1)
        ax.legend()
        ax.set_title(
            f"Top 100 closest stocks on S&P500 to {symbol} using TSNE algorithm",
            fontsize=11,
        )
        ax.set_xlabel("Dimension 1")
        ax.set_ylabel("Dimension 2")
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    return top_n_name
__docformat__ = "numpy"
from datetime import datetime
import logging
import os
from typing import List
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_financials_colored_values,
patch_pandas_text_adjustment,
print_rich_table,
)
from openbb_terminal.stocks.comparison_analysis import marketwatch_model
from openbb_terminal import rich_config
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_income_comparison(
    symbols: List[str],
    timeframe: str = str(datetime.now().year - 1),
    quarter: bool = False,
    export: str = "",
):
    """Show a side-by-side income statement comparison. [Source: Marketwatch]

    Parameters
    ----------
    symbols : List[str]
        Tickers to compare, e.g. ["TSLA", "AAPL", "NFLX", "BBY"].
        Peer lists can be obtained with finnhub_peers(), finviz_peers()
        or polygon_peers().
    timeframe : str
        What year to look at
    quarter : bool, optional
        Whether to use quarterly statements, by default False
    export : str, optional
        Format to export data
    """
    df_income = marketwatch_model.get_income_comparison(symbols, timeframe, quarter)
    if df_income.empty:
        # Nothing retrieved for any of the requested tickers
        return
    # Export the plain numbers before any color markup is applied
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "income",
        df_income,
    )
    if rich_config.USE_COLOR:
        df_income = df_income.applymap(lambda_financials_colored_values)
        patch_pandas_text_adjustment()
    if not quarter:
        # Annual tables carry the selected year as the index title
        df_income.index.name = timeframe
    print_rich_table(
        df_income,
        headers=list(df_income.columns),
        show_index=True,
        title="Income Data",
    )
@log_start_end(log=logger)
def display_balance_comparison(
    symbols: List[str],
    timeframe: str = str(datetime.now().year - 1),
    quarter: bool = False,
    export: str = "",
):
    """Compare balance between companies. [Source: Marketwatch]

    Parameters
    ----------
    symbols : List[str]
        List of tickers to compare. Enter tickers you want to see as shown below:
        ["TSLA", "AAPL", "NFLX", "BBY"]
        You can also get a list of comparable peers with
        finnhub_peers(), finviz_peers(), polygon_peers().
    timeframe : str
        What year to look at
    quarter : bool, optional
        Whether to use quarterly statements, by default False
    export : str, optional
        Format to export data
    """
    # Consistency fix: use the model's dedicated wrapper (as the income view
    # does) instead of calling get_financial_comparisons with a raw endpoint
    # string; behavior is identical.
    df_financials_compared = marketwatch_model.get_balance_comparison(
        symbols, timeframe, quarter
    )
    if df_financials_compared.empty:
        return
    # Export data before the color markup is applied
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "balance",
        df_financials_compared,
    )
    if rich_config.USE_COLOR:
        df_financials_compared = df_financials_compared.applymap(
            lambda_financials_colored_values
        )
        patch_pandas_text_adjustment()
    if not quarter:
        # Annual tables carry the selected year as the index title
        df_financials_compared.index.name = timeframe
    print_rich_table(
        df_financials_compared,
        headers=list(df_financials_compared.columns),
        show_index=True,
        title="Company Comparison",
    )
@log_start_end(log=logger)
def display_cashflow_comparison(
    symbols: List[str],
    timeframe: str = str(datetime.now().year - 1),
    quarter: bool = False,
    export: str = "",
):
    """Compare cashflow between companies. [Source: Marketwatch]

    Parameters
    ----------
    symbols : List[str]
        List of tickers to compare. Enter tickers you want to see as shown below:
        ["TSLA", "AAPL", "NFLX", "BBY"]
        You can also get a list of comparable peers with
        finnhub_peers(), finviz_peers(), polygon_peers().
    timeframe : str
        What year/quarter to look at
    quarter : bool, optional
        Whether to use quarterly statements, by default False
    export : str, optional
        Format to export data
    """
    # Consistency fix: use the model's dedicated wrapper (as the income view
    # does) instead of calling get_financial_comparisons with a raw endpoint
    # string; behavior is identical.
    df_financials_compared = marketwatch_model.get_cashflow_comparison(
        symbols, timeframe, quarter
    )
    if df_financials_compared.empty:
        return
    # Export data before the color markup is applied
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "cashflow",
        df_financials_compared,
    )
    if rich_config.USE_COLOR:
        df_financials_compared = df_financials_compared.applymap(
            lambda_financials_colored_values
        )
        patch_pandas_text_adjustment()
    if not quarter:
        # Annual tables carry the selected year as the index title
        df_financials_compared.index.name = timeframe
    print_rich_table(
        df_financials_compared,
        headers=list(df_financials_compared.columns),
        show_index=True,
        title="Cashflow Comparison",
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.comparison_analysis import finbrain_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def display_sentiment_compare(
    similar: List[str],
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display sentiment for all ticker. [Source: FinBrain].

    Parameters
    ----------
    similar : List[str]
        Similar companies to compare income with.
        Comparable companies can be accessed through
        finviz_peers(), finnhub_peers() or polygon_peers().
    raw : bool, optional
        Output raw values, by default False
    export : str, optional
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_sentiment = finbrain_model.get_sentiments(similar)
    if df_sentiment.empty:
        console.print("No sentiments found.")
    else:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        for idx, tick in enumerate(similar):
            # Each ticker gets its own horizontal band, two units tall
            offset = 2 * idx
            ax.axhline(y=offset, color="white", linestyle="--", lw=1)
            ax.axhline(y=offset + 1, color="white", linestyle="--", lw=1)
            # BUGFIX: the original compared against
            # `np.array(0 * len(df_sentiment))`, which is a scalar 0
            # (0 * int == 0), not the zero vector its name suggested.
            # Broadcasting made the comparison work anyway, so comparing
            # against 0 directly is equivalent and clearer; the numeric
            # conversion is also done once instead of once per fill.
            sen_values = np.array(pd.to_numeric(df_sentiment[tick].values))
            ax.fill_between(
                df_sentiment.index,
                sen_values + offset,
                offset,
                where=(sen_values < 0),
                color=theme.down_color,
                interpolate=True,
            )
            ax.fill_between(
                df_sentiment.index,
                sen_values + offset,
                offset,
                where=(sen_values >= 0),
                color=theme.up_color,
                interpolate=True,
            )
        ax.set_ylabel("Sentiment")
        ax.axhline(y=-1, color="white", linestyle="--", lw=1)
        # One tick per ticker, centered on each band's baseline
        ax.set_yticks(np.arange(len(similar)) * 2)
        ax.set_yticklabels(similar)
        ax.set_xlim(df_sentiment.index[0], df_sentiment.index[-1])
        ax.set_title(f"FinBrain's Sentiment Analysis since {df_sentiment.index[0]}")
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    if raw:
        print_rich_table(
            df_sentiment,
            headers=list(df_sentiment.columns),
            title="Ticker Sentiment",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "sentiment",
        df_sentiment,
    )
@log_start_end(log=logger)
def display_sentiment_correlation(
    similar: List[str],
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot correlation sentiments heatmap across similar companies. [Source: FinBrain].

    Parameters
    ----------
    similar : List[str]
        Similar companies to compare income with.
        Comparable companies can be accessed through
        finviz_peers(), finnhub_peers() or polygon_peers().
    raw : bool, optional
        Output raw values, by default False
    export : str, optional
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    corrs, df_sentiment = finbrain_model.get_sentiment_correlation(similar)
    if df_sentiment.empty:
        console.print("No sentiments found.")
    else:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Mask the upper triangle (including the diagonal) so each ticker
        # pair appears only once in the heatmap
        mask = np.zeros((len(similar), len(similar)), dtype=bool)
        mask[np.triu_indices(len(mask))] = True
        sns.heatmap(
            corrs,
            cbar_kws={"ticks": [-1.0, -0.5, 0.0, 0.5, 1.0]},
            cmap="RdYlGn",
            linewidths=1,
            annot=True,
            vmin=-1,
            vmax=1,
            mask=mask,
            ax=ax,
        )
        similar_string = ",".join(similar)
        ax.set_title(
            f"Sentiment correlation heatmap across \n{similar_string}", fontsize=11
        )
        if not external_axes:
            theme.visualize_output()
    if raw:
        print_rich_table(
            corrs,
            headers=list(corrs.columns),
            show_index=True,
            title="Correlation Sentiments",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "scorr",
        corrs,
    )
__docformat__ = "numpy"
from datetime import datetime
import logging
from typing import Dict, List, Tuple
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_financial_comparisons(
    symbols: List[str],
    data: str = "income",
    timeframe: str = str(datetime.now().year - 1),
    quarter: bool = False,
) -> pd.DataFrame:
    """Fetch and merge one financial statement across several tickers.

    Parameters
    ----------
    symbols : List[str]
        Tickers to compare, e.g. ["TSLA", "AAPL", "NFLX", "BBY"]
    data : str
        Statement to fetch: income, balance or cashflow
    timeframe : str
        Year/quarter label to compare
    quarter : bool
        Use quarterly data instead of annual

    Returns
    -------
    pd.DataFrame
        Merged financial statements

    Raises
    ------
    ValueError
        If the requested timeframe is not available
    """
    available, statements = prepare_comparison_financials(symbols, data, quarter)
    if not timeframe:
        # No explicit request: fall back to the most recent period, if any
        if not available:
            return pd.DataFrame()
        selected = available[-1]
    else:
        selected = timeframe
        if quarter and timeframe == str(datetime.now().year - 1):
            # The annual-style default label is meaningless for quarters;
            # substitute the latest available quarter instead
            selected = available[-1]
        elif timeframe not in available:
            raise ValueError(
                f"Timeframe selected should be one of {', '.join(available)}"
            )
    period_kind = "quarterly" if quarter else "yearly"
    console.print(
        f"Other available {period_kind} timeframes are: {', '.join(available)}\n"
    )
    return combine_similar_financials(statements, symbols, selected, quarter)
@log_start_end(log=logger)
def get_income_comparison(
    similar: List[str],
    timeframe: str = str(datetime.today().year - 1),
    quarter: bool = False,
) -> pd.DataFrame:
    """Get income data. [Source: Marketwatch].

    Thin wrapper around get_financial_comparisons with the "income" endpoint.

    Parameters
    ----------
    similar : List[str]
        List of tickers to compare.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    timeframe : str
        Column header to compare
    quarter : bool, optional
        Whether to use quarterly statements, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of income statements
    """
    df_financials_compared = get_financial_comparisons(
        similar, "income", timeframe, quarter
    )
    return df_financials_compared
@log_start_end(log=logger)
def get_balance_comparison(
    similar: List[str],
    timeframe: str = str(datetime.today().year - 1),
    quarter: bool = False,
) -> pd.DataFrame:
    """Get balance data. [Source: Marketwatch].

    Thin wrapper around get_financial_comparisons with the "balance" endpoint.

    Parameters
    ----------
    similar : List[str]
        List of tickers to compare.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    timeframe : str
        Column header to compare
    quarter : bool, optional
        Whether to use quarterly statements, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of balance comparisons
    """
    df_financials_compared = get_financial_comparisons(
        similar, "balance", timeframe, quarter
    )
    return df_financials_compared
@log_start_end(log=logger)
def get_cashflow_comparison(
    similar: List[str],
    timeframe: str = str(datetime.today().year - 1),
    quarter: bool = False,
) -> pd.DataFrame:
    """Get cashflow data. [Source: Marketwatch]

    Thin wrapper around get_financial_comparisons with the "cashflow" endpoint.

    Parameters
    ----------
    similar : List[str]
        List of tickers to compare.
        Comparable companies can be accessed through
        finnhub_peers(), finviz_peers(), polygon_peers().
    timeframe : str
        Column header to compare
    quarter : bool, optional
        Whether to use quarterly statements, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of cashflow comparisons
    """
    df_financials_compared = get_financial_comparisons(
        similar, "cashflow", timeframe, quarter
    )
    return df_financials_compared
@log_start_end(log=logger)
def prepare_df_financials(
    ticker: str, statement: str, quarter: bool = False
) -> pd.DataFrame:
    """Builds a DataFrame with financial statements for a given company.

    Scrapes the Marketwatch financials page, so the parsing below is tightly
    coupled to Marketwatch's current HTML layout.

    Parameters
    ----------
    ticker: str
        Company's stock ticker
    statement: str
        Financial statement to get. Can be income, balance or cashflow
    quarter: bool, optional
        Return quarterly financial statements instead of annual, by default False

    Returns
    -------
    pd.DataFrame
        A DataFrame with financial info (empty on any scrape/parse failure)

    Raises
    ------
    ValueError
        If statement is not income, balance or cashflow
    """
    financial_urls = {
        "income": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/income/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/income",
        },
        "balance": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet",
        },
        "cashflow": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow",
        },
    }
    if statement not in financial_urls:
        raise ValueError(f"type {statement} is not in {financial_urls.keys()}")
    try:
        period = "quarter" if quarter else "annual"
        # NOTE(review): requests.get has no timeout here, so a stalled
        # connection can hang indefinitely -- consider adding one.
        text_soup_financials = BeautifulSoup(
            requests.get(
                financial_urls[statement][period].format(ticker),
                headers={"User-Agent": get_user_agent()},
            ).text,
            "lxml",
        )
        # Define financials columns: the table headers up to (excluding)
        # the trailing trend-sparkline column
        a_financials_header = [
            financials_header.text.strip("\n").split("\n")[0]
            for financials_header in text_soup_financials.findAll(
                "th", {"class": "overflow__heading"}
            )
        ]
        s_header_end_trend = ("5-year trend", "5- qtr trend")[quarter]
        if s_header_end_trend in a_financials_header:
            df_financials = pd.DataFrame(
                columns=a_financials_header[
                    0 : a_financials_header.index(s_header_end_trend)
                ]
            )
        else:
            # We don't have the data we need for whatever reason, so return an empty DataFrame
            return pd.DataFrame()
        find_table = text_soup_financials.findAll(
            "div", {"class": "element element--table table--fixed financials"}
        )
        if not find_table:
            return df_financials
        financials_rows = find_table[0].findAll(
            "tr", {"class": ["table__row is-highlighted", "table__row"]}
        )
        # Build the frame row by row, keeping only the first line of each cell
        for a_row in financials_rows:
            constructed_row = []
            financial_columns = a_row.findAll(
                "td", {"class": ["overflow__cell", "overflow__cell fixed--column"]}
            )
            if not financial_columns:
                continue
            for a_column in financial_columns:
                column_to_text = a_column.text.strip()
                if "\n" in column_to_text:
                    column_to_text = column_to_text.split("\n")[0]
                if column_to_text == "":
                    continue
                constructed_row.append(column_to_text)
            df_financials.loc[len(df_financials)] = constructed_row
    except Exception:
        # NOTE(review): this broad except silently swallows network and
        # parsing errors alike, returning an empty frame; callers treat
        # "empty" as "no data". Consider logging the exception.
        df_financials = pd.DataFrame()
    return df_financials
@log_start_end(log=logger)
def prepare_comparison_financials(
    similar: List[str], statement: str, quarter: bool = False
) -> Tuple[List[str], Dict[str, pd.DataFrame]]:
    """Builds a dictionary of DataFrame with financial statements for list of tickers

    Parameters
    ----------
    similar : List[str]
        List of similar stock tickers. NOTE: tickers for which no data could
        be retrieved are removed from this list in place.
    statement : str
        Financial statement to get. Can be income, balance or cashflow
    quarter : bool
        Return quarterly financial statements instead of annual, by default False

    Returns
    -------
    Tuple[List[str], Dict[str, pd.DataFrame]]
        List of index headers,
        A dictionary of DataFrame with financial info from list of similar tickers
    """
    financials = {}
    # Iterate over a copy since failing tickers are removed from `similar`
    for symbol in similar.copy():
        results = prepare_df_financials(symbol, statement, quarter)
        if results.empty:
            # If we have an empty result set, don't do further analysis on
            # this symbol and remove it from consideration
            console.print(
                "Didn't get data for ticker "
                + symbol
                + ". Removing from further processing."
            )
            similar.remove(symbol)
            continue
        financials[symbol] = results.set_index("Item")
    # BUGFIX: if no ticker returned data, `similar` is now empty and the
    # quarterly branch below would raise IndexError on similar[0].
    if not financials:
        return [], {}
    if quarter:
        # Quarterly reports are ordered by the site; just take one ticker's columns
        items = financials[similar[0]].columns
    else:
        items = []
        # Get common headers between tickers (the widest column set wins)
        for symbol in similar:
            if len(financials[symbol].columns) > len(items):
                items = financials[symbol].columns
        # Add columns with N/A when data is not available, to have similar columns
        for symbol in similar:
            if len(financials[symbol].columns) < len(items):
                for item in items:
                    if item not in financials[symbol].columns:
                        financials[symbol][item] = "N/A"
            financials[symbol] = financials[symbol].reindex(sorted(items), axis=1)
    return list(items), financials
@log_start_end(log=logger)
def combine_similar_financials(
    datasets: Dict[str, pd.DataFrame],
    similar: List[str],
    timeframe: str,
    quarter: bool = False,
) -> pd.DataFrame:
    """Merge one period of financial statements across a list of tickers.

    Parameters
    ----------
    datasets: Dict[str, pd.DataFrame]
        Per-ticker financial statements, keyed by ticker
    similar: List[str]
        Tickers to merge; the first one anchors the selected period
    timeframe: str
        Column label (a year or quarter) to extract
    quarter: bool
        False for yearly data, True for quarterly

    Returns
    -------
    pd.DataFrame
        One column per ticker for the selected period
    """
    # Start from the first ticker's column for the requested period
    combined = datasets[similar[0]][timeframe].to_frame()
    combined.rename(columns={timeframe: similar[0]}, inplace=True)
    if quarter:
        # Quarterly reports may land on slightly different dates per company,
        # so we match quarters positionally (counting back from the newest
        # column, as ordered by the Market Watch website) rather than by label.
        report_dates = [timeframe]
        anchor_cols = list(datasets[similar[0]].columns)
        back_offset = len(anchor_cols) - anchor_cols.index(timeframe)
        for peer in similar[1:]:
            peer_date = list(datasets[peer].columns)[-back_offset]
            report_dates.append(peer_date)
            combined[peer] = datasets[peer][peer_date]
        # Keep each company's actual reporting date visible in the header
        combined.columns = pd.MultiIndex.from_tuples(
            zip(report_dates, combined.columns),
        )
    else:
        # Annual labels match exactly across tickers
        for peer in similar[1:]:
            combined[peer] = datasets[peer][timeframe]
    return combined
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
from datetime import datetime, timedelta
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
print_rich_table,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.stocks import stocks_helper
from openbb_terminal.stocks.dark_pool_shorts import sec_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def fails_to_deliver(
    symbol: str,
    data: pd.DataFrame = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    limit: int = 0,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display fails-to-deliver data for a given ticker. [Source: SEC]

    Parameters
    ----------
    symbol: str
        Stock ticker
    data: pd.DataFrame
        Stock data; fetched via stocks_helper.load when not provided
    start_date: Optional[str]
        Start of data, in YYYY-MM-DD format (defaults to 60 days back)
    end_date: Optional[str]
        End of data, in YYYY-MM-DD format (defaults to today)
    limit : int
        Number of latest fails-to-deliver being printed (0 = use date range)
    raw: bool
        Print raw data
    export: str
        Export dataframe data to csv,json,xlsx file
    external_axes: Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=60)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    if data is None:
        data = stocks_helper.load(
            symbol=symbol, start_date=start_date, end_date=end_date
        )
    ftds_data = sec_model.get_fails_to_deliver(symbol, start_date, end_date, limit)
    # This plot has 2 axes: FTD bars on the left, share price on the right
    if not external_axes:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax2 = ax1.twinx()
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return
    ax1.bar(
        ftds_data["SETTLEMENT DATE"],
        ftds_data["QUANTITY (FAILS)"] / 1000,
        label="Fail Quantity",
    )
    ax1.set_ylabel("Shares [K]")
    ax1.set_title(f"Fails-to-deliver Data for {symbol}")
    ax1.legend(loc="lower right")
    if limit > 0:
        # FTD archives come in half-month files; the extra 31 days pads the
        # price window so it covers the `limit` latest FTD entries
        data_ftd = data[data.index > (datetime.now() - timedelta(days=limit + 31))]
    else:
        # NOTE(review): comparing a (presumably) DatetimeIndex against the
        # YYYY-MM-DD strings relies on pandas' string-vs-datetime comparison
        data_ftd = data[data.index > start_date]
        data_ftd = data_ftd[data_ftd.index < end_date]
    ax2.plot(data_ftd.index, data_ftd["Adj Close"], color="orange", label="Share Price")
    ax2.set_ylabel("Share Price [$]")
    ax2.legend(loc="upper right")
    theme.style_twin_axes(ax1, ax2)
    if not external_axes:
        theme.visualize_output()
    if raw:
        print_rich_table(
            ftds_data,
            headers=list(ftds_data.columns),
            show_index=False,
            title="Fails-To-Deliver Data",
        )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "ftd",
        ftds_data.reset_index(),
    )
__docformat__ = "numpy"
import logging
from typing import List, Tuple
from json import JSONDecodeError
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# pylint: disable=unsupported-assignment-operation
@log_start_end(log=logger)
def get_dark_pool_short_positions(
    sortby: str = "dpp_dollar", ascend: bool = False
) -> pd.DataFrame:
    """Get dark pool short positions. [Source: Stockgrid]

    Parameters
    ----------
    sortby : str
        Field for which to sort by, where 'sv': Short Vol. [1M],
        'sv_pct': Short Vol. %%, 'nsv': Net Short Vol. [1M],
        'nsv_dollar': Net Short Vol. ($100M), 'dpp': DP Position [1M],
        'dpp_dollar': DP Position ($1B)
    ascend : bool
        Data in ascending order

    Returns
    -------
    pd.DataFrame
        Dark pool short position data
    """
    # Map our short sort keys onto the (url-encoded) endpoint field names
    endpoint_by_field = {
        "sv": "Short+Volume",
        "sv_pct": "Short+Volume+%25",
        "nsv": "Net+Short+Volume",
        "nsv_dollar": "Net+Short+Volume+$",
        "dpp": "Dark+Pools+Position",
        "dpp_dollar": "Dark+Pools+Position+$",
    }
    direction = "asc" if ascend else "desc"
    url = (
        "https://stockgridapp.herokuapp.com/get_dark_pool_data"
        f"?top={endpoint_by_field[sortby]}&minmax={direction}"
    )
    frame = pd.DataFrame(requests.get(url).json()["data"])
    wanted_columns = [
        "Ticker",
        "Date",
        "Short Volume",
        "Short Volume %",
        "Net Short Volume",
        "Net Short Volume $",
        "Dark Pools Position",
        "Dark Pools Position $",
    ]
    return frame[wanted_columns]
@log_start_end(log=logger)
def get_short_interest_days_to_cover(sortby: str = "float") -> pd.DataFrame:
    """Get short interest and days to cover. [Source: Stockgrid]

    Parameters
    ----------
    sortby : str
        Field for which to sort by, where 'float': Float Short %%,
        'dtc': Days to Cover, 'si': Short Interest

    Returns
    -------
    pd.DataFrame
        Short interest and days to cover data
    """
    link = "https://stockgridapp.herokuapp.com/get_short_interest?top=days"
    r = requests.get(link)
    df = pd.DataFrame(r.json()["data"])
    d_fields = {
        "float": "%Float Short",
        "dtc": "Days To Cover",
        "si": "Short Interest",
    }
    # Days-to-cover reads best ascending; the other fields descending
    df = df[
        ["Ticker", "Date", "%Float Short", "Days To Cover", "Short Interest"]
    ].sort_values(
        by=d_fields[sortby],
        ascending=sortby == "dtc",
    )
    # Express short interest in millions of shares
    df["Short Interest"] = df["Short Interest"] / 1_000_000
    # (removed a stray no-op `df.head()` call whose result was discarded)
    df.columns = [
        "Ticker",
        "Date",
        "Float Short %",
        "Days to Cover",
        "Short Interest [1M]",
    ]
    return df
@log_start_end(log=logger)
def get_short_interest_volume(symbol: str) -> Tuple[pd.DataFrame, List]:
    """Get price vs short interest volume. [Source: Stockgrid]

    Parameters
    ----------
    symbol : str
        Stock to get data from

    Returns
    -------
    Tuple[pd.DataFrame, List]
        Short interest volume data, Price data
    """
    url = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={symbol}"
    response = requests.get(url)
    try:
        payload = response.json()
    except JSONDecodeError:
        # Endpoint returned something that isn't JSON (e.g. an error page)
        return pd.DataFrame(), [None]
    volume = pd.DataFrame(payload["individual_short_volume_table"]["data"])
    volume["date"] = pd.to_datetime(volume["date"])
    volume = volume.sort_values(by="date", ascending=False)
    # Rescale raw counts into human-friendly units
    volume["Short Vol. [1M]"] = volume["short_volume"] / 1_000_000
    volume["Short Vol. %"] = volume["short_volume%"] * 100
    volume["Short Exempt Vol. [1k]"] = volume["short_exempt_volume"] / 1_000
    volume["Total Vol. [1M]"] = volume["total_volume"] / 1_000_000
    keep = [
        "date",
        "Short Vol. [1M]",
        "Short Vol. %",
        "Short Exempt Vol. [1k]",
        "Total Vol. [1M]",
    ]
    return volume[keep], payload["prices"]["prices"]
@log_start_end(log=logger)
def get_net_short_position(symbol: str) -> pd.DataFrame:
    """Get net short position. [Source: Stockgrid]

    Parameters
    ----------
    symbol: str
        Stock to get data from

    Returns
    -------
    pd.DataFrame
        Net short position
    """
    url = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={symbol}"
    response = requests.get(url)
    try:
        frame = pd.DataFrame(response.json()["individual_dark_pool_position_data"])
    except JSONDecodeError:
        # Endpoint returned something that isn't JSON (e.g. an error page)
        return pd.DataFrame()
    frame["dates"] = pd.to_datetime(frame["dates"])
    frame = frame.sort_values(by="dates", ascending=False)
    # Rescale into the display units used elsewhere in the terminal
    frame["Net Short Vol. (1k $)"] = frame["dollar_net_volume"] / 1_000
    frame["Position (1M $)"] = frame["dollar_dp_position"]
    return frame[["dates", "Net Short Vol. (1k $)", "Position (1M $)"]]
__docformat__ = "numpy"
import logging
from datetime import datetime, timedelta
from typing import Optional
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def catching_diff_url_formats(ftd_urls: list) -> list:
    """Rewrite SEC fails-to-deliver URLs that don't follow the standard format.

    A few date ranges (pre mid-2017, Feb-Apr 2020) and one single file
    (first half of October 2019) live under different paths on sec.gov;
    this normalizes the generated URLs in place.

    Parameters
    ----------
    ftd_urls : list
        list of urls of sec data

    Returns
    -------
    list
        list of ftd urls (same list object, rewritten in place)
    """
    # Characters 58:64 of a standard URL hold the YYYYMM stamp
    pandemic_months = ("202002", "202003", "202004")
    for i, url in enumerate(ftd_urls):
        # URLs with dates prior to the first half of June 2017 use the
        # long "frequently-requested-foia-document" path
        if int(url[58:64]) < 201706 or "201706a" in url:
            ftd_urls[i] = url.replace(
                "fails-deliver-data",
                "frequently-requested-foia-document-fails-deliver-data",
            )
        # February through April 2020 were published under node/add
        elif any(month in url for month in pandemic_months):
            ftd_urls[i] = url.replace(
                "data/fails-deliver-data", "node/add/data_distribution"
            )
        # First half of October 2019 got a "_0" suffix
        elif (
            url
            == "https://www.sec.gov/files/data/fails-deliver-data/cnsfails201910a.zip"
        ):
            ftd_urls[
                i
            ] = "https://www.sec.gov/files/data/fails-deliver-data/cnsfails201910a_0.zip"
    return ftd_urls
@log_start_end(log=logger)
def get_fails_to_deliver(
    symbol: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    limit: int = 0,
) -> pd.DataFrame:
    """Display fails-to-deliver data for a given ticker. [Source: SEC]

    Parameters
    ----------
    symbol : str
        Stock ticker
    start_date : Optional[str]
        Start of data, in YYYY-MM-DD format (defaults to 60 days back)
    end_date : Optional[str]
        End of data, in YYYY-MM-DD format (defaults to today)
    limit : int
        Number of latest fails-to-deliver being printed; when > 0 the
        date range is ignored and the newest archives are walked instead

    Returns
    -------
    pd.DataFrame
        Fail to deliver data
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=60)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = datetime.now().strftime("%Y-%m-%d")
    ftds_data = pd.DataFrame()
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    # Filter by number of last FTD
    if limit > 0:
        url_ftds = "https://www.sec.gov/data/foiadocsfailsdatahtm"
        text_soup_ftds = BeautifulSoup(
            requests.get(url_ftds, headers={"User-Agent": get_user_agent()}).text,
            "lxml",
        )
        table = text_soup_ftds.find("table", {"class": "list"})
        links = table.findAll("a")
        link_idx = 0
        while len(ftds_data) < limit:
            # BUGFIX: was `link_idx > len(links)`, which let
            # links[len(links)] raise IndexError before breaking
            if link_idx >= len(links):
                break
            link = links[link_idx]
            url = "https://www.sec.gov" + link["href"]
            all_ftds = pd.read_csv(
                url,
                compression="zip",
                sep="|",
                engine="python",
                skipfooter=2,
                usecols=[0, 2, 3, 5],
                dtype={"QUANTITY (FAILS)": "int"},
                encoding="iso8859",
            )
            tmp_ftds = all_ftds[all_ftds["SYMBOL"] == symbol]
            del tmp_ftds["PRICE"]
            del tmp_ftds["SYMBOL"]
            # merge the data from this archive
            ftds_data = pd.concat([ftds_data, tmp_ftds], ignore_index=True)
            link_idx += 1
        # clip away extra rows
        ftds_data = ftds_data.sort_values("SETTLEMENT DATE")[-limit:]
        ftds_data["SETTLEMENT DATE"] = ftds_data["SETTLEMENT DATE"].apply(
            lambda x: datetime.strptime(str(x), "%Y%m%d")
        )
    # Filter by start and end dates for FTD
    else:
        base_url = "https://www.sec.gov/files/data/fails-deliver-data/cnsfails"
        ftd_dates = []
        # Archives are half-month files named YYYYMM + "a" (1st-15th) or "b"
        for y in range(start.year, end.year + 1):
            if y < end.year:
                # BUGFIX: intermediate years used to start at start.month
                # too, silently skipping Jan..start.month-1 of those years
                first_month = start.month if y == start.year else 1
                for a_month in range(first_month, 13):
                    formatted_month = f"{a_month:02d}"
                    if a_month == start.month and y == start.year:
                        # Skip the "a" half when the range starts after the 15th
                        if start.day < 16:
                            ftd_dates.append(str(y) + formatted_month + "a")
                        ftd_dates.append(str(y) + formatted_month + "b")
                    else:
                        ftd_dates.append(str(y) + formatted_month + "a")
                        ftd_dates.append(str(y) + formatted_month + "b")
            else:
                for a_month in range(1, end.month):
                    formatted_month = f"{a_month:02d}"
                    if a_month == end.month - 1:
                        ftd_dates.append(str(y) + formatted_month + "a")
                        if end.day > 15:
                            ftd_dates.append(str(y) + formatted_month + "b")
                    else:
                        ftd_dates.append(str(y) + formatted_month + "a")
                        ftd_dates.append(str(y) + formatted_month + "b")
        ftd_urls = [base_url + ftd_date + ".zip" for ftd_date in ftd_dates]
        # Calling function that catches a handful of urls that are slightly
        # different than the standard format
        ftd_urls = catching_diff_url_formats(ftd_urls)
        for ftd_link in ftd_urls:
            all_ftds = pd.read_csv(
                ftd_link,
                compression="zip",
                sep="|",
                engine="python",
                skipfooter=2,
                usecols=[0, 2, 3, 5],
                dtype={"QUANTITY (FAILS)": "Int64"},
                encoding="iso8859",
            )
            tmp_ftds = all_ftds[all_ftds["SYMBOL"] == symbol]
            del tmp_ftds["PRICE"]
            del tmp_ftds["SYMBOL"]
            # merge the data from this archive
            ftds_data = pd.concat([ftds_data, tmp_ftds], ignore_index=True)
        ftds_data["SETTLEMENT DATE"] = ftds_data["SETTLEMENT DATE"].apply(
            lambda x: datetime.strptime(str(x), "%Y%m%d")
        )
        # Archives are half-month granular, so trim to the exact window
        ftds_data = ftds_data[ftds_data["SETTLEMENT DATE"] > start]
        ftds_data = ftds_data[ftds_data["SETTLEMENT DATE"] < end]
    return ftds_data
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.dates as mdates
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.dark_pool_shorts import finra_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def darkpool_ats_otc(
    symbol: str, export: str = "", external_axes: Optional[List[plt.Axes]] = None
):
    """Display barchart of dark pool (ATS) and OTC (Non ATS) data. [Source: FINRA]

    Parameters
    ----------
    symbol : str
        Stock ticker
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    ats, otc = finra_model.getTickerFINRAdata(symbol)

    # Only abort when *both* frames are empty.  The previous logic returned as
    # soon as the ATS frame was empty, which discarded valid OTC-only data and
    # made the combined "no ticker data" check unreachable.
    if ats.empty and otc.empty:
        console.print("[red]No ticker data found![/red]\n")
        return

    # This plot has 2 axes
    if not external_axes:
        _, axes = plt.subplots(
            2, 1, sharex=True, figsize=plot_autoscale(), dpi=PLOT_DPI
        )
        (ax1, ax2) = axes
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return

    # Top panel: weekly share volume, with ATS stacked above OTC when both
    # series are available.
    if not ats.empty and not otc.empty:
        ax1.bar(
            ats.index,
            (ats["totalWeeklyShareQuantity"] + otc["totalWeeklyShareQuantity"])
            / 1_000_000,
            color=theme.down_color,
        )
        ax1.bar(
            otc.index, otc["totalWeeklyShareQuantity"] / 1_000_000, color=theme.up_color
        )
        ax1.legend(["ATS", "OTC"])
    elif not ats.empty:
        ax1.bar(
            ats.index,
            ats["totalWeeklyShareQuantity"] / 1_000_000,
            color=theme.down_color,
        )
        ax1.legend(["ATS"])
    else:
        ax1.bar(
            otc.index, otc["totalWeeklyShareQuantity"] / 1_000_000, color=theme.up_color
        )
        ax1.legend(["OTC"])

    ax1.set_ylabel("Total Weekly Shares [Million]")
    ax1.set_title(f"Dark Pools (ATS) vs OTC (Non-ATS) Data for {symbol}")
    ax1.set_xticks([])

    # Bottom panel: average shares per trade.  Plot every series that exists;
    # the previous branching plotted nothing on ax2 when ATS was empty and
    # mislabelled the legend in single-series cases.
    per_trade_labels = []
    if not ats.empty:
        ax2.plot(
            ats.index,
            ats["totalWeeklyShareQuantity"] / ats["totalWeeklyTradeCount"],
            color=theme.down_color,
        )
        per_trade_labels.append("ATS")
    if not otc.empty:
        ax2.plot(
            otc.index,
            otc["totalWeeklyShareQuantity"] / otc["totalWeeklyTradeCount"],
            color=theme.up_color,
        )
        per_trade_labels.append("OTC")
    ax2.legend(per_trade_labels)

    ax2.set_ylabel("Shares per Trade")
    ax2.xaxis.set_major_locator(mdates.DayLocator(interval=10))
    # Bound the x-axis with whichever frame has data; indexing otc
    # unconditionally crashed when only ATS data was returned.
    x_index = otc.index if not otc.empty else ats.index
    ax2.set_xlim(x_index[0], x_index[-1])
    ax2.set_xlabel("Weeks")
    theme.style_primary_axis(ax1)
    theme.style_primary_axis(ax2)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "dpotc_ats",
        ats,
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "dpotc_otc",
        otc,
    )
@log_start_end(log=logger)
def plot_dark_pools_ats(
    data: pd.DataFrame,
    symbols: List,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots promising tickers based on growing ATS data

    Parameters
    ----------
    data: pd.DataFrame
        Dark Pools (ATS) Data
    symbols: List
        List of tickers from most promising with better linear regression slope
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    # One line per ticker: weekly share quantity in millions vs week start date.
    for symbol in symbols:
        ax.plot(
            pd.to_datetime(
                data[data["issueSymbolIdentifier"] == symbol]["weekStartDate"]
            ),
            data[data["issueSymbolIdentifier"] == symbol]["totalWeeklyShareQuantity"]
            / 1_000_000,
        )

    ax.legend(symbols)
    ax.set_ylabel("Total Weekly Shares [Million]")
    ax.set_title("Dark Pool (ATS) growing tickers")
    ax.set_xlabel("Weeks")
    # NOTE(review): converts the caller's DataFrame column in place — the
    # mutation is visible to the caller after this function returns.
    data["weekStartDate"] = pd.to_datetime(data["weekStartDate"])
    ax.set_xlim(data["weekStartDate"].iloc[0], data["weekStartDate"].iloc[-1])

    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()
@log_start_end(log=logger)
def darkpool_otc(
    input_limit: int = 1000,
    limit: int = 10,
    tier: str = "T1",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display dark pool (ATS) data of tickers with growing trades activity. [Source: FINRA]

    Parameters
    ----------
    input_limit : int
        Number of tickers to filter from entire ATS data based on
        the sum of the total weekly shares quantity
    limit : int
        Number of tickers to display from most promising with
        better linear regression slope
    tier : str
        Tier to process data from: T1, T2 or OTCE
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # TODO: Improve command logic to be faster and more useful
    df_ats, d_ats_reg = finra_model.getATSdata(input_limit, tier)

    if not df_ats.empty and d_ats_reg:
        # Rank tickers by regression slope (steepest weekly-volume growth
        # first) and keep only the top `limit` for plotting.
        symbols = list(
            dict(
                sorted(d_ats_reg.items(), key=lambda item: item[1], reverse=True)
            ).keys()
        )[:limit]

        plot_dark_pools_ats(df_ats, symbols, external_axes)

        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "prom",
            df_ats,
        )
    else:
        console.print("[red]Could not get data[/red]\n")
        return
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.ticker
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.decorators import check_api_key
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.stocks.dark_pool_shorts import quandl_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_KEY_QUANDL"])
def plot_short_interest(
    symbol: str,
    data: pd.DataFrame,
    nyse: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot the short interest of a stock. This corresponds to the
    number of shares that have been sold short but have not yet been
    covered or closed out. Either NASDAQ or NYSE [Source: Quandl]

    Parameters
    ----------
    symbol : str
        ticker to get short interest from
    data: pd.DataFrame
        Short interest dataframe
    nyse : bool
        data from NYSE if true, otherwise NASDAQ
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # This plot has 2 axes
    if not external_axes:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax2 = ax1.twinx()
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return

    # Stacked bars: short volume at the bottom with the remaining (non-short)
    # volume on top, so the full bar height equals total volume.
    ax1.bar(
        data.index,
        data["Short Volume"],
        0.3,
        color=theme.down_color,
    )
    ax1.bar(
        data.index,
        data["Total Volume"] - data["Short Volume"],
        0.3,
        bottom=data["Short Volume"],
        color=theme.up_color,
    )

    ax1.set_ylabel("Shares")
    # Tuple indexing by the boolean flag picks the exchange name.
    ax1.set_title(f"{('NASDAQ', 'NYSE')[nyse]} Short Interest on {symbol}")

    ax1.legend(labels=["Short Volume", "Total Volume"], loc="best")
    ax1.yaxis.set_major_formatter(matplotlib.ticker.EngFormatter())

    # Twin axis: percentage of volume shorted, as a line over the bars.
    ax2.tick_params(axis="y")
    ax2.set_ylabel("Percentage of Volume Shorted")
    ax2.plot(
        data.index,
        data["% of Volume Shorted"],
    )
    ax2.tick_params(axis="y", which="major")
    ax2.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.0f%%"))

    theme.style_twin_axes(ax1, ax2)

    if not external_axes:
        theme.visualize_output()
@log_start_end(log=logger)
@check_api_key(["API_KEY_QUANDL"])
def short_interest(
    symbol: str,
    nyse: bool = False,
    limit: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot the short interest of a stock. This corresponds to the
    number of shares that have been sold short but have not yet been
    covered or closed out. Either NASDAQ or NYSE [Source: Quandl]

    Parameters
    ----------
    symbol : str
        ticker to get short interest from
    nyse : bool
        data from NYSE if true, otherwise NASDAQ
    limit: int
        Number of past days to show short interest
    raw : bool
        Flag to print raw data instead
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df_short_interest = quandl_model.get_short_interest(symbol, nyse)

    df_short_interest = df_short_interest.tail(limit)
    # Split camel-cased column names into space-separated words,
    # e.g. "ShortVolume" -> "Short Volume".
    df_short_interest.columns = [
        "".join(" " + char if char.isupper() else char.strip() for char in idx).strip()
        for idx in df_short_interest.columns.tolist()
    ]
    # NOTE(review): disables pandas' SettingWithCopyWarning globally for the
    # whole process, not just this function.
    pd.options.mode.chained_assignment = None
    vol_pct = (
        100
        * df_short_interest["Short Volume"].values
        / df_short_interest["Total Volume"].values
    )
    df_short_interest["% of Volume Shorted"] = [round(pct, 2) for pct in vol_pct]

    plot_short_interest(symbol, df_short_interest, nyse, external_axes)

    if raw:
        # Re-render the percentage column as a formatted string, e.g. "12.34%".
        df_short_interest["% of Volume Shorted"] = df_short_interest[
            "% of Volume Shorted"
        ].apply(lambda x: f"{x/100:.2%}")

        df_short_interest = df_short_interest.applymap(
            lambda x: lambda_long_number_format(x)
        ).sort_index(ascending=False)

        df_short_interest.index = df_short_interest.index.date

        print_rich_table(
            df_short_interest,
            headers=list(df_short_interest.columns),
            show_index=True,
            title="Short Interest of Stock",
        )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "psi(quandl)",
        df_short_interest,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.ticker
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.decorators import check_api_key
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.stocks.dark_pool_shorts import stocksera_model
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def plot_cost_to_borrow(
    symbol: str,
    data: pd.DataFrame,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot the cost to borrow of a stock. [Source: Stocksera]

    Parameters
    ----------
    symbol : str
        ticker to get cost to borrow from
    data: pd.DataFrame
        Cost to borrow dataframe
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # Check for data before allocating a figure: previously an empty frame
    # returned only after plt.subplots(), leaking a blank, never-shown figure.
    if data.empty:
        return

    # This plot has 2 axes
    if not external_axes:
        _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax2 = ax1.twinx()
    elif is_valid_axes_count(external_axes, 2):
        (ax1, ax2) = external_axes
    else:
        return

    # Bars: number of shares available to borrow.
    ax1.bar(
        data.index,
        data["Available"],
        0.3,
        color=theme.up_color,
    )

    ax1.set_title(f"Cost to Borrow of {symbol}")

    ax1.legend(labels=["Number Shares"], loc="best")
    ax1.yaxis.set_major_formatter(matplotlib.ticker.EngFormatter())

    # Twin axis: borrow fee percentage as a line.
    ax2.set_ylabel("Fees %")
    ax2.plot(data.index, data["Fees"].values)
    ax2.tick_params(axis="y", which="major")

    theme.style_twin_axes(ax1, ax2)
    ax1.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))

    if not external_axes:
        theme.visualize_output()
@log_start_end(log=logger)
@check_api_key(["API_STOCKSERA_KEY"])
def cost_to_borrow(
    symbol: str,
    limit: int = 100,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot the short interest of a stock. This corresponds to the
    number of shares that have been sold short but have not yet been
    covered or closed out. Either NASDAQ or NYSE [Source: Quandl]

    Parameters
    ----------
    symbol : str
        ticker to get cost to borrow from
    limit: int
        Number of historical cost to borrow data to show
    raw : bool
        Flag to print raw data instead
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    # Note: if you send an empty string stocksera will search every ticker
    if not symbol:
        console.print("[red]No symbol provided[/red]\n")
        return

    df_cost_to_borrow = stocksera_model.get_cost_to_borrow(symbol)

    # Keep the `limit` most recent rows and reverse them so the plot runs in
    # chronological order (assumes the API returns newest first — TODO confirm).
    df_cost_to_borrow = df_cost_to_borrow.head(limit)[::-1]

    pd.options.mode.chained_assignment = None

    plot_cost_to_borrow(symbol, df_cost_to_borrow, external_axes)

    if raw:
        df_cost_to_borrow["Available"] = df_cost_to_borrow["Available"].apply(
            lambda x: lambda_long_number_format(x)
        )

        print_rich_table(
            df_cost_to_borrow,
            headers=list(df_cost_to_borrow.columns),
            show_index=True,
            title=f"Cost to Borrow of {symbol}",
        )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "stocksera",
        df_cost_to_borrow,
    )
__docformat__ = "numpy"
import logging
from typing import Dict, List, Tuple
import pandas as pd
import requests
from scipy import stats
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def getFINRAweeks(tier: str = "T1", is_ats: bool = True) -> List:
    """Get FINRA weeks. [Source: FINRA]

    Parameters
    ----------
    tier : str
        Stock tier between T1, T2, or OTCE
    is_ats : bool
        ATS data if true, NON-ATS otherwise

    Returns
    -------
    List
        List of response data
    """
    # ATS and non-ATS weekly summaries live under different summary codes.
    summary_code = "ATS_W_SMBL" if is_ats else "OTC_W_SMBL"

    payload = {
        "compareFilters": [
            {
                "compareType": "EQUAL",
                "fieldName": "summaryTypeCode",
                "fieldValue": summary_code,
            },
            {
                "compareType": "EQUAL",
                "fieldName": "tierIdentifier",
                "fieldValue": tier,
            },
        ],
        "delimiter": "|",
        "fields": ["weekStartDate"],
        "limit": 27,
        "quoteValues": False,
        "sortFields": ["-weekStartDate"],
    }

    response = requests.post(
        "https://api.finra.org/data/group/otcMarket/name/weeklyDownloadDetails",
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        json=payload,
    )

    # Anything other than a 200 yields an empty week list.
    if response.status_code != 200:
        return []
    return response.json()
@log_start_end(log=logger)
def getFINRAdata_offset(
    start_date: str,
    tier: str = "T1",
    symbol: str = "",
    is_ats: bool = True,
    offset: int = 0,
) -> requests.Response:
    """Get FINRA data. [Source: FINRA]

    Parameters
    ----------
    start_date: str
        Weekly data to get FINRA data, in YYYY-MM-DD format
    tier: str
        Stock tier between T1, T2, or OTCE
    symbol: str
        Stock ticker to get data from
    is_ats: bool
        ATS data if true, NON-ATS otherwise
    offset: int
        Offset in getting the data

    Returns
    -------
    requests.Response
        Response from FINRA data
    """
    req_hdr = {"Accept": "application/json", "Content-Type": "application/json"}

    # Mandatory filters: week, tier and ATS/non-ATS summary type.
    l_cmp_filters = [
        {
            "compareType": "EQUAL",
            "fieldName": "weekStartDate",
            "fieldValue": start_date,
        },
        {"compareType": "EQUAL", "fieldName": "tierIdentifier", "fieldValue": tier},
        {
            "compareType": "EQUAL",
            "description": "",
            "fieldName": "summaryTypeCode",
            "fieldValue": "ATS_W_SMBL" if is_ats else "OTC_W_SMBL",
        },
    ]

    # Optional filter: restrict to a single ticker when one is provided.
    if symbol:
        l_cmp_filters.append(
            {
                "compareType": "EQUAL",
                "fieldName": "issueSymbolIdentifier",
                "fieldValue": symbol,
            }
        )

    # The API pages results; 5000 is its page size and `offset` selects the page.
    req_data = {
        "compareFilters": l_cmp_filters,
        "delimiter": "|",
        "fields": [
            "issueSymbolIdentifier",
            "totalWeeklyShareQuantity",
            "totalWeeklyTradeCount",
            "lastUpdateDate",
        ],
        "limit": 5000,
        "offset": offset,
        "quoteValues": False,
        "sortFields": ["totalWeeklyShareQuantity"],
    }

    return requests.post(
        "https://api.finra.org/data/group/otcMarket/name/weeklySummary",
        headers=req_hdr,
        json=req_data,
    )
@log_start_end(log=logger)
def getFINRAdata(
    start_date: str, symbol: str = "", tier: str = "T1", is_ats: bool = True
) -> Tuple[int, List]:
    """Get FINRA data. [Source: FINRA]

    Parameters
    ----------
    start_date : str
        Weekly data to get FINRA data, in YYYY-MM-DD format
    symbol : str
        Stock ticker to get data from
    tier : str
        Stock tier between T1, T2, or OTCE
    is_ats : bool
        ATS data if true, NON-ATS otherwise

    Returns
    -------
    Tuple[int, List]
        Status code from request, List of response data
    """
    # Delegate to getFINRAdata_offset with offset 0 instead of duplicating the
    # whole request-building block; the two functions previously built
    # identical payloads.  Also adds @log_start_end for consistency with the
    # other functions in this module.
    response = getFINRAdata_offset(start_date, tier, symbol, is_ats, offset=0)
    return (
        response.status_code,
        response.json() if response.status_code == 200 else [],
    )
@log_start_end(log=logger)
def getATSdata(limit: int = 1000, tier_ats: str = "T1") -> Tuple[pd.DataFrame, Dict]:
    """Get all FINRA ATS data, and parse most promising tickers based on linear regression

    Parameters
    ----------
    limit: int
        Number of tickers to filter from entire ATS data based on the sum of the total weekly shares quantity
    tier_ats : int
        Tier to process data from: T1, T2 or OTCE

    Returns
    -------
    Tuple[pd.DataFrame, Dict]
        Dark Pools (ATS) Data, Tickers from Dark Pools with better regression slope
    """
    # An empty tier string means "process all tiers".
    if tier_ats:
        tiers = [tier_ats]
    else:
        tiers = ["T1", "T2", "OTCE"]
    df_ats = pd.DataFrame()

    for tier in tiers:
        console.print(f"Processing Tier {tier} ...")
        for d_week in getFINRAweeks(tier, is_ats=True):
            offset = 0
            response = getFINRAdata_offset(
                d_week["weekStartDate"], tier, "", True, offset
            )
            l_data = response.json()

            # A full 5000-row page means more data remains; keep paging.
            while len(response.json()) == 5000:
                offset += 5000
                response = getFINRAdata_offset(
                    d_week["weekStartDate"], tier, "", True, offset
                )
                l_data += response.json()

            df_ats_week = pd.DataFrame(l_data)
            df_ats_week["weekStartDate"] = d_week["weekStartDate"]

            if not df_ats_week.empty:
                # df_ats = df_ats.append(df_ats_week, ignore_index=True)
                df_ats = pd.concat([df_ats, df_ats_week], ignore_index=True)

    if not df_ats.empty:
        df_ats = df_ats.sort_values("weekStartDate")
        # Numeric time axis (epoch seconds) so linregress can fit a slope.
        df_ats["weekStartDateInt"] = pd.to_datetime(df_ats["weekStartDate"]).apply(
            lambda x: x.timestamp()
        )

    console.print(f"Processing regression on {limit} promising tickers ...")

    d_ats_reg = {}
    # set(df_ats['issueSymbolIdentifier'].values) this would be iterating through all tickers
    # but that is extremely time consuming for little reward. A little filtering is done to
    # speed up search for best ATS tickers
    for symbol in list(
        df_ats.groupby("issueSymbolIdentifier")["totalWeeklyShareQuantity"]
        .sum()
        .sort_values()[-limit:]
        .index
    ):
        try:
            # Slope of weekly share quantity over time; larger = growing activity.
            slope = stats.linregress(
                df_ats[df_ats["issueSymbolIdentifier"] == symbol][
                    "weekStartDateInt"
                ].values,
                df_ats[df_ats["issueSymbolIdentifier"] == symbol][
                    "totalWeeklyShareQuantity"
                ].values,
            )[0]
            d_ats_reg[symbol] = slope
        except Exception:  # nosec B110
            pass

    return df_ats, d_ats_reg
def _collect_weekly_data(symbol: str, is_ats: bool) -> pd.DataFrame:
    """Gather weekly FINRA records for ``symbol`` across all tiers, indexed by week.

    Stops querying a tier's remaining weeks on the first non-200 response.
    """
    records = []
    for tier in ["T1", "T2", "OTCE"]:
        for d_week in getFINRAweeks(tier, is_ats=is_ats):
            status_code, response = getFINRAdata(
                d_week["weekStartDate"], symbol, tier, is_ats
            )
            if status_code != 200:
                break
            if response:
                # Merge the week metadata into the single-row result.
                d_data = response[0]
                d_data.update(d_week)
                records.append(d_data)

    df = pd.DataFrame(records)
    if not df.empty:
        df = df.sort_values("weekStartDate").set_index("weekStartDate")
    return df


@log_start_end(log=logger)
def getTickerFINRAdata(symbol: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get all FINRA data associated with a ticker

    Parameters
    ----------
    symbol : str
        Stock ticker to get data from

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame]
        Dark Pools (ATS) Data, OTC (Non-ATS) Data
    """
    # The ATS and OTC passes were verbatim copies differing only in `is_ats`;
    # both now share one helper.
    df_ats = _collect_weekly_data(symbol, is_ats=True)
    df_otc = _collect_weekly_data(symbol, is_ats=False)
    return df_ats, df_otc
__docformat__ = "numpy"
import logging
import os
from datetime import timedelta
from typing import List, Optional
import matplotlib.pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.dark_pool_shorts import stockgrid_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def dark_pool_short_positions(
    limit: int = 10,
    sortby: str = "dpp_dollar",
    ascend: bool = False,
    export: str = "",
):
    """Get dark pool short positions. [Source: Stockgrid]

    Parameters
    ----------
    limit : int
        Number of top tickers to show
    sortby : str
        Field for which to sort by, where 'sv': Short Vol. [1M],
        'sv_pct': Short Vol. %%, 'nsv': Net Short Vol. [1M],
        'nsv_dollar': Net Short Vol. ($100M), 'dpp': DP Position [1M],
        'dpp_dollar': DP Position ($1B)
    ascend : bool
        Data in ascending order
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = stockgrid_model.get_dark_pool_short_positions(sortby, ascend)

    dp_date = df["Date"].values[0]
    df = df.drop(columns=["Date"])
    # Rescale raw values into the units announced in the column headers below
    # (millions of shares, $100M, $1B) and percentages.
    df["Net Short Volume $"] = df["Net Short Volume $"] / 100_000_000
    df["Short Volume"] = df["Short Volume"] / 1_000_000
    df["Net Short Volume"] = df["Net Short Volume"] / 1_000_000
    df["Short Volume %"] = df["Short Volume %"] * 100
    df["Dark Pools Position $"] = df["Dark Pools Position $"] / (1_000_000_000)
    df["Dark Pools Position"] = df["Dark Pools Position"] / 1_000_000
    df.columns = [
        "Ticker",
        "Short Vol. [1M]",
        "Short Vol. %",
        "Net Short Vol. [1M]",
        "Net Short Vol. ($100M)",
        "DP Position [1M]",
        "DP Position ($1B)",
    ]

    # Assuming that the datetime is the same, which from my experiments seems to be the case
    print_rich_table(
        df.iloc[:limit],
        headers=list(df.columns),
        show_index=False,
        title=f"Data for: {dp_date}",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "dppos",
        df,
    )
@log_start_end(log=logger)
def short_interest_days_to_cover(
    limit: int = 10, sortby: str = "float", export: str = ""
):
    """Print short interest and days to cover. [Source: Stockgrid]

    Parameters
    ----------
    limit : int
        Number of top tickers to show
    sortby : str
        Field for which to sort by, where 'float': Float Short %%,
        'dtc': Days to Cover, 'si': Short Interest
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    data = stockgrid_model.get_short_interest_days_to_cover(sortby)

    # Every row carries the same retrieval date, so the first one is used as
    # the table title.
    report_date = data["Date"].values[0]
    data = data.drop(columns=["Date"])

    print_rich_table(
        data.head(limit),
        headers=list(data.columns),
        show_index=False,
        title=f"Data for: {report_date}",
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "shortdtc",
        data,
    )
@log_start_end(log=logger)
def short_interest_volume(
    symbol: str,
    limit: int = 84,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot price vs short interest volume. [Source: Stockgrid]

    Parameters
    ----------
    symbol : str
        Stock to plot for
    limit : int
        Number of last open market days to show
    raw : bool
        Flag to print raw data instead
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    df, prices = stockgrid_model.get_short_interest_volume(symbol)
    if df.empty:
        console.print("[red]No data available[/red]\n")
        return

    if raw:
        df.date = df.date.dt.date

        print_rich_table(
            df.iloc[:limit],
            headers=list(df.columns),
            show_index=False,
            title="Price vs Short Volume",
        )
    else:
        # This plot has 3 axes
        if not external_axes:
            _, axes = plt.subplots(
                2,
                1,
                sharex=True,
                figsize=plot_autoscale(),
                dpi=PLOT_DPI,
                gridspec_kw={"height_ratios": [2, 1]},
            )
            (ax, ax1) = axes
            ax2 = ax.twinx()
        elif is_valid_axes_count(external_axes, 3):
            (ax, ax1, ax2) = external_axes
        else:
            return

        # Top panel: total volume bars with short-volume bars drawn over them.
        ax.bar(
            df["date"],
            df["Total Vol. [1M]"],
            width=timedelta(days=1),
            color=theme.up_color,
            label="Total Volume",
        )
        ax.bar(
            df["date"],
            df["Short Vol. [1M]"],
            width=timedelta(days=1),
            color=theme.down_color,
            label="Short Volume",
        )

        ax.set_ylabel("Volume [1M]")

        # Align the price series tail with the (possibly shorter) dataframe.
        ax2.plot(
            df["date"].values,
            prices[len(prices) - len(df) :],  # noqa: E203
            label="Price",
        )
        ax2.set_ylabel("Price ($)")

        lines, labels = ax.get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax2.legend(lines + lines2, labels + labels2, loc="upper left")

        # Show only the last `limit` market days.
        ax.set_xlim(
            df["date"].values[max(0, len(df) - limit)],
            df["date"].values[len(df) - 1],
        )

        ax.ticklabel_format(style="plain", axis="y")
        ax.set_title(f"Price vs Short Volume Interest for {symbol}")

        # Bottom panel: percentage of volume that was shorted.
        ax1.plot(
            df["date"].values,
            df["Short Vol. %"],
            label="Short Vol. %",
        )
        ax1.set_xlim(
            df["date"].values[max(0, len(df) - limit)],
            df["date"].values[len(df) - 1],
        )

        ax1.set_ylabel("Short Vol. %")

        lines, labels = ax1.get_legend_handles_labels()
        ax1.legend(lines, labels, loc="upper left")
        ax1.set_ylim([0, 100])

        theme.style_twin_axes(ax, ax2)
        theme.style_primary_axis(ax1)

        if not external_axes:
            theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "shortint(stockgrid)",
        df,
    )
@log_start_end(log=logger)
def net_short_position(
    symbol: str,
    limit: int = 84,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot net short position. [Source: Stockgrid]

    Parameters
    ----------
    symbol: str
        Stock to plot for
    limit : int
        Number of last open market days to show
    raw : bool
        Flag to print raw data instead
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    df = stockgrid_model.get_net_short_position(symbol)
    if df.empty:
        console.print("[red]No data available[/red]\n")
        return

    if raw:
        df["dates"] = df["dates"].dt.date

        print_rich_table(
            df.iloc[:limit],
            headers=list(df.columns),
            show_index=False,
            title="Net Short Positions",
        )

    else:
        # This plot has 2 axes
        if not external_axes:
            _, ax1 = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            ax2 = ax1.twinx()
        elif is_valid_axes_count(external_axes, 2):
            (ax1, ax2) = external_axes
        else:
            return

        # Chronological order for plotting.
        df = df.sort_values(by=["dates"])
        # Bars: daily net short dollar volume; line (twin axis): cumulative
        # dark-pool position.
        ax1.bar(
            df["dates"],
            df["Net Short Vol. (1k $)"],
            color=theme.down_color,
            label="Net Short Vol. (1k $)",
        )
        ax1.set_ylabel("Net Short Vol. (1k $)")

        ax2.plot(
            df["dates"].values,
            df["Position (1M $)"],
            c=theme.up_color,
            label="Position (1M $)",
        )
        ax2.set_ylabel("Position (1M $)")

        lines, labels = ax1.get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax2.legend(lines + lines2, labels + labels2, loc="upper left")

        # Show only the last `limit` market days.
        ax1.set_xlim(
            df["dates"].values[max(0, len(df) - limit)],
            df["dates"].values[len(df) - 1],
        )

        ax1.set_title(f"Net Short Vol. vs Position for {symbol}")

        theme.style_twin_axes(ax1, ax2)

        if not external_axes:
            theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "shortpos",
        df,
    )
__docformat__ = "numpy"
import logging
import os
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.due_diligence import csimarket_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def suppliers(symbol: str, export: str = "", limit: int = 10) -> None:
    """Display suppliers from ticker provided. [Source: CSIMarket]

    Parameters
    ----------
    symbol: str
        Ticker to select suppliers from
    export : str
        Export dataframe data to csv,json,xlsx file
    limit: int
        The maximum number of rows to show
    """
    tickers = csimarket_model.get_suppliers(symbol, limit=limit)
    if tickers.empty:
        console.print("No suppliers found.\n")
    else:
        # NOTE(review): `tickers` is a DataFrame, so joining it iterates the
        # column labels, not supplier symbols — confirm intent; a list of
        # tickers was probably expected here.
        console.print(f"List of suppliers: {', '.join(tickers)}\n")
        print_rich_table(
            tickers,
            headers=list(tickers.columns),
            show_index=True,
            title=f"Suppliers for {symbol.upper()}",
        )
    # Export runs regardless of whether any suppliers were found.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "supplier",
        tickers,
    )
@log_start_end(log=logger)
def customers(symbol: str, export: str = ""):
    """Display customers from ticker provided. [Source: CSIMarket]

    Parameters
    ----------
    symbol: str
        Ticker to select customers from
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    customer_df = csimarket_model.get_customers(symbol)

    if customer_df.empty:
        console.print("No customers found.\n")
    else:
        print_rich_table(
            customer_df,
            headers=list(customer_df.columns),
            show_index=True,
            title=f"Customers for {symbol.upper()}",
        )

    # The export happens even when no customers were found.
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "customer",
        pd.DataFrame(customer_df),
    )
__docformat__ = "numpy"
import logging
import os
from typing import Any, List
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.due_diligence import finviz_model
from openbb_terminal import rich_config
logger = logging.getLogger(__name__)
def lambda_category_color_red_green(val: str) -> str:
    """Wrap an analyst rating in a rich color tag.

    Parameters
    ----------
    val : str
        Analyst rating - Upgrade/Downgrade

    Returns
    -------
    str
        Analyst rating with color
    """
    # Known ratings map to a fixed color; anything else passes through as-is.
    color_by_rating = {
        "Upgrade": "green",
        "Downgrade": "red",
        "Reiterated": "yellow",
    }
    color = color_by_rating.get(val)
    if color is None:
        return val
    return f"[{color}]{val}[/{color}]"
@log_start_end(log=logger)
def news(symbol: str, limit: int = 5):
    """Display news for a given stock ticker

    Parameters
    ----------
    symbol: str
        Stock ticker
    limit: int
        Number of latest news being printed
    """
    articles: List[Any] = finviz_model.get_news(symbol)

    if not articles:
        console.print("No news found for this ticker")
        return

    # Sort newest first; each entry is (date, title, link, source).
    for published, title, link, _ in sorted(articles, reverse=True)[:limit]:
        console.print(f"{published} - {title}")
        console.print(f"{link}\n")
@log_start_end(log=logger)
def analyst(symbol: str, export: str = ""):
    """Display analyst ratings. [Source: Finviz]

    Parameters
    ----------
    symbol : str
        Stock ticker
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df = finviz_model.get_analyst_data(symbol)

    if rich_config.USE_COLOR:
        # Colorize Upgrade/Downgrade/Reiterated entries for terminal output.
        df["category"] = df["category"].apply(lambda_category_color_red_green)

    print_rich_table(
        df, headers=list(df.columns), show_index=True, title="Display Analyst Ratings"
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "analyst",
        df,
    )
__docformat__ = "numpy"
import logging
import os
from datetime import datetime, timedelta
from typing import List, Optional
import matplotlib.pyplot as plt
from pandas.core.frame import DataFrame
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.due_diligence import business_insider_model
from openbb_terminal.stocks.stocks_helper import load
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def price_target_from_analysts(
    symbol: str,
    data: Optional[DataFrame] = None,
    start_date: Optional[str] = None,
    limit: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display analysts' price targets for a given stock. [Source: Business Insider]

    Prints a table of the latest targets when ``raw`` is True; otherwise plots
    the close price together with individual and averaged analyst targets.

    Parameters
    ----------
    symbol: str
        Due diligence ticker symbol
    data: Optional[DataFrame]
        Price target DataFrame
    start_date : Optional[str]
        Start date of the stock data, format YYYY-MM-DD
    limit : int
        Number of latest price targets from analysts to print
    raw: bool
        Display raw data only
    export: str
        Export dataframe data to csv,json,xlsx file
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.stocks.dd.pt_chart(symbol="AAPL")
    """
    # Default lookback of 1100 days (~3 years) when no start date is given
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d")
    # Load price history only when the caller did not supply it
    if data is None:
        data = load(symbol=symbol, start_date=start_date)
    df_analyst_data = business_insider_model.get_price_target_from_analysts(symbol)
    if df_analyst_data.empty:
        console.print("[red]Could not get data for ticker.[/red]\n")
        return
    if raw:
        df_analyst_data.index = df_analyst_data.index.strftime("%Y-%m-%d")
        print_rich_table(
            df_analyst_data.sort_index(ascending=False).head(limit),
            headers=list(df_analyst_data.columns),
            show_index=True,
            title="Analyst Price Targets",
        )
    else:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Slice start of ratings
        if start_date:
            df_analyst_data = df_analyst_data[start_date:]  # type: ignore
        plot_column = "Close"
        legend_price_label = "Close"
        ax.plot(data.index, data[plot_column].values)
        # Average target per date (several analysts may publish the same day)
        if start_date:
            ax.plot(df_analyst_data.groupby(by=["Date"]).mean(numeric_only=True)[start_date:])  # type: ignore
        else:
            ax.plot(df_analyst_data.groupby(by=["Date"]).mean(numeric_only=True))
        # Individual price targets as scatter markers
        ax.scatter(
            df_analyst_data.index,
            df_analyst_data["Price Target"],
            color=theme.down_color,
            edgecolors=theme.up_color,
            zorder=2,
        )
        # Legend order must match the plot calls above
        ax.legend([legend_price_label, "Average Price Target", "Price Target"])
        ax.set_title(f"{symbol} (Time Series) and Price Target")
        ax.set_xlim(data.index[0], data.index[-1])
        ax.set_ylabel("Share Price")
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "pt",
        df_analyst_data,
    )
@log_start_end(log=logger)
def estimates(symbol: str, estimate: str, export: str = ""):
    """Display analysts' estimates for a given ticker. [Source: Business Insider]

    Parameters
    ----------
    symbol : str
        Ticker to get analysts' estimates
    estimate: str
        Type of estimate to get
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    (
        df_year_estimates,
        df_quarter_earnings,
        df_quarter_revenues,
    ) = business_insider_model.get_estimates(symbol)

    # Map each estimate type to (dataframe, table title, export tag).
    # NOTE(review): "annualrevenue" displays the QUARTERLY revenue table —
    # this mirrors the original behavior exactly.
    dispatch = {
        "annualearnings": (
            df_year_estimates,
            "Annual Earnings Estimates",
            "pt_year",
        ),
        "quarterearnings": (
            df_quarter_earnings,
            "Quarterly Earnings Estimates",
            "pt_qtr_earnings",
        ),
        "annualrevenue": (
            df_quarter_revenues,
            "Quarterly Revenue Estimates",
            "pt_qtr_revenues",
        ),
    }

    if estimate not in dispatch:
        console.print("[red]Invalid estimate type[/red]")
        return

    df, title, export_tag = dispatch[estimate]
    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=True,
        title=title,
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        export_tag,
        df,
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import pandas as pd
from matplotlib import pyplot as plt
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.decorators import check_api_key
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.stocks.due_diligence import finnhub_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def plot_rating_over_time(
    data: pd.DataFrame,
    symbol: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot rating over time

    Parameters
    ----------
    data: pd.DataFrame
        Rating over time
    symbol: str
        Ticker symbol associated with ratings
    external_axes: Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    ratings = data.sort_values("period")
    dates = pd.to_datetime(ratings["period"])

    # One line per rating bucket; order must match the legend labels below
    bucket_colors = [
        ("strongBuy", "green"),
        ("buy", "lightgreen"),
        ("hold", "grey"),
        ("sell", "pink"),
        ("strongSell", "red"),
    ]
    for column, color in bucket_colors:
        ax.plot(dates, ratings[column], c=color, lw=3)

    ax.set_xlim(
        pd.to_datetime(ratings["period"].values[0]),
        pd.to_datetime(ratings["period"].values[-1]),
    )
    ax.set_title(f"{symbol}'s ratings over time")
    ax.set_ylabel("Rating")
    ax.legend(["Strong Buy", "Buy", "Hold", "Sell", "Strong Sell"])
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def rating_over_time(
    symbol: str,
    limit: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Rating over time (monthly). [Source: Finnhub]

    Parameters
    ----------
    symbol : str
        Ticker to get ratings from
    limit : int
        Number of last months ratings to show
    raw: bool
        Display raw data only
    export: str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list), by default None
    """
    df_rot = finnhub_model.get_rating_over_time(symbol)
    if df_rot.empty:
        return

    if raw:
        # Human-readable column names for the raw table
        pretty_names = {
            "strongSell": "Strong Sell",
            "sell": "Sell",
            "hold": "Hold",
            "buy": "Buy",
            "strongBuy": "Strong Buy",
        }
        raw_table = df_rot[
            ["period", "strongSell", "sell", "hold", "buy", "strongBuy"]
        ].rename(columns=pretty_names)
        raw_table = raw_table.head(limit)
        print_rich_table(
            raw_table,
            headers=list(raw_table.columns),
            show_index=False,
            title="Monthly Rating",
        )
    else:
        plot_rating_over_time(df_rot.head(limit), symbol, external_axes)

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "rot",
        df_rot,
    )
__docformat__ = "numpy"
import logging
from datetime import datetime
from typing import Any, Dict, List
import finviz
import pandas as pd
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_news(symbol: str) -> List[Any]:
    """Fetch news entries for a ticker. [Source: Finviz]

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    List[Any]
        News entries as returned by the finviz library
    """
    news_items = finviz.get_news(symbol)
    return news_items
@log_start_end(log=logger)
def get_analyst_data(symbol: str) -> pd.DataFrame:
    """Get analyst data. [Source: Finviz]

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Analyst price targets indexed by date; empty when nothing was scraped
    """
    price_targets = get_analyst_price_targets_workaround(symbol)
    df_fa = pd.DataFrame.from_dict(price_targets)
    if df_fa.empty:
        return df_fa
    # Index the frame by the rating date
    return df_fa.set_index("date")
# Patches finviz function while finviz is not updated
def get_analyst_price_targets_workaround(
    ticker: str, last_ratings: int = 5
) -> List[Dict]:
    """Patch the analyst price targets function from finviz

    Scrapes the ratings table from the finviz stock page and normalizes each
    row into a dictionary. Best effort: any scraping/parsing failure returns
    the rows collected so far (possibly none) instead of raising.

    Parameters
    ----------
    ticker: str
        Ticker symbol
    last_ratings: int
        Maximum number of most recent ratings to return

    Returns
    -------
    List[Dict]
        One dict per rating with keys date, category, analyst, rating and
        either "target" (single price) or "target_from"/"target_to"
    """
    analyst_price_targets: List[Dict] = []

    try:
        finviz.main_func.get_page(ticker)
        page_parsed = finviz.main_func.STOCK_PAGE[ticker]
        table = page_parsed.cssselect(
            'table[class="js-table-ratings fullview-ratings-outer"]'
        )[0]

        for row in table:
            rating = row.xpath("td//text()")
            rating = [
                val.replace("→", "->").replace("$", "") for val in rating if val != "\n"
            ]
            # Normalize e.g. "Jan-03-22" -> "2022-01-03"
            rating[0] = datetime.strptime(rating[0], "%b-%d-%y").strftime("%Y-%m-%d")

            data = {
                "date": rating[0],
                "category": rating[1],
                "analyst": rating[2],
                "rating": rating[3],
            }
            if len(rating) == 5:
                if "->" in rating[4]:
                    # "100 -> 120" style target change: split into from/to
                    rating.extend(rating[4].replace(" ", "").split("->"))
                    del rating[4]
                    data["target_from"] = float(rating[4])
                    data["target_to"] = float(rating[5])
                else:
                    data["target"] = float(rating[4])

            analyst_price_targets.append(data)
    except Exception:
        # Best-effort scraper: log the failure instead of silently hiding it,
        # but still return whatever rows were parsed before the error
        logger.exception("Failed to scrape analyst price targets for %s", ticker)

    return analyst_price_targets[:last_ratings]
__docformat__ = "numpy"
import json
import logging
import re
from typing import Tuple
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_price_target_from_analysts(symbol: str) -> pd.DataFrame:
    """Get analysts' price targets for a given stock. [Source: Business Insider]

    Parameters
    ----------
    symbol : str
        Ticker symbol

    Returns
    -------
    pd.DataFrame
        Analysts data indexed by date; empty when nothing could be parsed

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.dd.pt(symbol="AAPL")
    """
    page_url = f"https://markets.businessinsider.com/stocks/{symbol.lower()}-stock"
    page_soup = BeautifulSoup(
        requests.get(page_url, headers={"User-Agent": get_user_agent()}).text,
        "lxml",
    )

    analyst_config = None
    for script in page_soup.find_all("script"):
        script_text = str(script)
        if "window.analyseChartConfigs.push" not in script_text:
            continue
        # Extract the embedded JSON config that carries the analyst markers
        raw_config = script_text.split("config: ", 1)[1].split(",\r\n", 1)[0]
        analyst_config = json.loads(raw_config.split(",\n")[0])
        break

    try:
        df_analyst_data = pd.DataFrame.from_dict(analyst_config["Markers"])  # type: ignore
    except TypeError:
        # No config found on the page (analyst_config is still None)
        return pd.DataFrame()

    df_analyst_data = df_analyst_data[
        ["DateLabel", "Company", "InternalRating", "PriceTarget"]
    ]
    df_analyst_data.columns = ["Date", "Company", "Rating", "Price Target"]
    # The site reports ratings in German; map them to standard labels
    df_analyst_data["Rating"].replace(
        {"gut": "BUY", "neutral": "HOLD", "schlecht": "SELL"}, inplace=True
    )
    df_analyst_data["Date"] = pd.to_datetime(df_analyst_data["Date"])
    return df_analyst_data.set_index("Date")
@log_start_end(log=logger)
def get_estimates(symbol: str) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Get analysts' estimates for a given ticker. [Source: Business Insider]

    The Business Insider page renders the yearly and quarterly estimates as
    one flat stream of table cells, so the parsing loop below has to track
    which section (year vs. quarter) it is currently consuming.

    Parameters
    ----------
    symbol : str
        Ticker to get analysts' estimates

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]
        Year estimates, quarter earnings estimates, quarter revenues estimates
    """
    url_market_business_insider = (
        f"https://markets.businessinsider.com/stocks/{symbol.lower()}-stock"
    )
    text_soup_market_business_insider = BeautifulSoup(
        requests.get(
            url_market_business_insider, headers={"User-Agent": get_user_agent()}
        ).text,
        "lxml",
    )
    # Column headers: purely numeric ones (e.g. "2024") belong to the yearly
    # table; the rest (minus "in %" / "Job" columns) belong to the quarters
    l_estimates_year_header = list()
    l_estimates_quarter_header = list()
    for estimates_header in text_soup_market_business_insider.findAll(
        "th", {"class": "table__th text-right"}
    ):
        s_estimates_header = estimates_header.text.strip()
        if s_estimates_header.isdigit():
            l_estimates_year_header.append(s_estimates_header)
        elif ("in %" not in s_estimates_header) and ("Job" not in s_estimates_header):
            l_estimates_quarter_header.append(s_estimates_header)
    # Row labels (metric names) of the yearly table
    l_estimates_year_metric = list()
    for estimates_year_metric in text_soup_market_business_insider.findAll(
        "td", {"class": "table__td black"}
    ):
        l_estimates_year_metric.append(estimates_year_metric.text)
    # Row labels (metric names) of the quarterly tables
    l_estimates_quarter_metric = list()
    for estimates_quarter_metric in text_soup_market_business_insider.findAll(
        "td", {"class": "table__td font-color-dim-gray"}
    ):
        l_estimates_quarter_metric.append(estimates_quarter_metric.text)
    d_metric_year = dict()
    d_metric_quarter_earnings = dict()
    d_metric_quarter_revenues = dict()
    l_metrics = list()
    n_metrics = 0
    # b_year is True while we are still consuming cells of the yearly table
    b_year = True
    for idx, metric_value in enumerate(
        text_soup_market_business_insider.findAll(
            "td", {"class": "table__td text-right"}
        )
    ):
        if b_year:
            # YEAR metrics
            l_metrics.append(metric_value.text.strip())
            # Check if we have processed all year metrics
            if n_metrics > len(l_estimates_year_metric) - 1:
                b_year = False
                n_metrics = 0
                l_metrics = list()
                # Remember where the quarterly cells start in the flat stream
                idx_y = idx
            # Add value to dictionary once a full row of year cells is read
            if (idx + 1) % len(l_estimates_year_header) == 0:
                d_metric_year[l_estimates_year_metric[n_metrics]] = l_metrics
                l_metrics = list()
                n_metrics += 1
        if not b_year:
            # QUARTER metrics
            l_metrics.append(metric_value.text.strip())
            # Check if we have processed all quarter metrics
            if n_metrics > len(l_estimates_quarter_metric) - 1:
                break
            # Add value to dictionary; the first 4 quarter metrics belong to
            # the earnings table, the remaining ones to the revenues table
            if (idx - idx_y + 1) % len(l_estimates_quarter_header) == 0:
                if n_metrics < 4:
                    d_metric_quarter_earnings[
                        l_estimates_quarter_metric[n_metrics]
                    ] = l_metrics
                else:
                    d_metric_quarter_revenues[
                        l_estimates_quarter_metric[n_metrics - 4]
                    ] = l_metrics
                l_metrics = list()
                n_metrics += 1
    df_year_estimates = pd.DataFrame.from_dict(
        d_metric_year, orient="index", columns=l_estimates_year_header
    )
    df_year_estimates.index.name = "YEARLY ESTIMATES"
    df_quarter_earnings = pd.DataFrame.from_dict(
        d_metric_quarter_earnings,
        orient="index",
        columns=l_estimates_quarter_header,
    )
    df_quarter_revenues = pd.DataFrame.from_dict(
        d_metric_quarter_revenues,
        orient="index",
        columns=l_estimates_quarter_header,
    )
    if not df_quarter_earnings.empty:
        # Split headers like "Q1 2024 ending 03/31/24" into quarter and date
        l_quarter = list()
        l_date = list()
        for quarter_title in df_quarter_earnings.columns:
            l_quarter.append(re.split(" ending", quarter_title)[0])
            if len(re.split(" ending", quarter_title)) == 2:
                l_date.append(
                    "ending " + re.split(" ending", quarter_title)[1].strip()
                )
            else:
                l_date.append("-")
        df_quarter_earnings.index.name = "QUARTER EARNINGS ESTIMATES"
        df_quarter_earnings.columns = l_quarter
        df_quarter_earnings.loc["Date"] = l_date
        df_quarter_earnings = df_quarter_earnings.reindex(
            ["Date", "No. of Analysts", "Average Estimate", "Year Ago", "Publish Date"]
        )
    # NOTE(review): l_quarter / l_date are only defined when the earnings
    # frame is non-empty — a non-empty revenues frame paired with an empty
    # earnings frame would raise NameError here; confirm that cannot happen
    if not df_quarter_revenues.empty:
        df_quarter_revenues.index.name = "QUARTER REVENUES ESTIMATES"
        df_quarter_revenues.columns = l_quarter
        df_quarter_revenues.loc["Date"] = l_date
        df_quarter_revenues = df_quarter_revenues.reindex(
            ["Date", "No. of Analysts", "Average Estimate", "Year Ago", "Publish Date"]
        )
    return df_year_estimates, df_quarter_earnings, df_quarter_revenues
__docformat__ = "numpy"
import json
import logging
import pandas as pd
import requests
import yfinance as yf
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_ark_trades_by_ticker(symbol: str) -> pd.DataFrame:
    """Gets a dataframe of ARK trades for ticker

    Parameters
    ----------
    symbol : str
        Ticker to get trades for

    Returns
    -------
    pd.DataFrame
        DataFrame of trades; empty when the page or trade data is unavailable
    """
    url = f"https://cathiesark.com/ark-combined-holdings-of-{symbol}"
    response = requests.get(url, headers={"User-Agent": get_user_agent()})
    if response.status_code != 200:
        # Error in request
        return pd.DataFrame()

    # The trades live in the Next.js page payload embedded as JSON
    next_data = BeautifulSoup(response.text, "lxml").find(
        "script", {"id": "__NEXT_DATA__"}
    )
    page_props = json.loads(next_data.string)["props"]["pageProps"]
    if "trades" not in page_props:
        return pd.DataFrame()

    trades = pd.json_normalize(page_props["trades"])
    if trades.empty:
        return pd.DataFrame()

    trades = trades.drop(columns=["hidden", "everything.profile.customThumbnail"])
    trades["date"] = trades["date"].apply(lambda x: x.strip("Z"))
    trades.rename(columns={"date": "Date"}, inplace=True)

    # Get yfinance price to merge. Use Close which assumes purchased throughout day
    prices = yf.download(
        symbol,
        end=trades.Date.iloc[0],
        start=trades.Date.iloc[-1],
        progress=False,
    )["Close"]

    trades.set_index("Date", inplace=True)
    trades.index = pd.DatetimeIndex(trades.index)
    trades = trades.join(prices)
    trades["Total"] = trades["Close"] * trades["shares"]

    return trades.sort_index(ascending=False)
__docformat__ = "numpy"
import argparse
import logging
from typing import List
from pandas.core.frame import DataFrame
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import StockBaseController
from openbb_terminal.rich_config import console, MenuText
from openbb_terminal.stocks.due_diligence import (
ark_view,
business_insider_view,
csimarket_view,
finnhub_view,
finviz_view,
fmp_view,
marketwatch_view,
)
logger = logging.getLogger(__name__)
class DueDiligenceController(StockBaseController):
    """Due Diligence Controller class

    Menu controller for /stocks/dd: analyst ratings and price targets,
    earnings/revenue estimates, SEC filings, supplier/customer chains and
    ARK fund trades for the currently loaded ticker.
    """

    CHOICES_COMMANDS = [
        "load",
        "sec",
        "rating",
        "pt",
        "rot",
        "est",
        "analyst",
        "supplier",
        "customer",
        "arktrades",
    ]
    PATH = "/stocks/dd/"
    # Valid values for the `est --estimate` flag
    ESTIMATE_CHOICES = ["annualrevenue", "annualearnings", "quarterearnings"]
    CHOICES_GENERATION = True

    def __init__(
        self,
        ticker: str,
        start: str,
        interval: str,
        stock: DataFrame,
        queue: List[str] = None,
    ):
        """Constructor

        Parameters
        ----------
        ticker : str
            Ticker symbol under due diligence
        start : str
            Start date of the loaded stock data
        interval : str
            Data interval of the loaded stock data
        stock : DataFrame
            Loaded stock price data
        queue : List[str]
            Commands queued from the parent menu
        """
        super().__init__(queue)

        self.ticker = ticker
        self.start = start
        self.interval = interval
        self.stock = stock

        # Enable tab-completion when running an interactive session
        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)

    def print_help(self):
        """Print help"""
        mt = MenuText("stocks/dd/", 90)
        mt.add_cmd("load")
        mt.add_raw("\n")
        mt.add_param("_ticker", self.ticker.upper())
        mt.add_raw("\n")
        mt.add_cmd("analyst")
        mt.add_cmd("rating")
        mt.add_cmd("rot")
        mt.add_cmd("pt")
        mt.add_cmd("est")
        mt.add_cmd("sec")
        mt.add_cmd("supplier")
        mt.add_cmd("customer")
        mt.add_cmd("arktrades")
        console.print(text=mt.menu_text, menu="Stocks - Due Diligence")

    def custom_reset(self) -> List[str]:
        """Class specific component of reset command"""
        # Re-load the current ticker when resetting back into this menu
        if self.ticker:
            return ["stocks", f"load {self.ticker}", "dd"]
        return []

    @log_start_end(log=logger)
    def call_analyst(self, other_args: List[str]):
        """Process analyst command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="analyst",
            description="""
                Print analyst prices and ratings of the company. The following fields are expected:
                date, analyst, category, price from, price to, and rating. [Source: Finviz]
            """,
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            finviz_view.analyst(symbol=self.ticker, export=ns_parser.export)

    @log_start_end(log=logger)
    def call_pt(self, other_args: List[str]):
        """Process pt command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="pt",
            description="""Prints price target from analysts. [Source: Business Insider]""",
        )
        parser.add_argument(
            "--raw",
            action="store_true",
            dest="raw",
            help="Only output raw data",
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=10,
            help="Limit of latest price targets from analysts to print.",
        )
        # Shorthand: a bare leading value (e.g. "pt 5") is treated as -l
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            business_insider_view.price_target_from_analysts(
                symbol=self.ticker,
                data=self.stock,
                start_date=self.start,
                limit=ns_parser.limit,
                raw=ns_parser.raw,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_est(self, other_args: List[str]):
        """Process est command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="est",
            description="""Yearly estimates and quarter earnings/revenues.
            [Source: Business Insider]""",
        )
        parser.add_argument(
            "-e",
            "--estimate",
            help="Estimates to get",
            dest="estimate",
            choices=self.ESTIMATE_CHOICES,
            default="annualearnings",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            business_insider_view.estimates(
                symbol=self.ticker,
                estimate=ns_parser.estimate,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_rot(self, other_args: List[str]):
        """Process rot command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="rot",
            description="""
                Rating over time (monthly). [Source: Finnhub]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=10,
            help="Limit of last months",
        )
        parser.add_argument(
            "--raw",
            action="store_true",
            dest="raw",
            help="Only output raw data",
        )
        # Shorthand: a bare leading value is treated as the limit (-l)
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            finnhub_view.rating_over_time(
                symbol=self.ticker,
                limit=ns_parser.limit,
                raw=ns_parser.raw,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_rating(self, other_args: List[str]):
        """Process rating command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="rating",
            description="""
                Based on specific ratios, prints information whether the company
                is a (strong) buy, neutral or a (strong) sell. The following fields are expected:
                P/B, ROA, DCF, P/E, ROE, and D/E. [Source: Financial Modeling Prep]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=10,
            help="limit of last days to display ratings",
        )
        # Shorthand: a bare leading value is treated as the limit (-l)
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            fmp_view.rating(
                symbol=self.ticker,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_sec(self, other_args: List[str]):
        """Process sec command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="sec",
            description="""
                Prints SEC filings of the company. The following fields are expected: Filing Date,
                Document Date, Type, Category, Amended, and Link. [Source: Market Watch]
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            action="store",
            dest="limit",
            type=check_positive,
            default=5,
            help="number of latest SEC filings.",
        )
        # Shorthand: a bare leading value is treated as the limit (-l)
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            marketwatch_view.sec_filings(
                symbol=self.ticker,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_supplier(self, other_args: List[str]):
        """Process supplier command"""
        parser = argparse.ArgumentParser(
            prog="supplier",
            add_help=False,
            description="List of suppliers from ticker provided. [Source: CSIMarket]",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            csimarket_view.suppliers(
                symbol=self.ticker,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_customer(self, other_args: List[str]):
        """Process customer command"""
        parser = argparse.ArgumentParser(
            prog="customer",
            add_help=False,
            description="List of customers from ticker provided. [Source: CSIMarket]",
        )
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            csimarket_view.customers(
                symbol=self.ticker,
                export=ns_parser.export,
            )

    @log_start_end(log=logger)
    def call_arktrades(self, other_args: List[str]):
        """Process arktrades command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="arktrades",
            description="""
                Get trades for ticker across all ARK funds.
            """,
        )
        parser.add_argument(
            "-l",
            "--limit",
            help="Limit of rows to show",
            dest="limit",
            default=10,
            type=check_positive,
        )
        parser.add_argument(
            "-s",
            "--show_symbol",
            action="store_true",
            default=False,
            help="Flag to show ticker in table",
            dest="show_symbol",
        )
        # Shorthand: a bare leading value is treated as the limit (-l)
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-l")
        ns_parser = self.parse_known_args_and_warn(
            parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            ark_view.display_ark_trades(
                symbol=self.ticker,
                limit=ns_parser.limit,
                show_symbol=ns_parser.show_symbol,
                export=ns_parser.export,
            )
__docformat__ = "numpy"
import logging
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
# pylint: disable=too-many-branches
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_sec_filings(symbol: str) -> pd.DataFrame:
    """Get SEC filings for a given stock ticker. [Source: Market Watch]

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        SEC filings indexed by "Filing Date"; empty when the page could not
        be parsed (unknown ticker, layout change, network failure body)
    """
    # Avoid truncating the (long) filing links when the frame is displayed
    pd.set_option("display.max_colwidth", None)

    url_financials = (
        f"https://www.marketwatch.com/investing/stock/{symbol}/financials/secfilings"
    )
    text_soup_financials = BeautifulSoup(
        requests.get(url_financials, headers={"User-Agent": get_user_agent()}).text,
        "lxml",
    )

    df_financials = None
    b_ready_to_process_info = False
    soup_financials = text_soup_financials.findAll("tr", {"class": "table__row"})
    for financials_info in soup_financials:
        a_financials = financials_info.text.split("\n")

        # Once the header row has been seen, each following row is a filing
        if b_ready_to_process_info and len(a_financials) > 1:
            l_financials_info = [a_financials[2]]
            l_financials_info.extend(a_financials[5:-1])
            l_financials_info.append(financials_info.a["href"])
            # Append data values to financials
            df_financials.loc[len(df_financials.index)] = l_financials_info

        # Header row: create the (empty) dataframe with the right columns
        if "Filing Date" in a_financials and len(a_financials) > 1:
            l_financials_header = [a_financials[2]]
            l_financials_header.extend(a_financials[5:-1])
            l_financials_header.append("Link")
            df_financials = pd.DataFrame(columns=l_financials_header)
            b_ready_to_process_info = True

    # No header row found: page layout changed or ticker unknown — previously
    # this fell through to a .set_index call on None and raised AttributeError
    if df_financials is None:
        return pd.DataFrame()

    # Set Filing Date as index
    return df_financials.set_index("Filing Date")
__docformat__ = "numpy"
import os
import logging
import re
import pandas as pd
import requests
import numpy as np
import yfinance as yf
from bs4 import BeautifulSoup
import certifi
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# Necessary only for installer to identify where SSL certs are:
# point both requests and the ssl module at certifi's bundled CA file
os.environ["REQUESTS_CA_BUNDLE"] = certifi.where()
os.environ["SSL_CERT_FILE"] = certifi.where()
@log_start_end(log=logger)
def get_cramer_daily(inverse: bool = True) -> pd.DataFrame:
    """Scrape the daily recommendations of Jim Cramer

    Parameters
    ----------
    inverse: bool
        Whether to include the inverse ("InverseCramer") column

    Returns
    -------
    pd.DataFrame
        Dataframe of daily Cramer recommendations
    """
    link = (
        "https://madmoney.thestreet.com/screener/index.cfm?showview=stocks&showrows=500"
    )
    r = requests.post(
        link,
        headers={
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
            "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.3 Safari/605.1.15",
        },
    )
    if r.status_code != 200:
        return pd.DataFrame()

    soup = BeautifulSoup(r.text, "html.parser")
    table = soup.find_all("table")[0]
    trs = table.find_all("tr")
    # The call is encoded as a 1-5 rating; collapse it to Sell/Hold/Buy
    recs = {
        "1": "Sell",
        "2": "Sell",
        "3": "Hold",
        "4": "Buy",
        "5": "Buy",
    }
    # src[-5] picks the rating digit out of the call-image filename —
    # presumably e.g. ".../4.png"; verify if the site layout changes
    rec = [recs[tr.find_all("td")[3].find("img")["src"][-5]] for tr in trs[1:]]
    df = pd.read_html(r.text)[0]
    # Ticker symbol is the last word token in the Company cell
    df["Symbol"] = df.Company.apply(lambda x: re.findall(r"[\w]+", x)[-1])
    # NOTE(review): one yfinance request per ticker — slow for up to 500 rows
    last_price = [
        round(
            yf.download(ticker, period="1d", interval="1h", progress=False)["Close"][
                -1
            ],
            2,
        )
        for ticker in df.Symbol
    ]
    df["LastPrice"] = last_price
    df["Price"] = df.Price.apply(lambda x: float(x.strip("$")))
    df = df.drop(columns=["Segment", "Call", "Portfolio"])
    # NOTE(review): percentage change is computed relative to LastPrice,
    # not the recommendation Price — confirm this is intended
    df["Change (%)"] = 100 * np.round((df["LastPrice"] - df["Price"]) / df.LastPrice, 4)
    df["Recommendation"] = rec
    # Strip the "(SYMBOL)" suffix from the company name
    df["Company"] = df.apply(lambda x: x.Company.replace(f"({x.Symbol})", ""), axis=1)
    cols = [
        "Date",
        "Company",
        "Symbol",
        "Price",
        "LastPrice",
        "Change (%)",
        "Recommendation",
    ]
    if inverse:
        # Inverse Cramer: a Buy becomes Sell; everything else becomes Buy
        df["InverseCramer"] = df["Recommendation"].apply(
            lambda x: ["Buy", "Sell"][x == "Buy"]
        )
        cols.append("InverseCramer")
    return df[cols]
@log_start_end(log=logger)
def get_cramer_ticker(symbol: str) -> pd.DataFrame:
    """Get cramer recommendations from beginning of year for given ticker

    Parameters
    ----------
    symbol: str
        Ticker to get recommendations for

    Returns
    -------
    pd.DataFrame
        Dataframe with dates and recommendations
    """
    link = "https://raw.githubusercontent.com/jmaslek/InverseCramer/main/AllRecommendations.csv"
    all_recommendations = pd.read_csv(link, index_col=0)
    # NOTE(review): the CSV stores month/day only — the year 2022 is
    # hard-coded here, matching the upstream data set
    all_recommendations["Date"] = pd.to_datetime(
        all_recommendations["Date"].apply(lambda x: x + "/2022")
    )
    matching = all_recommendations[all_recommendations.Symbol == symbol]
    return matching.reset_index(drop=True)
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import numpy as np
import pandas as pd
import yfinance as yf
from matplotlib import pyplot as plt
from openbb_terminal.stocks.behavioural_analysis import finnhub_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.decorators import check_api_key
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def display_stock_price_headlines_sentiment(
    symbol: str,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display stock price and headlines sentiment using VADER model over time. [Source: Finnhub]

    Parameters
    ----------
    symbol : str
        Ticker of company
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    sentiment = finnhub_model.get_headlines_sentiment(symbol)
    if not sentiment.empty:
        # Flatten the single-column sentiment frame into a flat list of scores.
        sentiment_data = [item for sublist in sentiment.values for item in sublist]
        # 15-minute bars (including pre/post-market) from the earliest headline on.
        df_stock = yf.download(
            symbol,
            start=min(sentiment.index).to_pydatetime().date(),
            interval="15m",
            prepost=True,
            progress=False,
        )
        if not df_stock.empty:
            # This plot has 2 axes
            if external_axes is None:
                _, axes = plt.subplots(
                    figsize=plot_autoscale(),
                    dpi=PLOT_DPI,
                    nrows=2,
                    ncols=1,
                    sharex=True,
                    gridspec_kw={"height_ratios": [2, 1]},
                )
                (ax1, ax2) = axes
            elif is_valid_axes_count(external_axes, 2):
                (ax1, ax2) = external_axes
            else:
                return
            ax1.set_title(f"Headlines sentiment and {symbol} price")
            # Plot one trading session at a time so overnight gaps are not
            # connected by a straight line.
            for uniquedate in np.unique(df_stock.index.date):
                ax1.plot(
                    df_stock[df_stock.index.date == uniquedate].index,
                    df_stock[df_stock.index.date == uniquedate]["Adj Close"].values,
                    c="#FCED00",
                )
            ax1.set_ylabel("Stock Price")
            theme.style_primary_axis(ax1)
            theme.style_primary_axis(ax2)
            # Smoothed sentiment line: rolling mean over 5 headlines.
            ax2.plot(
                sentiment.index,
                pd.Series(sentiment_data)
                .rolling(window=5, min_periods=1)
                .mean()
                .values,
                c="#FCED00",
            )
            # Non-negative scores drawn as up-colored bars...
            ax2.bar(
                sentiment[sentiment.values >= 0].index,
                [
                    item
                    for sublist in sentiment[sentiment.values >= 0].values
                    for item in sublist
                ],
                color=theme.up_color,
                width=0.01,
            )
            # ...negative scores as down-colored bars.
            ax2.bar(
                sentiment[sentiment.values < 0].index,
                [
                    item
                    for sublist in sentiment[sentiment.values < 0].values
                    for item in sublist
                ],
                color=theme.down_color,
                width=0.01,
            )
            ax2.yaxis.set_label_position("right")
            ax2.set_ylabel("Headline Sentiment")
            if external_axes is None:
                theme.visualize_output()
    # Export the (possibly empty) sentiment series regardless of plotting.
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "snews", sentiment
    )
__docformat__ = "numpy"
import os
from typing import Optional, List
import logging
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import yfinance
import openbb_terminal.config_plot as cfp
from openbb_terminal.config_terminal import theme
from openbb_terminal.helper_funcs import (
print_rich_table,
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.stocks.behavioural_analysis import cramer_model
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_cramer_daily(inverse: bool = True, export: str = ""):
    """Display Jim Cramer daily recommendations.

    Parameters
    ----------
    inverse: bool
        Include inverse recommendation
    export: str
        Format to export data
    """
    recs = cramer_model.get_cramer_daily(inverse)
    if recs.empty:
        console.print("[red]Error getting request.\n[/red]")
        return
    date = recs.Date[0]
    recs = recs.drop(columns=["Date"])
    # The scraped date carries no year, so compare month-day only.  Parse and
    # re-format so both sides are "%m-%d" strings.
    # BUG FIX: the original compared a str (strftime) against a datetime
    # (strptime result), which is always unequal, so the staleness warning
    # was printed unconditionally.
    rec_monthday = datetime.strptime(date.replace("/", "-"), "%m-%d").strftime("%m-%d")
    if datetime.today().strftime("%m-%d") != rec_monthday:
        console.print(
            """
\n[yellow]Warning[/yellow]: We noticed Jim Crammer recommendation data has not been updated for a while, \
and we're investigating on finding a replacement.
""",
        )
    print_rich_table(recs, title=f"Jim Cramer Recommendations for {date}")
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "cramer", recs)
@log_start_end(log=logger)
def display_cramer_ticker(
    symbol: str,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display ticker close with Cramer recommendations.

    Parameters
    ----------
    symbol: str
        Stock ticker
    raw: bool
        Display raw data
    export: str
        Format to export data
    external_axes: Optional[List[plt.Axes]]
        External axes to plot on (1 axis expected)
    """
    df = cramer_model.get_cramer_ticker(symbol)
    if df.empty:
        console.print(f"No recommendations found for {symbol}.\n")
        return
    # This plot has 1 axis
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    close_prices = yfinance.download(symbol, start="2022-01-01", progress=False)[
        "Adj Close"
    ]
    ax.plot(close_prices)
    # Buys rendered in the theme's up color, sells in the down color.
    color_map = {"Buy": theme.up_color, "Sell": theme.down_color}
    for name, group in df.groupby("Recommendation"):
        ax.scatter(group.Date, group.Price, color=color_map[name], s=150, label=name)
    ax.set_title(f"{symbol.upper()} Close With Cramer Recommendations")
    theme.style_primary_axis(ax)
    ax.legend(loc="best", scatterpoints=1)
    # Overwrite default date formatting
    ax.xaxis.set_major_formatter(DateFormatter("%m/%d"))
    ax.set_xlabel("Date")
    if external_axes is None:
        theme.visualize_output()
    if raw:
        df["Date"] = df["Date"].apply(lambda x: x.strftime("%Y-%m-%d"))
        print_rich_table(df, title=f"Jim Cramer Recommendations for {symbol}")
    # BUG FIX: the export name and dataframe were passed in swapped order
    # (export_data(export, dir, df, "jctr")), mangling the exported file name.
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "jctr", df)
__docformat__ = "numpy"
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Optional
import finnhub
import pandas as pd
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import similar
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def get_company_news(
    symbol: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> List[Dict]:
    """Retrieve news articles for a company. [Source: Finnhub]

    Parameters
    ----------
    symbol : str
        company ticker to look for news articles
    start_date: Optional[str]
        date to start searching articles, with format YYYY-MM-DD;
        defaults to 30 days ago
    end_date: Optional[str]
        date to end searching articles, with format YYYY-MM-DD;
        defaults to today

    Returns
    -------
    List[Dict]
        list of article dicts; a single empty dict on request failure
    """
    today = datetime.now()
    if start_date is None:
        start_date = (today - timedelta(days=30)).strftime("%Y-%m-%d")
    if end_date is None:
        end_date = today.strftime("%Y-%m-%d")
    try:
        client = finnhub.Client(api_key=cfg.API_FINNHUB_KEY)
        return client.company_news(symbol.upper(), _from=start_date, to=end_date)
    except Exception as e:
        console.print(f"[red]{e}\n[/red]")
        return [{}]
@log_start_end(log=logger)
def process_news_headlines_sentiment(
    articles: List[Dict],
) -> pd.DataFrame:
    """Score news headlines with the VADER sentiment model.

    Parameters
    ----------
    articles : List[Dict]
        list of articles with `headline` and `datetime` keys

    Returns
    -------
    pd.DataFrame
        Compound VADER scores indexed by article timestamp, sorted by time
    """
    timestamps: list = []
    scores: list = []
    if articles and len(articles) > 1:
        analyzer = SentimentIntensityAnalyzer()
        prev_headline = ""
        for article in articles:
            headline_upper = article["headline"].upper()
            # Skip headlines that are near-duplicates of the previous one.
            if similar(prev_headline, headline_upper) < 0.7:
                scores.append(
                    analyzer.polarity_scores(article["headline"])["compound"]
                )
                timestamps.append(datetime.fromtimestamp(article["datetime"]))
                prev_headline = headline_upper
    return pd.DataFrame(scores, index=timestamps).sort_index()
@log_start_end(log=logger)
def get_headlines_sentiment(
    symbol: str,
) -> pd.DataFrame:
    """Fetch and score the last 30 days of headlines for a ticker. [Source: Finnhub]

    Parameters
    ----------
    symbol : str
        Ticker of company

    Returns
    -------
    pd.DataFrame
        VADER compound scores indexed by headline timestamp
    """
    if not symbol:
        console.print("[red]Do not run this command without setting a ticker.[/red]\n")
        return pd.DataFrame()
    end = datetime.now()
    start = end - timedelta(days=30)
    articles = get_company_news(
        symbol.upper(),
        start_date=start.strftime("%Y-%m-%d"),
        end_date=end.strftime("%Y-%m-%d"),
    )
    return process_news_headlines_sentiment(articles)
__docformat__ = "numpy"
import logging
import warnings
import bt
import pandas as pd
import pandas_ta as ta
import yfinance as yf
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import is_intraday
from openbb_terminal.common.technical_analysis import ta_helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_data(symbol: str, start_date: str = "2019-01-01") -> pd.DataFrame:
    """Replacement for bt.get: download a ticker's close series via yfinance.

    Parameters
    ----------
    symbol: str
        Ticker to get data for
    start_date: str
        Start date in YYYY-MM-DD format

    Returns
    -------
    pd.DataFrame
        Single-column frame of the (adjusted) close, columns = [symbol];
        empty frame when no usable close column is found
    """
    raw = yf.download(symbol, start=start_date, progress=False)
    price_col = ta_helpers.check_columns(raw, high=False, low=False)
    if price_col is None:
        return pd.DataFrame()
    # One column named after the ticker, as bt expects.
    return raw[price_col].to_frame(name=symbol)
@log_start_end(log=logger)
def buy_and_hold(symbol: str, start_date: str, name: str = "") -> bt.Backtest:
    """Build a buy-and-hold backtest object for a ticker.

    Parameters
    ----------
    symbol: str
        Stock to test
    start_date: str
        Backtest start date, in YYYY-MM-DD format. Can be either string or datetime
    name: str
        Name of the backtest (for labeling purposes)

    Returns
    -------
    bt.Backtest
        Backtest object for buy and hold strategy
    """
    price_history = get_data(symbol, start_date)
    # Buy once at the start, hold everything, never trade again.
    hold_algos = [
        bt.algos.RunOnce(),
        bt.algos.SelectAll(),
        bt.algos.WeighEqually(),
        bt.algos.Rebalance(),
    ]
    strategy = bt.Strategy(name, hold_algos)
    return bt.Backtest(strategy, price_history)
@log_start_end(log=logger)
def ema_strategy(
    symbol: str,
    data: pd.DataFrame,
    ema_length: int = 20,
    spy_bt: bool = True,
    no_bench: bool = False,
) -> bt.backtest.Result:
    """Perform backtest for simple EMA strategy. Buys when price>EMA(l).

    Parameters
    ----------
    symbol: str
        Stock ticker
    data: pd.DataFrame
        Dataframe of prices
    ema_length: int
        Length of ema window
    spy_bt: bool
        Boolean to add spy comparison
    no_bench: bool
        Boolean to not show buy and hold comparison

    Returns
    -------
    bt.backtest.Result
        Backtest results (NOTE(review): returns None for intraday data,
        despite the annotation)
    """
    # TODO: Help Wanted!
    # Implement support for backtesting on intraday data
    if is_intraday(data):
        return None
    # Normalize the index to plain dates so it aligns with the daily benchmarks.
    data.index = pd.to_datetime(data.index.date)
    symbol = symbol.lower()
    ema = pd.DataFrame()
    start_date = data.index[0]
    close_col = ta_helpers.check_columns(data, high=False, low=False)
    if close_col is None:
        return bt.backtest.Result()
    prices = pd.DataFrame(data[close_col])
    prices.columns = [symbol]
    ema[symbol] = ta.ema(prices[symbol], ema_length)
    # Long while close >= EMA, equally weighted, rebalanced each period.
    bt_strategy = bt.Strategy(
        "AboveEMA",
        [
            bt.algos.SelectWhere(prices >= ema),
            bt.algos.WeighEqually(),
            bt.algos.Rebalance(),
        ],
    )
    bt_backtest = bt.Backtest(bt_strategy, prices)
    backtests = [bt_backtest]
    if spy_bt:
        # Optional SPY buy-and-hold benchmark.
        spy_bt = buy_and_hold("spy", start_date, "SPY Hold")
        backtests.append(spy_bt)
    if not no_bench:
        # Buy-and-hold benchmark on the stock itself.
        stock_bt = buy_and_hold(symbol, start_date, symbol.upper() + " Hold")
        backtests.append(stock_bt)
    res = bt.run(*backtests)
    return res
@log_start_end(log=logger)
def emacross_strategy(
    symbol: str,
    data: pd.DataFrame,
    short_length: int = 20,
    long_length: int = 50,
    spy_bt: bool = True,
    no_bench: bool = False,
    shortable: bool = True,
) -> bt.backtest.Result:
    """Perform backtest for an EMA crossover strategy.

    Goes long while EMA(short) > EMA(long) and short (when `shortable`)
    while EMA(short) <= EMA(long).

    Parameters
    ----------
    symbol : str
        Stock ticker
    data : pd.DataFrame
        Dataframe of prices
    short_length : int
        Length of short ema window
    long_length : int
        Length of long ema window
    spy_bt : bool
        Boolean to add spy comparison
    no_bench : bool
        Boolean to not show buy and hold comparison
    shortable : bool
        Boolean to allow for selling of the stock at cross

    Returns
    -------
    bt.backtest.Result
        Backtest results
    """
    symbol = symbol.lower()
    start_date = data.index[0]
    close_col = ta_helpers.check_columns(data, low=False, high=False)
    if close_col is None:
        return bt.backtest.Result()
    prices = pd.DataFrame(data[close_col])
    prices.columns = [symbol]
    short_ema = pd.DataFrame(ta.ema(prices[symbol], short_length))
    short_ema.columns = [symbol]
    long_ema = pd.DataFrame(ta.ema(prices[symbol], long_length))
    long_ema.columns = [symbol]
    # Target weights: +1 above the cross, -1 below it (0 when shorting is off),
    # and flat while the long EMA is still in its warm-up period.
    signals = long_ema.copy()
    signals[short_ema > long_ema] = 1.0
    signals[short_ema <= long_ema] = -1.0 * shortable
    signals[long_ema.isnull()] = 0.0
    # (Removed an unused bt.merge(...) intermediate that was never read.)
    bt_strategy = bt.Strategy(
        "EMACross",
        [
            bt.algos.WeighTarget(signals),
            bt.algos.Rebalance(),
        ],
    )
    bt_backtest = bt.Backtest(bt_strategy, prices)
    backtests = [bt_backtest]
    if spy_bt:
        spy_bt = buy_and_hold("spy", start_date, "SPY Hold")
        backtests.append(spy_bt)
    if not no_bench:
        stock_bt = buy_and_hold(symbol, start_date, symbol.upper() + " Hold")
        backtests.append(stock_bt)
    # bt triggers deprecation warnings on newer pandas; silence them for the run.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        res = bt.run(*backtests)
    return res
@log_start_end(log=logger)
def rsi_strategy(
    symbol: str,
    data: pd.DataFrame,
    periods: int = 14,
    low_rsi: int = 30,
    high_rsi: int = 70,
    spy_bt: bool = True,
    no_bench: bool = False,
    shortable: bool = True,
) -> bt.backtest.Result:
    """Perform backtest for an RSI mean-reversion strategy.

    Buys when RSI < `low_rsi` and shorts (when `shortable`) when
    RSI > `high_rsi`.

    Parameters
    ----------
    symbol : str
        Stock ticker
    data : pd.DataFrame
        Dataframe of prices
    periods : int
        Number of periods for RSI calculation
    low_rsi : int
        Low RSI value to buy
    high_rsi : int
        High RSI value to sell
    spy_bt : bool
        Boolean to add spy comparison
    no_bench : bool
        Boolean to not show buy and hold comparison
    shortable : bool
        Boolean to allow short selling above `high_rsi`

    Returns
    -------
    bt.backtest.Result
        Backtest results (an empty DataFrame when no close column is found)
    """
    symbol = symbol.lower()
    start_date = data.index[0]
    close_col = ta_helpers.check_columns(data, high=False, low=False)
    if close_col is None:
        # NOTE(review): returns an empty DataFrame here, not a Result —
        # kept for backward compatibility with existing callers.
        return pd.DataFrame()
    prices = pd.DataFrame(data[close_col])
    prices.columns = [symbol]
    rsi = pd.DataFrame(ta.rsi(prices[symbol], periods))
    rsi.columns = [symbol]
    # Target weights: short above high_rsi (if allowed), long below low_rsi,
    # flat while RSI is undefined during its warm-up period.
    signal = 0 * rsi.copy()
    signal[rsi > high_rsi] = -1 * shortable
    signal[rsi < low_rsi] = 1
    signal[rsi.isnull()] = 0
    # (Removed an unused bt.merge(...) intermediate and a duplicated
    # bt.Backtest construction.)
    warnings.simplefilter(action="ignore", category=FutureWarning)
    bt_strategy = bt.Strategy(
        "RSI Reversion", [bt.algos.WeighTarget(signal), bt.algos.Rebalance()]
    )
    bt_backtest = bt.Backtest(bt_strategy, prices)
    backtests = [bt_backtest]
    if spy_bt:
        spy_bt = buy_and_hold("spy", start_date, "SPY Hold")
        backtests.append(spy_bt)
    if not no_bench:
        stock_bt = buy_and_hold(symbol, start_date, symbol.upper() + " Hold")
        backtests.append(stock_bt)
    # Once the bt package replaces pd iteritems with items we can remove this
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        res = bt.run(*backtests)
    return res
__docformat__ = "numpy"
import logging
import os
from datetime import datetime
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yfinance as yf
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
is_intraday,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.backtesting import bt_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
np.seterr(divide="ignore")
@log_start_end(log=logger)
def display_whatif_scenario(
    symbol: str,
    date_shares_acquired: Optional[datetime] = None,
    num_shares_acquired: float = 1,
):
    """Display what-if scenario: value today of shares bought at a past date.

    Parameters
    ----------
    symbol: str
        Ticker to check what if scenario
    date_shares_acquired: Optional[datetime]
        Date at which the shares were acquired (defaults to the IPO date)
    num_shares_acquired: float
        Number of shares acquired
    """
    data = yf.download(symbol, progress=False)
    # BUG FIX: bail out on an empty download instead of failing later when
    # indexing the nonexistent price history.
    if data.empty:
        console.print(f"No price data found for {symbol}.\n")
        return
    data = data["Adj Close"]
    ipo_date = data.index[0]
    last_date = data.index[-1]
    if not date_shares_acquired:
        date_shares_ac = ipo_date
        console.print("IPO date selected by default.")
    else:
        date_shares_ac = date_shares_acquired
    if date_shares_ac > last_date:
        console.print("The date selected is in the future. Select a valid date.", "\n")
        return
    if date_shares_ac < ipo_date:
        console.print(
            f"{symbol} had not IPO at that date. Thus, changing the date to IPO on the {ipo_date.strftime('%Y-%m-%d')}",
            "\n",
        )
        date_shares_ac = ipo_date
    # Price of the first session strictly after the acquisition date.
    initial_shares_value = (
        data[data.index > date_shares_ac].values[0] * num_shares_acquired
    )
    # Show fractional share counts with 2 decimals, whole counts as integers.
    if (num_shares_acquired - int(num_shares_acquired)) > 0:
        nshares = round(num_shares_acquired, 2)
    else:
        nshares = round(num_shares_acquired)
    shares = "share"
    these = "This"
    if nshares > 1:
        shares += "s"
        these = "These"
    console.print(
        f"If you had acquired {nshares} {shares} of {symbol} on "
        f"{date_shares_ac.strftime('%Y-%m-%d')} with a cost of {initial_shares_value:.2f}."
    )
    current_shares_value = (
        data[data.index > date_shares_ac].values[-1] * num_shares_acquired
    )
    if current_shares_value > initial_shares_value:
        pct = 100 * (
            (current_shares_value - initial_shares_value) / initial_shares_value
        )
        console.print(
            f"{these} would be worth {current_shares_value:.2f}. Which represents an increase of {pct:.2f}%.",
            "\n",
        )
    else:
        pct = 100 * (
            (initial_shares_value - current_shares_value) / initial_shares_value
        )
        # BUG FIX: user-facing message typo "an decrease" -> "a decrease".
        console.print(
            f"{these} would be worth {current_shares_value:.2f}. Which represents a decrease of {pct:.2f}%.",
            "\n",
        )
@log_start_end(log=logger)
def display_simple_ema(
    symbol: str,
    data: pd.DataFrame,
    ema_length: int = 20,
    spy_bt: bool = True,
    no_bench: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Run and plot the simple above-EMA strategy (long while Price > EMA(l)).

    Parameters
    ----------
    symbol : str
        Stock ticker
    data : pd.Dataframe
        Dataframe of prices
    ema_length : int
        Length of ema window
    spy_bt : bool
        Boolean to add spy comparison
    no_bench : bool
        Boolean to not show buy and hold comparison
    export : bool
        Format to export backtest results
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # TODO: Help Wanted!
    # Implement support for backtesting on intraday data
    if is_intraday(data):
        console.print("Backtesting on intraday data is not yet supported.")
        console.print("Submit a feature request to let us know that you need it here:")
        console.print("https://openbb.co/request-a-feature")
        return
    # Single-axis figure, created unless the caller supplied a valid one.
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return
        (ax,) = external_axes
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    res = bt_model.ema_strategy(symbol, data, ema_length, spy_bt, no_bench)
    res.plot(title=f"Equity for EMA({ema_length})", ax=ax)
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
    console.print(res.display(), "\n")
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "simple_ema", res.stats
    )
@log_start_end(log=logger)
def display_emacross(
    symbol: str,
    data: pd.DataFrame,
    short_ema: int = 20,
    long_ema: int = 50,
    spy_bt: bool = True,
    no_bench: bool = False,
    shortable: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):  # pylint: disable=R0913
    """Strategy where we go long/short when EMA(short) is greater than/less than EMA(long)

    Parameters
    ----------
    symbol : str
        Stock ticker
    data : pd.Dataframe
        Dataframe of prices
    short_ema : int
        Length of short ema window
    long_ema : int
        Length of long ema window
    spy_bt : bool
        Boolean to add spy comparison
    no_bench : bool
        Boolean to not show buy and hold comparison
    shortable : bool
        Boolean to allow for selling of the stock at cross
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # TODO: Help Wanted!
    # Implement support for backtesting on intraday data
    if is_intraday(data):
        console.print("Backtesting on intraday data is not yet supported.")
        console.print("Submit a feature request to let us know that you need it here:")
        console.print("https://openbb.co/request-a-feature")
        return
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Run the crossover backtest and draw its equity curve on the axis.
    res = bt_model.emacross_strategy(
        symbol, data, short_ema, long_ema, spy_bt, no_bench, shortable
    )
    res.plot(title=f"EMA Cross for EMA({short_ema})/EMA({long_ema})", ax=ax)
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "emacross", res.stats
    )
    return
# pylint:disable=too-many-arguments
@log_start_end(log=logger)
def display_rsi_strategy(
    symbol: str,
    data: pd.DataFrame,
    periods: int = 14,
    low_rsi: int = 30,
    high_rsi: int = 70,
    spy_bt: bool = True,
    no_bench: bool = False,
    shortable: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Strategy that buys when the stock is less than a threshold and shorts when it exceeds a threshold.

    Parameters
    ----------
    symbol : str
        Stock ticker
    data : pd.Dataframe
        Dataframe of prices
    periods : int
        Number of periods for RSI calculation
    low_rsi : int
        Low RSI value to buy
    high_rsi : int
        High RSI value to sell
    spy_bt : bool
        Boolean to add spy comparison
    no_bench : bool
        Boolean to not show buy and hold comparison
    shortable : bool
        Boolean to allow for selling of the stock at cross
    export : str
        Format to export backtest results
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # TODO: Help Wanted!
    # Implement support for backtesting on intraday data
    if is_intraday(data):
        console.print("Backtesting on intraday data is not yet supported.")
        console.print("Submit a feature request to let us know that you need it here:")
        console.print("https://openbb.co/request-a-feature")
        return
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    res = bt_model.rsi_strategy(
        symbol, data, periods, low_rsi, high_rsi, spy_bt, no_bench, shortable
    )
    res.plot(title=f"RSI Strategy between ({low_rsi}, {high_rsi})", ax=ax)
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
    # BUG FIX: export label typo "rsi_corss" -> "rsi_cross".
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "rsi_cross", res.stats
    )
    return
__docformat__ = "numpy"
import difflib
import logging
import os
from typing import List
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
print_rich_table,
lambda_long_number_format,
)
from openbb_terminal.terminal_helper import suppress_stdout
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.screener.finviz_model import get_screener_data
logger = logging.getLogger(__name__)
# Valid sort columns per finviz screener data type; used by `screener` to
# validate the user's `sortby` argument (and to suggest close matches).
d_cols_to_sort = {
    "overview": [
        "Ticker",
        "Company",
        "Sector",
        "Industry",
        "Country",
        "Market Cap",
        "P/E",
        "Price",
        "Change",
        "Volume",
    ],
    "valuation": [
        "Ticker",
        "Market Cap",
        "P/E",
        "Fwd P/E",
        "PEG",
        "P/S",
        "P/B",
        "P/C",
        "P/FCF",
        "EPS this Y",
        "EPS next Y",
        "EPS past 5Y",
        "EPS next 5Y",
        "Sales past 5Y",
        "Price",
        "Change",
        "Volume",
    ],
    "financial": [
        "Ticker",
        "Market Cap",
        "Dividend",
        "ROA",
        "ROE",
        "ROI",
        "Curr R",
        "Quick R",
        "LTDebt/Eq",
        "Debt/Eq",
        "Gross M",
        "Oper M",
        "Profit M",
        "Earnings",
        "Price",
        "Change",
        "Volume",
    ],
    "ownership": [
        "Ticker",
        "Market Cap",
        "Outstanding",
        "Float",
        "Insider Own",
        "Insider Trans",
        "Inst Own",
        "Inst Trans",
        "Float Short",
        "Short Ratio",
        "Avg Volume",
        "Price",
        "Change",
        "Volume",
    ],
    "performance": [
        "Ticker",
        "Perf Week",
        "Perf Month",
        "Perf Quart",
        "Perf Half",
        "Perf Year",
        "Perf YTD",
        "Volatility W",
        "Volatility M",
        "Recom",
        "Avg Volume",
        "Rel Volume",
        "Price",
        "Change",
        "Volume",
    ],
    "technical": [
        "Ticker",
        "Beta",
        "ATR",
        "SMA20",
        "SMA50",
        "SMA200",
        "52W High",
        "52W Low",
        "RSI",
        "Price",
        "Change",
        "from Open",
        "Gap",
        "Volume",
    ],
}
@log_start_end(log=logger)
def screener(
    loaded_preset: str = "top_gainers",
    data_type: str = "overview",
    limit: int = 10,
    ascend: bool = False,
    sortby: str = "",
    export: str = "",
) -> List[str]:
    """Screener one of the following: overview, valuation, financial, ownership, performance, technical.

    Parameters
    ----------
    loaded_preset: str
        Preset loaded to filter for tickers
    data_type : str
        Data type string between: overview, valuation, financial, ownership, performance, technical
    limit : int
        Limit of stocks to display
    ascend : bool
        Order of table to ascend or descend
    sortby: str
        Column to sort table by
    export : str
        Export dataframe data to csv,json,xlsx file

    Returns
    -------
    List[str]
        List of stocks that meet preset criteria
    """
    with suppress_stdout():
        # BUG FIX: pass the requested limit through (it was hard-coded to 10,
        # capping the result set regardless of the user's choice).
        df_screen = get_screener_data(
            preset_loaded=loaded_preset,
            data_type=data_type,
            limit=limit,
            ascend=ascend,
        )
    if isinstance(df_screen, pd.DataFrame):
        if df_screen.empty:
            return []
        df_screen = df_screen.dropna(axis="columns", how="all")
        if sortby:
            if sortby in d_cols_to_sort[data_type]:
                df_screen = df_screen.sort_values(
                    by=[sortby],
                    ascending=ascend,
                    na_position="last",
                )
            else:
                similar_cmd = difflib.get_close_matches(
                    sortby,
                    d_cols_to_sort[data_type],
                    n=1,
                    cutoff=0.7,
                )
                if similar_cmd:
                    # BUG FIX: `sortby` is a string, so `' '.join(sortby)`
                    # spaced out its characters in the message.
                    console.print(
                        f"Replacing '{sortby}' by '{similar_cmd[0]}' so table can be sorted."
                    )
                    df_screen = df_screen.sort_values(
                        by=[similar_cmd[0]],
                        ascending=ascend,
                        na_position="last",
                    )
                else:
                    console.print(
                        f"Wrong sort column provided! Provide one of these: {', '.join(d_cols_to_sort[data_type])}"
                    )
        df_screen = df_screen.fillna("")
        # Columns to humanize (e.g. 1.2B) per data type.  Replaces the previous
        # duplicated if/elif chain, which also repeated the "technical" branch.
        format_cols = {
            "overview": ["Market Cap", "Volume"],
            "valuation": ["Market Cap", "Volume"],
            "financial": ["Market Cap", "Volume"],
            "ownership": ["Market Cap", "Outstanding", "Float", "Avg Volume", "Volume"],
            "performance": ["Avg Volume", "Volume"],
            "technical": ["Volume"],
        }
        cols = format_cols.get(data_type)
        if cols:
            df_screen[cols] = df_screen[cols].applymap(
                lambda x: lambda_long_number_format(x, 1)
            )
        print_rich_table(
            df_screen.head(n=limit),
            headers=list(df_screen.columns),
            show_index=False,
            title="Finviz Screener",
        )
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            data_type,
            df_screen,
        )
        return list(df_screen.head(n=limit)["Ticker"].values)
    console.print(
        "The preset selected did not return a sufficient number of tickers. Two or more tickers are needed."
    )
    return []
import configparser
import logging
from pathlib import Path
import pandas as pd
from finvizfinance.screener import (
financial,
overview,
ownership,
performance,
technical,
valuation,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.core.config.paths import USER_PRESETS_DIRECTORY
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# User-defined screener presets live in the user's config area...
PRESETS_PATH = USER_PRESETS_DIRECTORY / "stocks" / "screener"
# ...while the bundled default presets ship next to this module.
PRESETS_PATH_DEFAULT = Path(__file__).parent / "presets"

# Map preset file name -> full path.  Built from the user directory first and
# then updated with the bundled defaults, so a default preset with the same
# file name overrides the user's copy (dict.update semantics).
preset_choices = {
    filepath.name: filepath
    for filepath in PRESETS_PATH.iterdir()
    if filepath.suffix == ".ini"
}
preset_choices.update(
    {
        filepath.name: filepath
        for filepath in PRESETS_PATH_DEFAULT.iterdir()
        if filepath.suffix == ".ini"
    }
)

# pylint: disable=C0302

# Map CLI signal identifiers to the display names the finvizfinance screener
# expects (passed to set_filter(signal=...)).
d_signals = {
    "top_gainers": "Top Gainers",
    "top_losers": "Top Losers",
    "new_high": "New High",
    "new_low": "New Low",
    "most_volatile": "Most Volatile",
    "most_active": "Most Active",
    "unusual_volume": "Unusual Volume",
    "overbought": "Overbought",
    "oversold": "Oversold",
    "downgrades": "Downgrades",
    "upgrades": "Upgrades",
    "earnings_before": "Earnings Before",
    "earnings_after": "Earnings After",
    "recent_insider_buying": "Recent Insider Buying",
    "recent_insider_selling": "Recent Insider Selling",
    "major_news": "Major News",
    "horizontal_sr": "Horizontal S/R",
    "tl_resistance": "TL Resistance",
    "tl_support": "TL Support",
    "wedge_up": "Wedge Up",
    "wedge_down": "Wedge Down",
    "wedge": "Wedge",
    "triangle_ascending": "Triangle Ascending",
    "triangle_descending": "Triangle Descending",
    "channel_up": "Channel Up",
    "channel_down": "Channel Down",
    "channel": "Channel",
    "double_top": "Double Top",
    "double_bottom": "Double Bottom",
    "multiple_top": "Multiple Top",
    "multiple_bottom": "Multiple Bottom",
    "head_shoulders": "Head & Shoulders",
    "head_shoulders_inverse": "Head & Shoulders Inverse",
}
@log_start_end(log=logger)
def get_screener_data(
    preset_loaded: str = "top_gainers",
    data_type: str = "overview",
    limit: int = 10,
    ascend: bool = False,
) -> pd.DataFrame:
    """Screener Overview

    Parameters
    ----------
    preset_loaded : str
        Loaded preset filter: either a signal keyword from ``d_signals`` or the
        filename of an .ini preset from ``preset_choices``
    data_type : str
        Data type between: overview, valuation, financial, ownership, performance, technical
    limit : int
        Limit of stocks filtered with presets to print
    ascend : bool
        Ascended order of stocks filtered to print

    Returns
    -------
    pd.DataFrame
        Dataframe with loaded filtered stocks; empty on invalid input or when
        the website returns unusable data
    """
    # Dispatch table instead of a six-way if/elif chain.
    screener_classes = {
        "overview": overview.Overview,
        "valuation": valuation.Valuation,
        "financial": financial.Financial,
        "ownership": ownership.Ownership,
        "performance": performance.Performance,
        "technical": technical.Technical,
    }
    if data_type not in screener_classes:
        console.print("Invalid selected screener type")
        return pd.DataFrame()
    screen = screener_classes[data_type]()

    # Build the screener_view arguments incrementally; a non-positive limit
    # means "no limit" and is simply omitted (finvizfinance uses its default).
    view_kwargs: dict = {"ascend": ascend}
    if limit > 0:
        view_kwargs["limit"] = limit

    if preset_loaded in d_signals:
        screen.set_filter(signal=d_signals[preset_loaded])
    else:
        preset_filter = configparser.RawConfigParser()
        preset_filter.optionxform = str  # type: ignore  # keep .ini key case
        preset_filter.read(preset_choices[preset_loaded])

        # Validate every preset entry against the known finviz options before
        # touching the screener, so a bad preset fails fast with a message.
        for section in ["General", "Descriptive", "Fundamental", "Technical"]:
            for key, val in preset_filter[section].items():
                if key not in d_check_screener:
                    console.print(
                        f"The screener variable {section}.{key} shouldn't exist!\n"
                    )
                    return pd.DataFrame()
                if val not in d_check_screener[key]:
                    console.print(
                        f"Invalid [{section}] {key}={val}. "
                        f"Choose one of the following options:\n{', '.join(d_check_screener[key])}.\n"
                    )
                    return pd.DataFrame()

        # Drop empty values so only explicitly-set filters are sent.
        d_filters = {
            k: v
            for k, v in {
                **preset_filter["Descriptive"],
                **preset_filter["Fundamental"],
                **preset_filter["Technical"],
            }.items()
            if v
        }
        screen.set_filter(filters_dict=d_filters)

        if "Order" in preset_filter["General"]:
            view_kwargs["order"] = preset_filter["General"]["Order"]

    try:
        df_screen = screen.screener_view(**view_kwargs)
    except (IndexError, AttributeError):
        # finvizfinance raises these when the scraped page is malformed; the
        # original code only guarded the signal branch — now both are covered.
        console.print("[red]Invalid data provided by the website[/red]\n")
        return pd.DataFrame()
    return df_screen
# Human-readable description for each signal preset keyword in d_signals,
# shown to the user when listing available presets.
# NOTE(review): the literal "%%" in the first two entries renders as a double
# percent sign unless these strings later pass through %-formatting — confirm
# the intended output is a single "%".
d_signals_desc = {
    "top_gainers": "stocks with the highest %% price gain today",
    "top_losers": "stocks with the highest %% price loss today",
    "new_high": "stocks making 52-week high today",
    "new_low": "stocks making 52-week low today",
    "most_volatile": "stocks with the highest widest high/low trading range today",
    "most_active": "stocks with the highest trading volume today",
    "unusual_volume": "stocks with unusually high volume today - the highest relative volume ratio",
    "overbought": "stock is becoming overvalued and may experience a pullback.",
    "oversold": "oversold stocks may represent a buying opportunity for investors",
    "downgrades": "stocks downgraded by analysts today",
    "upgrades": "stocks upgraded by analysts today",
    "earnings_before": "companies reporting earnings today, before market open",
    "earnings_after": "companies reporting earnings today, after market close",
    "recent_insider_buying": "stocks with recent insider buying activity",
    "recent_insider_selling": "stocks with recent insider selling activity",
    "major_news": "stocks with the highest news coverage today",
    "horizontal_sr": "horizontal channel of price range between support and resistance trendlines",
    "tl_resistance": "once a rising trendline is broken",
    "tl_support": "once a falling trendline is broken",
    "wedge_up": "upward trendline support and upward trendline resistance (reversal)",
    "wedge_down": "downward trendline support and downward trendline resistance (reversal)",
    "wedge": "upward trendline support, downward trendline resistance (contiunation)",
    "triangle_ascending": "upward trendline support and horizontal trendline resistance",
    "triangle_descending": "horizontal trendline support and downward trendline resistance",
    "channel_up": "both support and resistance trendlines slope upward",
    "channel_down": "both support and resistance trendlines slope downward",
    "channel": "both support and resistance trendlines are horizontal",
    "double_top": "stock with 'M' shape that indicates a bearish reversal in trend",
    "double_bottom": "stock with 'W' shape that indicates a bullish reversal in trend",
    "multiple_top": "same as double_top hitting more highs",
    "multiple_bottom": "same as double_bottom hitting more lows",
    "head_shoulders": "chart formation that predicts a bullish-to-bearish trend reversal",
    "head_shoulders_inverse": "chart formation that predicts a bearish-to-bullish trend reversal",
}
d_check_screener = {
"Order": [
"Any",
"Signal",
"Ticker",
"Company",
"Sector",
"Industry",
"Country",
"Market Cap.",
"Price/Earnings",
"Forward Price/Earnings",
"PEG (Price/Earnings/Growth)",
"Price/Sales",
"Price/Book",
"Price/Cash",
"Price/Free Cash Flow",
"Dividend Yield",
"Payout Ratio",
"EPS(ttm)",
"EPS growth this year",
"EPS growth next year",
"EPS growth past 5 years",
"EPS growth next 5 years",
"Sales growth past 5 years",
"EPS growth qtr over qtr",
"Sales growth qtr over qtr",
"Shares Outstanding",
"Shares Float",
"Insider Ownership",
"Insider Transactions",
"Institutional Ownership",
"Institutional Transactions",
"Short Interest Share",
"Short Interest Ratio",
"Earnings Date",
"Return on Assets",
"Return on Equity",
"Return on Investment",
"Current Ratio",
"Quick Ratio",
"LT Debt/Equity",
"Total Debt/Equity",
"Gross Margin",
"Operating Margin",
"Net Profit Margin",
"Analyst Recommendation",
"Performance (Week)",
"Performance (Month)",
"Performance (Quarter)",
"Performance (Half Year)",
"Performance (Year)",
"Performance (Year To Date)",
"Beta",
"Average True Range",
"Volatility (Week)",
"Volatility (Month)",
"20-Day SMA (Relative)",
"50-Day SMA (Relative)",
"200-Day SMA (Relative)",
"50-Day High (Relative)",
"50-Day Low (Relative)",
"52-Week High (Relative)",
"52-Week Low (Relative)",
"Relative Strength Index (14)",
"Average Volume (3 Month)",
"Relative Volume",
"Change",
"Change from Open",
"Gap",
"Volume",
"Price",
"Target Price",
"IPO Date",
],
"Signal": [
"Any",
"Top Gainers",
"Top Losers",
"New High",
"New Low",
"Most Volatile",
"Most Active",
"Unusual Volume",
"Overbought",
"Oversold",
"Downgrades",
"Upgrades",
"Earnings Before",
"Earnings After",
"Recent Insider Buying",
"Recent Insider Selling",
"Major News",
"Horizontal S/R",
"TL Resistance",
"TL Support",
"Wedge Up",
"Wedge Down",
"Triangle Ascending",
"Triangle Descending",
"Wedge",
"Channel Up",
"Channel Down",
"Channel",
"Double Top",
"Double Bottom",
"Multiple Top",
"Multiple Bottom",
"Head & Shoulders",
"Head & Shoulders Inverse",
],
"Exchange": ["Any", "AMEX", "NASDAQ", "NYSE"],
"Market Cap.": [
"Any",
"Mega ($200bln and more)",
"Large ($10bln to $200bln)",
"Mid ($2bl to $10bln)",
"Small ($300mln to $2bln)",
"Micro ($50mln to $300mln)",
"Nano (under $50mln)",
"+Large (over $50mln)",
"+Mid (over $2bln)",
"+Small (over $300mln)",
"+Micro (over $50mln)",
"-Large (under $200bln)",
"-Mid (under $10bln)",
"-Small (under $2bln)",
"-Micro (under $300mln)",
],
"Earnings Date": [
"Any",
"Today",
"Today Before Market Open",
"Today Before Market Close",
"Tomorrow",
"Tomorrow Before Market Open",
"Tomorrow Before Market Close",
"Yesterday",
"Yesterday Before Market Open",
"Yesterday Before Market Close",
"Next 5 Days",
"Previous 5 Days",
"This Week",
"Next Week",
"Previous Week",
"This Month",
],
"Target Price": [
"Any",
"50% Above Price",
"40% Above Price",
"30% Above Price",
"20% Above Price",
"10% Above Price",
"5% Above Price",
"Above Price",
"Below Price",
"5% Below Price",
"10% Below Price",
"20% Below Price",
"30% Below Price",
"40% Below Price",
"50% Below Price",
],
"Index": ["Any", "S&P 500", "DJIA"],
"Dividend Yield": [
"Any",
"None (0%)",
"Positive (>0%)",
"High (>5%)",
"Very High (>10%)",
"Over 1%",
"Over 2%",
"Over 3%",
"Over 4%",
"Over 5%",
"Over 6%",
"Over 7%",
"Over 8%",
"Over 9%",
"Over 10%",
],
"Average Volume": [
"Any",
"Under 50K",
"Under 100K",
"Under 500K",
"Under 750K",
"Under 1M",
"Over 50K",
"Over 100K",
"Over 200K",
"Over 300K",
"Over 400K",
"Over 500K",
"Over 750K",
"Over 1M",
"Over 2M",
"100K to 500K",
"100K to 1M",
"500K to 1M",
"500K to 10M",
],
"IPO Date": [
"Any",
"Today",
"Yesterday",
"In the last week",
"In the last month",
"In the last quarter",
"In the last year",
"In the last 2 years",
"In the last 3 years",
"In the last 5 years",
"More than a year ago",
"More that 5 years ago",
"More than 10 years ago",
"More than 15 years ago",
"More than 20 years ago",
"More than 25 years ago",
],
"Sector": [
"Any",
"Basic Materials",
"Communication Services",
"Consumer Cyclical",
"Consumer Defensive",
"Energy",
"Financial",
"Healthcare",
"Industrials",
"Real Estate",
"Technology",
"Utilities",
],
"Float Short": [
"Any",
"Low (<5%)",
"High(>20%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Relative Volume": [
"Any",
"Over 10",
"Over 5",
"Over 3",
"Over 2",
"Over 1.5",
"Over 1",
"Over 0.75",
"Over 0.5",
"Over 0.25",
"Under 2",
"Under 1",
"Under 1.5",
"Under 1",
"Under 0.75",
"Under 0.5",
"Under 0.25",
"Under 0.1",
],
"Shares Outstanding": [
"Any",
"Under 1M",
"Under 5M",
"Under 10M",
"Under 20M",
"Under 50M",
"Under 100M",
"Over 1M",
"Over 2M",
"Over 5M",
"Over 10M",
"Over 20M",
"Over 50M",
"Over 100M",
"Over 200M",
"Over 500M",
"Over 1000M",
],
"Industry": [
"Any",
"Agricultural Inputs",
"Aluminium",
"Building Materials",
"Chemicals",
"Coking Coal",
"Copper",
"Gold",
"Lumber & Wood Production",
"Other Industrial Metals & Mining",
"Other Precious Metals & Mining",
"Paper & Paper Products",
"Silver",
"Specialty Chemicals",
"Steel",
],
"Analyst Recom.": [
"Any",
"Strong Buy (1)",
"Buy or better",
"Buy",
"Hold or better",
"Hold",
"Hold or worse",
"Sell",
"Sell or worse",
"Strong Sell (5)",
],
"Current Volume": [
"Any",
"Under 100K",
"Under 500K",
"Under 750K",
"Under 1M",
"Over 0",
"Over 50K",
"Over 100K",
"Over 200K",
"Over 300K",
"Over 400K",
"Over 500K",
"Over 750K",
"Over 1M",
"Over 2M",
"Over 5M",
"Over 10M",
"Over 20M",
],
"Float": [
"Any",
"Under 1M",
"Under 5M",
"Under 10M",
"Under 20M",
"Under 50M",
"Under 100M",
"Over 1M",
"Over 2M",
"Over 5M",
"Over 10M",
"Over 20M",
"Over 50M",
"Over 100M",
"Over 200M",
"Over 500M",
"Over 1000M",
],
"Country": [
"Any",
"Asia",
"Europe",
"Latin America",
"BRIC",
"Argentina",
"Australia",
"Bahamas",
"Belgium",
"BeNeLux",
"Bermuda",
"Brazil",
"Canada",
"Cayman Islands",
"Chile",
"China",
"China & Hong Kong",
"Colombia",
"Cyprus",
"Denmark",
"Finland",
"France",
"Germany",
"Greece",
"Hong Kong",
"Hungary",
"Iceland",
"Iceland",
"India",
"Indonesia",
"Ireland",
"Israel",
"Italy",
"Japan",
"Kazakhstan",
"Luxembourg",
"Malaysia",
"Malta",
"Mexico",
"Monaco",
"Netherlands",
"New Zealand",
"Norway",
"Panama",
"Peru",
"Philippines",
"Portugal",
"Russia",
"Singapore",
"South Africa",
"South Korea",
"Spain",
"Sweden",
"Switzerland",
"Taiwan",
"Turkey",
"United Arab Emirates",
"United Kingdom",
"Uruguay",
"USA",
"Foreign (ex-USA)",
],
"Option/Short": ["Any", "Optionable", "Shortable", "Optionable and shortable"],
"Price": [
"Any",
"Under $1",
"Under $2",
"Under $3",
"Under $4",
"Under $5",
"Under $7",
"Under $10",
"Under $15",
"Under $20",
"Under $30",
"Under $40",
"Under $50",
"Over $1",
"Over $2",
"Over $3",
"Over $4",
"Over $5",
"Over $7",
"Over $10",
"Over $15",
"Over $20",
"Over $30",
"Over $40",
"Over $50",
"Over $60",
"Over $70",
"Over $80",
"Over $90",
"Over $100",
"$1 to $5",
"$1 to $10",
"$1 to $20",
"$5 to %10",
"$5 to $20",
"$5 to $50",
"$10 to $20",
"$10 to $50",
"$20 to $50",
"$50 to $100",
],
"P/E": [
"Any",
"Low (<15)",
"Profitable (>0)",
"High (>50)",
"Under 5",
"Under 10",
"Under 15",
"Under 20",
"Under 25",
"Under 30",
"Under 35",
"Under 40",
"Under 45",
"Under 50",
"Over 5",
"Over 10",
"Over 15",
"Over 25",
"Over 30",
"Over 35",
"Over 40",
"Over 45",
"Over 50",
],
"Price/Cash": [
"Any",
"Low (<3)",
"High (>50)",
"Under 1",
"Under 2",
"Under 3",
"Under 4",
"Under 5",
"Under 6",
"Under 7",
"Under 8",
"Under 9",
"Under 10",
"Over 1",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 6",
"Over 7",
"Over 8",
"Over 9",
"Over 10",
"Over 20",
"Over 30",
"Over 40",
"Over 50",
],
"EPS growthnext 5 years": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (<10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Return on Equity": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Positive (>30%)",
"Very Negative (<-15%)",
"Under -50%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +50%",
"Over +45%",
"Over +40%",
"Over +35%",
"Over +30%",
"Over +25%",
"Over +20%",
"Over +15%",
"Over +10%",
"Over +5%",
],
"Debt/Equity": [
"Any",
"High (>0.5)",
"Low (<0.1)",
"Under 1",
"Under 0.9",
"Under 0.8",
"Under 0.7",
"Under 0.6",
"Under 0.5",
"Under 0.4",
"Under 0.3",
"Under 0.2",
"Under 0.1",
"Over 0.1",
"Over 0.2",
"Over 0.3",
"Over 0.4",
"Over 0.5",
"Over 0.6",
"Over 0.7",
"Over 0.8",
"Over 0.9",
"Over 1",
],
"InsiderOwnership": [
"Any",
"Low (<5%)",
"High (>30%)",
"Very High (>50%)",
"Over 10%",
"Over 20%",
"Over 30%",
"Over 40%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"Forward P/E": [
"Any",
"Low (<15)",
"Profitable (>0)",
"High (>50)",
"Under 5",
"Under 10",
"Under 15",
"Under 20",
"Under 25",
"Under 30",
"Under 35",
"Under 40",
"Under 45",
"Under 50",
"Over 5",
"Over 10",
"Over 15",
"Over 20",
"Over 25",
"Over 30",
"Over 35",
"Over 40",
"Over 45",
"Over 50",
],
"Price/Free Cash Flow": [
"Any",
"Low (<15)",
"High (>50)",
"Under 5",
"Under 10",
"Under 15",
"Under 20",
"Under 25",
"Under 30",
"Under 35",
"Under 40",
"Under 45",
"Under 50",
"Under 60",
"Under 70",
"Under 80",
"Under 90",
"Under 100",
"Over 5",
"Over 10",
"Over 15",
"Over 20",
"Over 25",
"Over 30",
"Over 35",
"Over 40",
"Over 45",
"Over 50",
"Over 60",
"Over 70",
"Over 80",
"Over 90",
"Over 100",
],
"Sales growthpast 5 years": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Return on Investment": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Positive (>25%)",
"Very Negative (<-10%)",
"Under -50%",
"Under -45%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
],
"Gross Margin": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"High (>50%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 45%",
"Under 40%",
"Under 35%",
"Under 30%",
"Under 25%",
"Under 20%",
"Under 15%",
"Under 10%",
"Under 5%",
"Under 0%",
"Under -10%",
"Under -20%",
"Under -30%",
"Under -50%",
"Under -70%",
"Under -100%",
"Over 0%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
"Over 40%",
"Over 45%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"InsiderTransactions": [
"Any",
"Very Negative (<20%)",
"Negative (<0%)",
"Positive (>0%)",
"Very Positive (>20%)",
"Under -90%",
"Under 80%",
"Under 70%",
"Under -60%",
"Under -50%",
"Under -45%",
"Under 40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
"Over +60%",
"Over +70%",
"Over +80%",
"Over +90%",
],
"PEG": [
"Any",
"Low (<1)",
"High (>2)",
"Under 1",
"Under 2",
"Under 3",
"Over 1",
"Over 2",
"Over 3",
],
"EPS growththis year": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"EPS growthqtr over qtr": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Current Ratio": [
"Any",
"High (>3)",
"Low (<1)",
"Under 1",
"Under 0.5",
"Over 0.5",
"Over 1",
"Over 1.5",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 10",
],
"Operating Margin": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Very Negative (<-20%)",
"High (>25%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 45%",
"Under 40%",
"Under 35%",
"Under 30%",
"Under 25%",
"Under 20%",
"Under 15%",
"Under 10%",
"Under 5%",
"Under 0%",
"Under -10%",
"Under -20%",
"Under -30%",
"Under -50%",
"Under -70%",
"Under -100%",
"Over 0%",
"Over 10%",
"Under 15%",
"Over 20%",
"Over 25%",
"Over 30%",
"Over 35%",
"Over 40%",
"Over 45%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"InstitutionalOwnership": [
"Any",
"Low (<5%)",
"High (>90%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 40%",
"Under 30%",
"Under 20%",
"Under 10%",
"Over 10%",
"Over 20%",
"Over 30%",
"Over 40%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"P/S": [
"Any",
"Low (<1)",
"High (>10)",
"Under 1",
"Under 2",
"Under 3",
"Under 4",
"Under 5",
"Under 6",
"Under 7",
"Under 8",
"Under 9",
"Under 10",
"Over 1",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 6",
"Over 6",
"Over 7",
"Over 8",
"Over 9",
"Over 10",
],
"EPS growthnext year": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Sales growthqtr over qtr": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Quick Ratio": [
"Any",
"High (>3)",
"Low (<0.5)",
"Under 1",
"Under 0.5",
"Over 0.5",
"Over 1",
"Over 1.5",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 10",
],
"Net Profit Margin": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Negative (<-20%)",
"High (>20%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 45%",
"Under 40%",
"Under 35%",
"Under 30%",
"Under 25%",
"Under 20%",
"Under 15%",
"Under 10%",
"Under 5%",
"Under 0%",
"Under -10%",
"Under -20%",
"Under -30%",
"Under -50%",
"Under -70%",
"Under -100%",
"Over 0%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
"Over 35%",
"Over 40%",
"Over 45%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"InstitutionalTransactions": [
"Any",
"Very Negative (<20%)",
"Negative (<0%)",
"Positive (>0%)",
"Very Positive (>20%)",
"Under -50%",
"Under -45%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
],
"P/B": [
"Any",
"Low (<1)",
"High (>5)",
"Under 1",
"Under 2",
"Under 3",
"Under 4",
"Under 5",
"Under 6",
"Under 7",
"Under 8",
"Under 9",
"Under 10",
"Over 1",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 6",
"Over 7",
"Over 8",
"Over 9",
"Over 10",
],
"EPS growthpast 5 years": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Return on Assets": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Positive (>15%)",
"Very Negative (<-15%)",
"Under -50%",
"Under -45%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
],
"LT Debt/Equity": [
"Any",
"High (>0.5)",
"Low (<0.1)",
"Under 1",
"Under 0.9",
"Under 0.8",
"Under 0.7",
"Under 0.6",
"Under 0.5",
"Under 0.4",
"Under 0.3",
"Under 0.2",
"Under 0.1",
"Over 0.1",
"Over 0.2",
"Over 0.3",
"Over 0.4",
"Over.5",
"Over 0.6",
"Over 0.7",
"Over 0.8",
"Over 0.9",
"Over 1",
],
"Payout Ratio": [
"Any",
"None (0%)",
"Positive (>0%)",
"Low (<20%)",
"High (>50%)",
"Over 0%",
"Over 10%",
"Over 20%",
"Over 30%",
"Over 40%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
"Over 100%",
"Under 10%",
"Under 20%",
"Under 30%",
"Under 40%",
"Under 50%",
"Under 60%",
"Under 70%",
"Under 80%",
"Under 90%",
"Under 100%",
],
"Performance": [
"Any",
"Today Up",
"Today Down",
"Today -15%",
"Today -10%",
"Today -5%",
"Today +5%",
"Today +10%",
"Today +15%",
"Week -30%",
"Week -20%",
"Week -10%",
"Week Down",
"Week Up",
"Week +10%",
"Week +20%",
"Week +30%",
"Month -50%",
"Month -30%",
"Month -20%",
"Month -10%",
"Month Down",
"Month Up",
"Month +10%",
"Month +20%",
"Month +30%",
"Month +50%",
"Quarter -50%",
"Quarter -30%",
"Quarter -20%",
"Quarter -10%",
"Quarter Down",
"Quarter Up",
"Quarter +10%",
"Quarter +20%",
"Quarter +30%",
"Quarter +50%",
"Half -75%",
"Half -50%",
"Half -30%",
"Half -20%",
"Half -10%",
"Half Down",
"Half Up",
"Half +10%",
"Half +20%",
"Half +30%",
"Half +50%",
"Half +100%",
"Year -75%",
"Year -50%",
"Year -30%",
"Year -20%",
"Year -10%",
"Year Down",
"Year Up",
"Year +10%",
"Year +20%",
"Year +30%",
"Year +50%",
"Year +100%",
"Year +200%",
"Year +300%",
"Year +500%",
"YTD -75%",
"YTD -50%",
"YTD -30%",
"YTD -20%",
"YTD -10%",
"YTD -5%",
"YTD Down",
"YTD Up",
"YTD +5%",
"YTD +10%",
"YTD +20%",
"YTD +30",
"YTD +50%",
"YTD +100%",
],
"20-Day Simple Moving Average": [
"Any",
"Price below SMA20",
"Price 10% below SMA20",
"Price 20% below SMA20",
"Price 30% below SMA20",
"Price 40% below SMA20",
"Price 50% below SMA20",
"Price above SMA20",
"Price 10% above SMA20",
"Price 20% above SMA20",
"Price 30% above SMA20",
"Price 40% above SMA20",
"Price 50% above SMA20",
"Price crossed SMA20",
"Price crossed SMA20 above",
"Price crossed SMA20 below",
"SMA20 crossed SMA50",
"SMA20 crossed SMA50 above",
"SMA20 crossed SMA50 below",
"SMA20 cross SMA200",
"SMA20 crossed SMA200 below",
"SMA20 crossed SMA200 above",
"SMA20 above SMA50",
"SMA20 below SMA50",
"SMA20 above SMA200",
"SMA20 below SMA200",
],
"20-Day High/Low": [
"Any",
"New High",
"New Low",
"5% or more below High",
"10% or more below High",
"15% or more below High",
"20% or more below High",
"30% or more below High",
"40% or more below High",
"50% or more below High",
"0-3% below High",
"0-5% below High",
"0-10% below High",
"5% or more above Low",
"10% or more above Low",
"15% or more above Low",
"20% or more above Low",
"30% or more above Low",
"40% or more above Low",
"50% or more above Low",
"0-3% above Low",
"0-5% above Low",
"0-10% above Low",
],
"Beta": [
"Any",
"Under 0",
"Under 0.5",
"Under 1",
"Under 1.5",
"Under 2",
"Over 0",
"Over 0.5",
"Over 1",
"Over 1.5",
"Over 2",
"Over 2.5",
"Over 3",
"Over 4",
"0 to 0.5",
"0 to 1",
"0.5 to 1",
"0.5 to 1.5",
"1 to 1.5",
"1 to 2",
],
"Performance 2": [
"Any",
"Today Up",
"Today Down",
"Today -15%",
"Today -10%",
"Today -5%",
"Today +5%",
"Today +10%",
"Today +15%",
"Week -30%",
"Week -20%",
"Week -10%",
"Week Down",
"Week Up",
"Week +10%",
"Week +20%",
"Week +30%",
"Month -50%",
"Month -30%",
"Month -20%",
"Month -10%",
"Month Down",
"Month Up",
"Month +10%",
"Month +20%",
"Month +30%",
"Month +50%",
"Quarter -50%",
"Quarter -30%",
"Quarter -20%",
"Quarter -10%",
"Quarter Down",
"Quarter Up",
"Quarter +10%",
"Quarter +20%",
"Quarter +30%",
"Quarter +50%",
"Half -75%",
"Half -50%",
"Half -30%",
"Half -20%",
"Half -10%",
"Half Down",
"Half Up",
"Half +10%",
"Half +20%",
"Half +30%",
"Half +50%",
"Half +100%",
"Year -75%",
"Year -50%",
"Year -30%",
"Year -20%",
"Year -10%",
"Year Down",
"Year Up",
"Year +10%",
"Year +20%",
"Year +30%",
"Year +50%",
"Year +100%",
"Year +200%",
"Year +300%",
"Year +500%",
"YTD -75%",
"YTD -50%",
"YTD -30%",
"YTD -20%",
"YTD -10%",
"YTD -5%",
"YTD Down",
"YTD Up",
"YTD +5%",
"YTD +10%",
"YTD +20%",
"YTD +30",
"YTD +50%",
"YTD +100%",
],
"50-Day Simple Moving Average": [
"Any",
"Price below SMA50",
"Price 10% below SMA50",
"Price 20% below SMA50",
"Price 30% below SMA50",
"Price 40% below SMA50",
"Price 50% below SMA50",
"Price above SMA50",
"Price 10% above SMA50",
"Price 20% above SMA50",
"Price 30% above SMA50",
"Price 40% above SMA50",
"Price 50% above SMA50",
"Price crossed SMA50",
"Price crossed SMA50 above",
"Price crossed SMA50 below",
"SMA50 crossed SMA20",
"SMA50 crossed SMA20 above",
"SMA50 crossed SMA20 below",
"SMA50 cross SMA200",
"SMA50 crossed SMA200 below",
"SMA50 crossed SMA200 above",
"SMA50 above SMA20",
"SMA50 below SMA20",
"SMA50 above SMA200",
"SMA50 below SMA200",
],
"50-Day High/Low": [
"Any",
"New High",
"New Low",
"5% or more below High",
"10% or more below High",
"15% or more below High",
"20% or more below High",
"30% or more below High",
"40% or more below High",
"50% or more below High",
"0-3% below High",
"0-5% below High",
"0-10% below High",
"5% or more above Low",
"10% or more above Low",
"15% or more above Low",
"20% or more above Low",
"30% or more above Low",
"40% or more above Low",
"50% or more above Low",
"0-3% above Low",
"0-5% above Low",
"0-10% above Low",
],
"Average True Range": [
"Any",
"Over 0.25",
"Over 0.5",
"Over 0.75",
"Over 1",
"Over 1.5",
"Over 2. Over 2.5",
"Over 3",
"Over 3.5",
"Over 4",
"Over 4.5",
"Over 5",
"Under 0.25",
"Under 0.5",
"Under 0.75",
"Under 1",
"Under 1.5",
"Under 2",
"Under 2.5",
"Under 3",
"Under 3.5",
"Under 4",
"Under 4.5",
"Under 5",
],
"Volatility": [
"Any",
"Week - Over 3%",
"Week - Over 4%",
"Week - Over 5%",
"Week - 6%",
"Week - 7%",
"Week - 8%",
"Week - 9%",
"Week - 10%",
"Week - 12%",
"Week - 15%",
"Month - 2%",
"Month - 3%",
"Month - 4%",
"Month 5%",
"Month 5%",
"Month 6%",
"Month 7%",
"Month 8%",
"Month 9%",
"Month 10%",
"Month 12%",
"Month 15%",
],
"200-Day Simple Moving Average": [
"Any",
"Price below SMA200",
"Price 10% below SMA200",
"Price 20% below SMA200",
"Price 30% below SMA200",
"Price 40% below SMA200",
"Price 50% below SMA200",
"Price above SMA200",
"Price 10% above SMA200",
"Price 20% above SMA200",
"Price 30% above SMA200",
"Price 40% above SMA200",
"Price 50% above SMA200",
"Price crossed SMA200",
"Price crossed SMA200 above",
"Price crossed SMA200 below",
"SMA200 crossed SMA20",
"SMA20 crossed SMA20 above",
"SMA20 crossed SMA20 below",
"SMA200 cross SMA50",
"SMA200 crossed SMA50 below",
"SMA200 crossed SMA50 above",
"SMA200 above SMA20",
"SMA200 below SMA20",
"SMA200 above SMA50",
"SMA200 below SMA50",
],
"52-Week High/Low": [
"Any",
"New High",
"New Low",
"5% or more below High",
"10% or more below High",
"15% or more below High",
"20% or more below High",
"30% or more below High",
"40% or more below High",
"50% or more below High",
"0-3% below High",
"0-5% below High",
"0-10% below High",
"5% or more above Low",
"10% or more above Low",
"15% or more above Low",
"20% or more above Low",
"30% or more above Low",
"40% or more above Low",
"50% or more above Low",
"0-3% above Low",
"0-5% above Low",
"0-10% above Low",
],
"RSI (14)": [
"Any",
"Overbought (90)",
"Overbought (80)",
"Overbought (70)",
"Overbought (6)",
"Oversold (40)",
"Oversold (30)",
"Oversold (20)",
"Oversold (10)",
"Not Overbought (<60)",
"Not Overbought (<50)",
"Not Oversold (>50)",
"Not Oversold (>40)",
],
"Change": [
"Any",
"Up",
"Up 1%",
"Up 2%",
"Up 3%",
"Up 4%",
"Up 5%",
"Up 6%",
"Up 7%",
"Up 8%",
"Up 9%",
"Up 10%",
"Up 15%",
"Up 20%",
"Down",
"Down 1%",
"Down 2%",
"Down 3%",
"Down 4%",
"Down 5%",
"Down 6%",
"Down 7%",
"Down 8%",
"Down 9%",
"Down 10%",
"Down 15%",
"Down 20%",
],
"Pattern": [
"Any",
"Horizontal S/R",
"Horizontal S/R (Strong)",
"TL Resistance",
"TL Resistance (Strong)",
"TL Support",
"TL Support (Strong)",
"Wedge Up",
"Wedge Up (Strong)",
"Wedge Down",
"Wedge Down (Strong)",
"Triangle Ascending",
"Triangle Ascending (Strong)",
"Triangle Descending",
"Triangle Descending (Strong)",
"Wedge",
"Wedge (Strong)",
"Channel Up",
"Channel Up (Strong)",
"Channel Down",
"Channel Down (Strong)",
"Channel",
"Channel (Strong)",
"Double Top",
"Double Bottom",
"Multiple Top",
"Multiple Bottom",
"Head & Shoulders",
"Head & Shoulders Inverse",
],
"Gap": [
"Any",
"Up",
"Up 1%",
"Up 2%",
"Up 3%",
"Up 4%",
"Up 5%",
"Up 6%",
"Up 7%",
"Up 8%",
"Up 9%",
"Up 10%",
"Up 15%",
"Up 20%",
"Down",
"Down 1%",
"Down 2%",
"Down 3%",
"Down 4%",
"Down 5%",
"Down 6%",
"Down 7%",
"Down 8%",
"Down 9%",
"Down 10%",
"Down 15%",
"Down 20%",
],
"Change from Open": [
"Any",
"Up",
"Up 1%",
"Up 2%",
"Up 3%",
"Up 4%",
"Up 5%",
"Up 6%",
"Up 7%",
"Up 8%",
"Up 9%",
"Up 10%",
"Up 15%",
"Up 20%",
"Down",
"Down 1%",
"Down 2%",
"Down 3%",
"Down 4%",
"Down 5%",
"Down 6%",
"Down 7%",
"Down 8%",
"Down 9%",
"Down 10%",
"Down 15%",
"Down 20%",
],
"Candlestick": [
"Any",
"Long Lower Shadow",
"Long Upper Shadow",
"Hammer",
"Inverted Hammer",
"Spinning Top White",
"Spinning Top Black",
"Doji",
"Dragonfly Doji",
"Gravestone Doji",
"Marubozu White",
"Marubozu Black",
],
}
__docformat__ = "numpy"
import datetime
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.screener import yahoofinance_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def historical(
    preset_loaded: str = "top_gainers",
    limit: int = 10,
    start_date: str = (
        datetime.datetime.now() - datetime.timedelta(days=6 * 30)
    ).strftime("%Y-%m-%d"),
    type_candle: str = "a",
    normalize: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> List[str]:
    """View historical price of stocks that meet preset

    Parameters
    ----------
    preset_loaded: str
        Preset loaded to filter for tickers
    limit: int
        Number of stocks to display
    start_date: str
        Start date to display historical data, in YYYY-MM-DD format
    type_candle: str
        Type of candle to display
    normalize : bool
        Boolean to normalize all stock prices using MinMax
    export : str
        Export dataframe data to csv,json,xlsx file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Returns
    -------
    list[str]
        List of stocks
    """
    df_screener, l_stocks, limit_random_stocks = yahoofinance_model.historical(
        preset_loaded, limit, start_date, type_candle, normalize
    )
    if df_screener.empty:
        return []
    # Guard clause: nothing matched the preset, so there is nothing to plot.
    if not l_stocks:
        console.print("No screener stocks found with this preset", "\n")
        return []

    # This plot has 1 axis: either our own figure or the caller-supplied axis.
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return []
        ax = external_axes[0]
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)

    df_screener.plot(ax=ax)

    title = f"Screener Historical Price with {preset_loaded}"
    if limit_random_stocks:
        # The model capped the selection at 10 random tickers.
        title += "\non 10 random stocks"
    ax.set_title(title)

    if normalize:
        ax.set_ylabel("Normalized Share Price ")
    else:
        ax.set_ylabel(" Share Price ($)")
    ax.legend()
    # ensures that the historical data starts from same datapoint
    ax.set_xlim([df_screener.index[0], df_screener.index[-1]])
    theme.style_primary_axis(ax)

    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "historical",
        df_screener,
    )
    return l_stocks
__docformat__ = "numpy"
import configparser
import datetime
import logging
from pathlib import Path
from typing import List, Tuple
import random
import numpy as np
import pandas as pd
import yfinance as yf
from finvizfinance.screener import ticker
from pandas.plotting import register_matplotlib_converters
from sklearn.preprocessing import MinMaxScaler
from openbb_terminal.decorators import log_start_end
from openbb_terminal.core.config.paths import USER_PRESETS_DIRECTORY
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.screener import finviz_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# Directory for user-defined screener presets (*.ini files)
PRESETS_PATH = USER_PRESETS_DIRECTORY / "stocks" / "screener"
# Directory for the presets bundled with the package
PRESETS_PATH_DEFAULT = Path(__file__).parent / "presets"
# Map preset file name -> path; user presets first...
preset_choices = {
    filepath.name: filepath
    for filepath in PRESETS_PATH.iterdir()
    if filepath.suffix == ".ini"
}
# ...then bundled defaults (a bundled preset with the same file name
# overwrites the user entry, per dict.update semantics)
preset_choices.update(
    {
        filepath.name: filepath
        for filepath in PRESETS_PATH_DEFAULT.iterdir()
        if filepath.suffix == ".ini"
    }
)
# Single-letter candle-type codes accepted by `historical`, mapped to the
# corresponding column of the yfinance download
d_candle_types = {
    "o": "Open",
    "h": "High",
    "l": "Low",
    "c": "Close",
    "a": "Adj Close",
}
@log_start_end(log=logger)
def historical(
    preset_loaded: str = "top_gainers",
    limit: int = 10,
    start_date: str = (
        datetime.datetime.now() - datetime.timedelta(days=6 * 30)
    ).strftime("%Y-%m-%d"),
    type_candle: str = "a",
    normalize: bool = True,
) -> Tuple[pd.DataFrame, List[str], bool]:
    """View historical price of stocks that meet preset

    Parameters
    ----------
    preset_loaded: str
        Preset loaded to filter for tickers
    limit: int
        Number of stocks to display
    start_date: str
        Start date to display historical data, in YYYY-MM-DD format
    type_candle: str
        Type of candle to display; one of the keys of ``d_candle_types``
    normalize : bool
        Boolean to normalize all stock prices using MinMax

    Returns
    -------
    pd.DataFrame
        Dataframe of the screener (empty on failure)
    list[str]
        List of stocks
    bool
        Whether some random stock selection happened due to limitations
    """
    screen = ticker.Ticker()
    if preset_loaded in finviz_model.d_signals:
        screen.set_filter(signal=finviz_model.d_signals[preset_loaded])
    else:
        preset_filter = configparser.RawConfigParser()
        # keep option names case-sensitive: finviz filter keys are case-sensitive
        preset_filter.optionxform = str  # type: ignore
        if preset_loaded not in preset_choices:
            # BUG FIX: previously returned the pd.DataFrame *class* instead of
            # an empty instance, breaking callers that check `.empty`
            return pd.DataFrame(), [], False
        preset_filter.read(preset_choices[preset_loaded])
        d_general = preset_filter["General"]
        d_filters = {
            **preset_filter["Descriptive"],
            **preset_filter["Fundamental"],
            **preset_filter["Technical"],
        }
        # drop filters left blank in the preset file
        d_filters = {k: v for k, v in d_filters.items() if v}
        if "Signal" in d_general and d_general["Signal"]:
            screen.set_filter(filters_dict=d_filters, signal=d_general["Signal"])
        else:
            screen.set_filter(filters_dict=d_filters)
    l_stocks = screen.screener_view(verbose=0)
    limit_random_stocks = False
    final_screener = pd.DataFrame()
    if l_stocks:
        if len(l_stocks) < 2:
            console.print(
                "The preset selected did not return a sufficient number of tickers. Two or more tickers are needed."
            )
            return pd.DataFrame(), [], False
        if len(l_stocks) > limit:
            # sample `limit` tickers at random, then sort for stable display order
            random.shuffle(l_stocks)
            l_stocks = sorted(l_stocks[:limit])
            console.print(
                # message now reflects the actual `limit` instead of hardcoding 10
                f"\nThe limit of stocks to compare with are {limit}. Hence, {limit} random similar stocks will be displayed.",
                f"\nThe selected list will be: {', '.join(l_stocks)}",
            )
            limit_random_stocks = True
        selector = d_candle_types[type_candle]
        df_screener = yf.download(
            l_stocks, start=start_date, progress=False, threads=False
        )[selector]
        # restrict columns to the screened tickers, in l_stocks order
        # (previously selected twice redundantly)
        final_screener = df_screener[l_stocks]
        if np.any(final_screener.isna()):
            nan_tickers = final_screener.columns[
                final_screener.isna().sum() >= 1
            ].to_list()
            console.print(
                f"NaN values found in: {', '.join(nan_tickers)}. Replacing with zeros."
            )
            final_screener = final_screener.fillna(0)
        # This puts everything on 0-1 scale for visualizing
        if normalize:
            mm_scale = MinMaxScaler()
            final_screener = pd.DataFrame(
                mm_scale.fit_transform(final_screener),
                columns=final_screener.columns,
                index=final_screener.index,
            )
    else:
        console.print(
            "The preset selected did not return a sufficient number of tickers. Two or more tickers are needed."
        )
        return pd.DataFrame(), [], False
    return final_screener, l_stocks, limit_random_stocks
# PRESETS
- [How to add presets](#how-to-add-presets)
- [template](#template)
- [sexy_year](#sexy_year)
- [buffett_like](#buffett_like)
- [cheap_bottom_dividend](#cheap_bottom_dividend)
- [cheap_dividend](#cheap_dividend)
- [cheap_oversold](#cheap_oversold)
- [death_cross](#death_cross)
- [golden_cross](#golden_cross)
- [heavy_inst_ins](#heavy_inst_ins)
- [modified_dreman](#modified_dreman)
- [modified_neff](#modified_neff)
- [rosenwald](#rosenwald)
- [rosenwald_gtfo](#rosenwald_gtfo)
- [undervalue](#undervalue)
---
## How to add presets
1. Go to the folder OpenBBTerminal/openbb_terminal/stocks/screener/presets.
2. There should be a `README.md` file and multiple `.ini` files. One of these `.ini` files should be named `template.ini`.
<img width="449" alt="1" src="https://user-images.githubusercontent.com/25267873/123713241-db765e00-d86b-11eb-9feb-f05f471d9aa5.png">
3. Copy the `template.ini` file and paste it in the same directory.
4. Rename that file to something you find meaningful, e.g. `my_own_filter.ini`.
<img width="448" alt="2" src="https://user-images.githubusercontent.com/25267873/123713233-da453100-d86b-11eb-853e-9a850cd064d1.png">
5. Open the file you just renamed (e.g. `my_own_filter.ini`), and set the parameters you want to filter.
<img width="859" alt="3" src="https://user-images.githubusercontent.com/25267873/123713235-da453100-d86b-11eb-9f65-f957f99d5d60.png">
6. It may be useful to play with the main source <https://finviz.com/screener.ashx> since you can tweak these and
understand how they influence the outcome of the filtered stocks.
<img width="1256" alt="4" src="https://user-images.githubusercontent.com/25267873/123713236-daddc780-d86b-11eb-9faf-2ee58fc304d3.png">
7. Update the Author and Description name. E.g.
<img width="807" alt="5" src="https://user-images.githubusercontent.com/25267873/123713239-db765e00-d86b-11eb-8b58-127205d75894.png">
8. Start the terminal, and go to the `> scr` menu. In there, you can play with it on the terminal as shown:
- **view**: Allows to see the screeners available. I.e. all `.ini` files in presets folder.
<img width="1201" alt="6" src="https://user-images.githubusercontent.com/25267873/123713231-d9ac9a80-d86b-11eb-920a-0959481de143.png">
- **view <selected_preset>**: Allows to see the specific parameters set for the preset selected.
<img width="443" alt="Captura de ecrã 2021-06-28, às 23 58 35" src="https://user-images.githubusercontent.com/25267873/123713683-d82fa200-d86c-11eb-92ee-bf5ae14d5f12.png">
- **set <selected_preset>**: Allows to set this preset as main filter. See that the help menu will display a different
`PRESET: my_own_filter`
<img width="857" alt="7" src="https://user-images.githubusercontent.com/25267873/123713226-d9140400-d86b-11eb-9a61-bce07b6f580d.png">
- **historical, overview, valuation, financial, ownership, performance, technical** commands will now be performed on
tickers that are filtered with the selected preset. Note: Since it is likely that there will be a big range of
tickers, we cap it to 10 randomly. So, it is normal if for the same filter the user sees 10 different tickers.
If the user wants to see more than 10 tickers, it can select a different limit using `-l` flag.
<img width="1049" alt="8" src="https://user-images.githubusercontent.com/25267873/123713223-d6191380-d86b-11eb-9e50-ac3a32d7922d.png">
9. Share with other Apes. You can do so by either creating yourself a Pull Request with this change, or asking a dev
(e.g. @Sexy_Year) on our discord server to add it for you.
---
## template
- **Author of preset:** OpenBBTerminal
- **Contact:** <https://github.com/OpenBB-finance/OpenBBTerminal>
- **Description:** Template with all available filters and their options menu. More information can be found in
<https://finviz.com/help/screener.ashx> and <https://finviz.com/help/technical-analysis/charts-patterns.ashx>
---
## sexy_year
- **Author of preset:** Sexy Year
- **Contact:** Add the author's contact details, if they wish to be contacted. This can be a hyperlink, an e-mail address, etc.
- **Description:** This is just a sample. The user that adds the preset can add a description for what type of stocks
these filters are aimed for
---
## buffett_like
- **Author of preset:** Traceabl3
- **Contact:** via smoke signals
- **Description:** Buffett-like value screener (value investing for long-term growth)
---
## cheap_bottom_dividend
- **Author of preset:** Traceabl3
- **Contact:** swing the bullroarer
- **Description:** High yield dividend stonks that are at or near their lowest price. Inverse head-and-shoulders pattern recognized.
---
## cheap_dividend
- **Author of preset:** Traceabl3
- **Contact:** illuminated bat symbol in the sky
- **Description:** cheap dividend stocks
---
## cheap_oversold
- **Author of preset:** Traceabl3
- **Contact:** hit me on my skytel pager
- **Description:** Cheap stonks that are oversold: under 10% above the low, and oversold on the RSI.
---
## death_cross
- **Author of preset:** Traceabl3
- **Contact:** take the red pill
- **Description:** Death cross : when the 50sma crosses below the 200 sma
- **More information:** <https://www.investopedia.com/terms/d/deathcross.asp>
---
## golden_cross
- **Author of preset:** Traceabl3
- **Contact:** flip the pigeons
- **Description:** Golden Cross when the 50 day moves above the 200 day from below.
---
## heavy_inst_ins
- **Author of preset:** Traceabl3
- **Contact:** blow into the conch shell
- **Description:** Heavily owned by institutions and insiders (>30% each)
---
## modified_dreman
- **Author of preset:** Traceabl3
- **Contact:** Drum Telegraphy
- **Description:** Modified Version of the Dreman Screener.
---
## modified_neff
- **Author of preset:** Traceabl3
- **Contact:** bang the drums
- **Description:** Neff Screener with modifications // operational margin <50%
- **More information:** <https://marketxls.com/template/neff-screen/>
---
## rosenwald
- **Author of preset:** Traceabl3
- **Contact:** three shots in the air.
- **Description:** The classic Rosenwald screen, based on the best guess of a colleague.
---
## rosenwald_gtfo
- **Author of preset:** Traceabl3
- **Contact:** wave the white flag
- **Description:** Too many indicators indicating an impending downturn.
---
## undervalue
- **Author of preset:** Traceabl3
- **Contact:** message on tor browser.
- **Description:** Potential Undervalued stocks
| /repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/stocks/screener/presets/README.md | 0.719975 | 0.816991 | README.md | pypi |
__docformat__ = "numpy"
import logging
from io import BytesIO
from typing import Tuple
from urllib.request import urlopen
from zipfile import ZipFile
import pandas as pd
import statsmodels.api as sm
import yfinance as yf
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_fama_raw() -> pd.DataFrame:
    """Download the base Fama-French factor data used for risk calculations.

    Returns
    -------
    pd.DataFrame
        Monthly Fama-French factors (MKT-RF, SMB, HML, RF) as fractions,
        indexed by date.
    """
    ff_url = (
        "http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/"
        "F-F_Research_Data_Factors_CSV.zip"
    )
    # Download the zipped CSV from Ken French's data library and parse it
    with urlopen(ff_url) as url:
        with ZipFile(BytesIO(url.read())) as zipfile:
            with zipfile.open("F-F_Research_Data_Factors.CSV") as zip_open:
                fama = pd.read_csv(
                    zip_open,
                    header=0,
                    names=["Date", "MKT-RF", "SMB", "HML", "RF"],
                    skiprows=3,
                )
    # Monthly rows carry a 6-character YYYYMM date; this filter drops the
    # annual-summary section and blank/section-header rows of the CSV
    fama = fama[fama["Date"].apply(lambda x: len(str(x).strip()) == 6)]
    fama["Date"] = pd.to_datetime(fama["Date"].astype(str) + "01", format="%Y%m%d")
    # Convert percentage columns to numeric fractions
    for factor in ("MKT-RF", "SMB", "HML", "RF"):
        fama[factor] = pd.to_numeric(fama[factor], downcast="float") / 100
    return fama.set_index("Date")
@log_start_end(log=logger)
def get_historical_5(symbol: str) -> pd.DataFrame:
    """Get 5 years of monthly historical prices for a ticker, dividends removed.

    Parameters
    ----------
    symbol : str
        A ticker symbol in string form

    Returns
    -------
    pd.DataFrame
        Monthly OHLCV history with a timezone-naive index.
    """
    prices = yf.Ticker(symbol).history(period="5y", interval="1mo")
    # Keep only rows stamped on the first of the month (true monthly bars)
    prices = prices[prices.index.to_series().apply(lambda x: x.day == 1)]
    prices = prices.drop(["Dividends", "Stock Splits"], axis=1).dropna()
    # Strip timezone info so the index can be joined with the naive
    # Fama-French index in capm_information
    prices.index = [d.replace(tzinfo=None) for d in prices.index]
    return prices
@log_start_end(log=logger)
def capm_information(symbol: str) -> Tuple[float, float]:
    """Compute CAPM-related statistics for a stock.

    Parameters
    ----------
    symbol : str
        A ticker symbol in string form

    Returns
    -------
    Tuple[float, float]
        The beta for the stock, and the systematic risk (regression R^2)
    """
    # Join monthly prices with the Fama-French factor data on date
    capm_df = get_historical_5(symbol).join(get_fama_raw())
    capm_df = capm_df.dropna()
    # Excess returns of the stock and of the market over the risk-free rate
    capm_df["Monthly Return"] = capm_df["Close"].pct_change()
    capm_df["Excess Monthly Return"] = capm_df["Monthly Return"] - capm_df["RF"]
    capm_df["Excess MKT-RF"] = capm_df["MKT-RF"] - capm_df["RF"]
    capm_df = capm_df.dropna()

    # OLS regression: stock excess return on market excess return (+ intercept)
    regressors = sm.add_constant(capm_df["Excess MKT-RF"])
    model = sm.OLS(capm_df[["Excess Monthly Return"]], regressors).fit()

    # Slope on the market factor is the CAPM beta
    beta = model.params["Excess MKT-RF"]
    # R^2 = share of return variance explained by the market (systematic risk)
    sy = model.rsquared
    return beta, sy
__docformat__ = "numpy"
import logging
import os
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.quantitative_analysis.beta_model import beta_model
from openbb_terminal.helper_funcs import export_data
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def beta_view(
    symbol: str,
    ref_symbol: str,
    data: pd.DataFrame = None,
    ref_data: pd.DataFrame = None,
    interval: int = 1440,
    export: str = "",
) -> None:
    """Display the beta scatterplot + linear regression.

    Parameters
    ----------
    symbol : str
        A ticker to calculate beta for
    ref_symbol : str
        A reference ticker symbol for the beta calculation (default in terminal is SPY)
    data : pd.DataFrame
        The selected ticker symbols price data
    ref_data : pd.DataFrame
        The reference ticker symbols price data
    interval: int
        The interval of the ref_data. This will ONLY be used if ref_data is None
    export : str
        Format to export the returns dataframe (e.g. csv, json, xlsx)
    """
    try:
        sr, rr, beta, alpha = beta_model(
            symbol, ref_symbol, data, ref_data, interval=interval
        )
    except Exception as e:
        # beta_model signals an unknown reference ticker with this exact
        # message; report it to the user instead of crashing, re-raise the rest
        if str(e) == "Invalid ref ticker":
            console.print(str(e) + "\n")
            return
        raise e
    _, ax = plt.subplots()
    ax.scatter(rr, sr)  # plot returns
    ax.plot(ax.get_xlim(), [x * beta + alpha for x in ax.get_xlim()])  # plot lin reg
    ax.set(
        xlabel=f"{ref_symbol} Returns (%)",
        ylabel=f"{symbol} Returns (%)",
        title=f"Beta of {symbol} with respect to {ref_symbol}",
    )
    beta_text = f"Raw Beta={round(beta, 2)}\nAlpha={round(alpha, 2)}"
    ax.text(0.9, 0.1, beta_text, horizontalalignment="right", transform=ax.transAxes)
    # NOTE(review): the figure is created but never shown or styled via the
    # theme helpers used by sibling views — confirm whether display is
    # intentionally left to the caller
    console.print()
    df = pd.DataFrame({"sr": sr, "rr": rr})
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        f"beta_alpha={alpha}_beta={beta}",
        df,
    )
__docforma__ = "numpy"
import logging
import os
from datetime import datetime, timedelta
from typing import List, Optional
import matplotlib.pyplot as plt
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
is_valid_axes_count,
plot_autoscale,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import alphaquery_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_put_call_ratio(
    symbol: str,
    window: int = 30,
    start_date: str = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display put call ratio [Source: AlphaQuery.com]

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    window : int, optional
        Window length to look at, by default 30
    start_date : str, optional
        Starting date for data, by default (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d")
    export : str, optional
        Format to export data, by default ""
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    ratio_df = alphaquery_model.get_put_call_ratio(symbol, window, start_date)
    if ratio_df.empty:
        console.print("No data found.\n")
        return

    # Draw on the provided axis when one is supplied and valid,
    # otherwise create a fresh figure
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return
        (ax,) = external_axes
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)

    ax.plot(ratio_df.index, ratio_df.values)
    ax.set_title(f"Put Call Ratio for {symbol.upper()}")
    theme.style_primary_axis(ax)

    # Only render the figure ourselves when we own it
    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "pcr",
        ratio_df,
    )
__docformat__ = "numpy"
import argparse
import logging
import os
import warnings
from bisect import bisect_left
from typing import List, Optional, Tuple
import matplotlib.pyplot as plt
import mplfinance as mpf
import numpy as np
import pandas as pd
import seaborn as sns
from openbb_terminal import config_plot as cfp
from openbb_terminal import rich_config
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
is_valid_axes_count,
lambda_long_number_format_y_axis,
patch_pandas_text_adjustment,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import op_helpers, tradier_model, yfinance_model
logger = logging.getLogger(__name__)
# Map Tradier column names to the short names shown in chain tables
column_map = {"mid_iv": "iv", "open_interest": "oi", "volume": "vol"}
# Suppress all Python warnings for cleaner terminal output
warnings.filterwarnings("ignore")
def get_strike_bounds(
    options: pd.DataFrame, current_price: float, min_sp: float, max_sp: float
) -> Tuple[float, float]:
    """Get the min/max strike-price bounds to display.

    A sentinel of -1 for ``min_sp``/``max_sp`` means "auto": use 75%/125% of
    the current price, or the first/last strike in the chain when the current
    price is unknown (0).

    Parameters
    ----------
    options : pd.DataFrame
        Option chain; must contain a "strike" column (ascending order assumed)
    current_price : float
        Last price of the underlying, or 0 when unknown
    min_sp : float
        Requested minimum strike, -1 for auto
    max_sp : float
        Requested maximum strike, -1 for auto

    Returns
    -------
    Tuple[float, float]
        (min_strike, max_strike) display bounds
    """
    if min_sp == -1:
        min_strike = (
            options["strike"].iat[0] if current_price == 0 else 0.75 * current_price
        )
    else:
        min_strike = min_sp
    if max_sp == -1:
        max_strike = (
            options["strike"].iat[-1] if current_price == 0 else 1.25 * current_price
        )
    else:
        max_strike = max_sp
    return min_strike, max_strike
def lambda_red_highlight(val) -> str:
    """Wrap a value in rich red-color markup.

    Parameters
    ----------
    val
        dataframe value to color

    Returns
    -------
    str
        the value wrapped in ``[red]...[/red]`` tags
    """
    return "[red]" + str(val) + "[/red]"
def lambda_green_highlight(val) -> str:
    """Green highlight

    Parameters
    ----------
    val
        dataframe value to color
        (docstring fixed: previously documented a non-existent ``values``
        parameter and a ``List[str]`` return)

    Returns
    -------
    str
        colored dataframe value
    """
    return f"[green]{val}[/green]"
@log_start_end(log=logger)
def check_valid_option_chains_headers(headers: str) -> List[str]:
    """Validate a comma-separated list of option-chain headers.

    Parameters
    ----------
    headers : str
        Comma-separated option chains headers

    Returns
    -------
    List[str]
        The header names as a list

    Raises
    ------
    argparse.ArgumentTypeError
        If any header is not a known Tradier chain column
    """
    requested = [str(item) for item in headers.split(",")]
    # Every requested column must exist in the Tradier chain schema
    if any(col not in tradier_model.df_columns for col in requested):
        raise argparse.ArgumentTypeError("Invalid option chains header selected!")
    return requested
@log_start_end(log=logger)
def display_expirations(ticker: str, source: str = "YahooFinance"):
    """Displays the expirations for a ticker

    Parameters
    ----------
    ticker: str
        The ticker to look up
    source: str
        Where to get the data from. Options: "YahooFinance" or "Tradier"

    Raises
    ------
    ValueError
        If ``source`` is not one of the supported values
    """
    if source == "YahooFinance":
        exps = yfinance_model.option_expirations(ticker)
    elif source == "Tradier":
        exps = tradier_model.option_expirations(ticker)
    else:
        # Message fixed: previously suggested 'yf'/'tr', which this function
        # does not accept — only the full source names above are valid
        raise ValueError("Invalid source. Please select 'YahooFinance' or 'Tradier'")
    display_expiry_dates(exps)
@log_start_end(log=logger)
def display_expiry_dates(expiry_dates: list):
    """Print the available expiry dates as a rich table.

    Parameters
    ----------
    expiry_dates: list
        The expiry dates of the chosen ticker.
    """
    # One-column table; the row index doubles as the expiry identifier
    dates_df = pd.DataFrame(expiry_dates, columns=["Date"])
    print_rich_table(
        dates_df,
        headers=list(dates_df.columns),
        title="Available expiry dates",
        show_index=True,
        index_name="Identifier",
    )
@log_start_end(log=logger)
def display_chains(
    symbol: str,
    expiry: str,
    to_display: Optional[List[str]] = None,
    min_sp: float = -1,
    max_sp: float = -1,
    calls_only: bool = False,
    puts_only: bool = False,
    export: str = "",
):
    """Display option chain

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Expiration date of option
    to_display: List[str]
        List of columns to display; defaults to tradier_model.default_columns
    min_sp: float
        Min strike price to display, -1 for default bounds
    max_sp: float
        Max strike price to display, -1 for default bounds
    calls_only: bool
        Only display calls
    puts_only: bool
        Only display puts
    export: str
        Format to export file
    """
    if to_display is None:
        to_display = tradier_model.default_columns
    chains_df = tradier_model.get_option_chains(symbol, expiry)
    # Keep only the requested columns plus the keys needed for splitting,
    # and shorten column names via column_map (e.g. open_interest -> oi)
    columns = to_display + ["strike", "option_type"]
    chains_df = chains_df[columns].rename(columns=column_map)
    # current_price=0 -> auto bounds come from the chain itself
    min_strike, max_strike = get_strike_bounds(chains_df, 0, min_sp, max_sp)
    chains_df = chains_df[chains_df["strike"] >= min_strike]
    chains_df = chains_df[chains_df["strike"] <= max_strike]
    calls_df = chains_df[chains_df.option_type == "call"].drop(columns=["option_type"])
    puts_df = chains_df[chains_df.option_type == "put"].drop(columns=["option_type"])
    df = calls_df if calls_only else puts_df
    if calls_only or puts_only:
        # Single-sided table
        print_rich_table(
            df,
            headers=[x.title() for x in df.columns],
            show_index=False,
            title=f"The strike prices are displayed between {min_strike} and {max_strike}",
        )
    else:
        # Reverse put columns so the combined table mirrors around "strike";
        # merge gives calls _x suffixes and puts _y suffixes
        puts_df = puts_df[puts_df.columns[::-1]]
        chain_table = calls_df.merge(puts_df, on="strike")
        if rich_config.USE_COLOR:
            call_cols = [col for col in chain_table if col.endswith("_x")]
            put_cols = [col for col in chain_table if col.endswith("_y")]
            # pandas tweaks so rich markup is not truncated in wide tables
            patch_pandas_text_adjustment()
            pd.set_option("display.max_colwidth", 0)
            pd.set_option("display.max_rows", None)
            # Color calls green and puts red via rich tags
            for cc in call_cols:
                chain_table[cc] = (
                    chain_table[cc].astype(str).apply(lambda_green_highlight)
                )
            for pc in put_cols:
                chain_table[pc] = (
                    chain_table[pc].astype(str).apply(lambda_red_highlight)
                )
        # NOTE(review): str.strip removes *characters* ('_', 'x'/'y'), not a
        # suffix — safe for the current column names, but fragile for any
        # future column starting/ending with those characters
        headers = [
            col.strip("_x")
            if col.endswith("_x")
            else col.strip("_y")
            if col.endswith("_y")
            else col
            for col in chain_table.columns
        ]
        print_rich_table(
            chain_table,
            headers=headers,
            show_index=False,
            title=f"{symbol} Option chain for {expiry}",
        )
    export_data(
        export,
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        "chains",
        chains_df,
    )
@log_start_end(log=logger)
def plot_oi(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    calls_only: bool = False,
    puts_only: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot open interest

    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiry date for options
    min_sp: float
        Min strike to consider, -1 for default (75% of spot)
    max_sp: float
        Max strike to consider, -1 for default (125% of spot)
    calls_only: bool
        Show calls only
    puts_only: bool
        Show puts only
    export: str
        Format to export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    options = tradier_model.get_option_chains(symbol, expiry)
    current_price = tradier_model.last_price(symbol)
    min_strike, max_strike = get_strike_bounds(options, current_price, min_sp, max_sp)
    # Degenerate bounds mean there is nothing to plot
    if max_strike == min_strike:
        console.print("[red]Not enough data for analysis[/red]\n")
        return
    if calls_only and puts_only:
        console.print("Both flags selected, please select one", "\n")
        return
    calls = options[options.option_type == "call"][["strike", "open_interest"]]
    puts = options[options.option_type == "put"][["strike", "open_interest"]]
    # Open interest plotted in thousands of contracts
    call_oi = calls.set_index("strike")["open_interest"] / 1000
    put_oi = puts.set_index("strike")["open_interest"] / 1000
    # Align calls and puts on strike; merge suffixes _x/_y renamed for clarity
    df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
    df_opt = df_opt.rename(
        columns={"open_interest_x": "OI_call", "open_interest_y": "OI_put"}
    )
    max_pain = op_helpers.calculate_max_pain(df_opt)
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    if not calls_only:
        ax.plot(put_oi.index, put_oi.values, "-o", label="Puts")
    if not puts_only:
        ax.plot(call_oi.index, call_oi.values, "-o", label="Calls")
    ax.legend(loc=0, fontsize="x-small")
    # Vertical markers: dashed line at spot, thick line at the max-pain strike
    ax.axvline(current_price, lw=2, ls="--", label="Current Price", alpha=0.7)
    ax.axvline(max_pain, lw=3, label=f"Max Pain: {max_pain}", alpha=0.7)
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Open Interest [1k] ")
    ax.set_xlim(min_strike, max_strike)
    ax.set_title(f"Open Interest for {symbol.upper()} expiring {expiry}")
    theme.style_primary_axis(ax)
    # Only render the figure when we created it ourselves
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "oi_tr",
        options,
    )
@log_start_end(log=logger)
def plot_vol(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    calls_only: bool = False,
    puts_only: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot volume

    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiry date for options
    min_sp: float
        Min strike to consider, -1 for default (75% of spot)
    max_sp: float
        Max strike to consider, -1 for default (125% of spot)
    calls_only: bool
        Show calls only
    puts_only: bool
        Show puts only
    export: str
        Format to export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    options = tradier_model.get_option_chains(symbol, expiry)
    current_price = tradier_model.last_price(symbol)
    # min_sp default fixed from 1 to -1 so the auto 75%-of-spot bound applies,
    # consistent with plot_oi, display_chains and plot_volume_open_interest
    min_strike, max_strike = get_strike_bounds(options, current_price, min_sp, max_sp)
    if calls_only and puts_only:
        console.print("Both flags selected, please select one", "\n")
        return
    calls = options[options.option_type == "call"][["strike", "volume"]]
    puts = options[options.option_type == "put"][["strike", "volume"]]
    # Volume plotted in thousands of contracts
    call_v = calls.set_index("strike")["volume"] / 1000
    put_v = puts.set_index("strike")["volume"] / 1000
    # Validate external axes like the sibling plot functions do
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    if not calls_only:
        put_v.plot(
            x="strike",
            y="volume",
            label="Puts",
            ax=ax,
            marker="o",
            ls="-",
            c="r",
        )
    if not puts_only:
        call_v.plot(
            x="strike",
            y="volume",
            label="Calls",
            ax=ax,
            marker="o",
            ls="-",
            c="g",
        )
    ax.axvline(current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7)
    ax.grid("on")
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Volume [1k] ")
    ax.set_xlim(min_strike, max_strike)
    ax.set_title(f"Volume for {symbol.upper()} expiring {expiry}")
    theme.style_primary_axis(ax)
    # Only render the figure when we created it ourselves
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "vol_tr",
        options,
    )
@log_start_end(log=logger)
def plot_volume_open_interest(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    min_vol: float = -1,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot volume and open interest

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Option expiration
    min_sp: float
        Min strike price, -1 for no lower bound
    max_sp: float
        Max strike price, -1 for no upper bound
    min_vol: float
        Min volume to consider, -1 for automatic (upper 50% by oi+volume)
    export: str
        Format for exporting data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    current_price = tradier_model.last_price(symbol)
    options = tradier_model.get_option_chains(symbol, expiry)
    calls = options[options.option_type == "call"][
        ["strike", "volume", "open_interest"]
    ]
    puts = options[options.option_type == "put"][["strike", "volume", "open_interest"]]
    # Process Calls Data: aggregate volume/OI per strike
    df_calls = calls.pivot_table(
        index="strike", values=["volume", "open_interest"], aggfunc="sum"
    ).reindex()
    df_calls["strike"] = df_calls.index
    df_calls["type"] = "calls"
    # NOTE(review): the next two self-assignments are no-ops
    df_calls["open_interest"] = df_calls["open_interest"]
    df_calls["volume"] = df_calls["volume"]
    df_calls["oi+v"] = df_calls["open_interest"] + df_calls["volume"]
    df_calls["spot"] = round(current_price, 2)
    df_puts = puts.pivot_table(
        index="strike", values=["volume", "open_interest"], aggfunc="sum"
    ).reindex()
    df_puts["strike"] = df_puts.index
    df_puts["type"] = "puts"
    df_puts["open_interest"] = df_puts["open_interest"]
    # Puts are negated so their bars extend left of the symmetric x-axis
    df_puts["volume"] = -df_puts["volume"]
    df_puts["open_interest"] = -df_puts["open_interest"]
    df_puts["oi+v"] = df_puts["open_interest"] + df_puts["volume"]
    df_puts["spot"] = round(current_price, 2)
    # OI per strike (in thousands) merged on strike for the max-pain calc
    call_oi = calls.set_index("strike")["open_interest"] / 1000
    put_oi = puts.set_index("strike")["open_interest"] / 1000
    df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
    df_opt = df_opt.rename(
        columns={"open_interest_x": "OI_call", "open_interest_y": "OI_put"}
    )
    max_pain = op_helpers.calculate_max_pain(df_opt)
    if min_vol == -1 and min_sp == -1 and max_sp == -1:
        # If no argument provided, we use the percentile 50 to get 50% of upper volume data
        volume_percentile_threshold = 50
        min_vol_calls = np.percentile(df_calls["oi+v"], volume_percentile_threshold)
        min_vol_puts = np.percentile(df_puts["oi+v"], volume_percentile_threshold)
        df_calls = df_calls[df_calls["oi+v"] > min_vol_calls]
        # put oi+v values are negative, so "<" keeps the larger magnitudes
        df_puts = df_puts[df_puts["oi+v"] < min_vol_puts]
    else:
        if min_vol > -1:
            df_calls = df_calls[df_calls["oi+v"] > min_vol]
            df_puts = df_puts[df_puts["oi+v"] < -min_vol]
        if min_sp > -1:
            df_calls = df_calls[df_calls["strike"] > min_sp]
            df_puts = df_puts[df_puts["strike"] > min_sp]
        if max_sp > -1:
            df_calls = df_calls[df_calls["strike"] < max_sp]
            df_puts = df_puts[df_puts["strike"] < max_sp]
    if df_calls.empty and df_puts.empty:
        console.print(
            "The filtering applied is too strong, there is no data available for such conditions.\n"
        )
        return
    # Initialize the matplotlib figure
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    else:
        ax = external_axes[0]
    # make x axis symmetric
    axis_origin = max(abs(max(df_puts["oi+v"])), abs(max(df_calls["oi+v"])))
    ax.set_xlim(-axis_origin, +axis_origin)
    # Stacked horizontal bars: oi+v drawn first, volume drawn on top of it,
    # so the visible light-colored remainder is the open interest
    g = sns.barplot(
        x="oi+v",
        y="strike",
        data=df_calls,
        label="Calls: Open Interest",
        color="lightgreen",
        orient="h",
    )
    g = sns.barplot(
        x="volume",
        y="strike",
        data=df_calls,
        label="Calls: Volume",
        color="green",
        orient="h",
    )
    g = sns.barplot(
        x="oi+v",
        y="strike",
        data=df_puts,
        label="Puts: Open Interest",
        color="pink",
        orient="h",
    )
    g = sns.barplot(
        x="volume",
        y="strike",
        data=df_puts,
        label="Puts: Volume",
        color="red",
        orient="h",
    )
    # draw spot line: bars are categorical, so find the tick position nearest
    # the spot price via bisection over the tick labels
    s = [float(strike.get_text()) for strike in ax.get_yticklabels()]
    spot_index = bisect_left(s, current_price)  # find where the spot is on the graph
    spot_line = ax.axhline(spot_index, ls="--", alpha=0.3)
    # draw max pain line
    max_pain_index = bisect_left(s, max_pain)
    max_pain_line = ax.axhline(max_pain_index, ls="-", alpha=0.3, color="red")
    max_pain_line.set_linewidth(5)
    # format ticklabels without - for puts
    g.set_xticks(g.get_xticks())
    xlabels = [f"{x:,.0f}".replace("-", "") for x in g.get_xticks()]
    g.set_xticklabels(xlabels)
    ax.set_title(
        f"{symbol} volumes for {expiry}\n(open interest displayed only during market hours)"
    )
    ax.invert_yaxis()
    _ = ax.legend()
    handles, _ = ax.get_legend_handles_labels()
    handles.append(spot_line)
    handles.append(max_pain_line)
    # create legend labels + add to graph
    labels = [
        "Calls open interest",
        "Calls volume ",
        "Puts open interest",
        "Puts volume",
        "Current stock price",
        f"Max pain = {max_pain}",
    ]
    ax.legend(fontsize="xx-small", handles=handles[:], labels=labels, loc="lower left")
    sns.despine(left=True, bottom=True)
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "voi_tr",
        options,
    )
@log_start_end(log=logger)
def display_historical(
    symbol: str,
    expiry: str,
    strike: float = 0,
    put: bool = False,
    raw: bool = False,
    chain_id: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot historical option prices

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Expiry date of option
    strike: float
        Option strike price
    put: bool
        Is this a put option?
    raw: bool
        Print raw data
    chain_id: Optional[str]
        OCC option symbol; forwarded to the model lookup
    export: str
        Format of export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list: candles and volume),
        by default None
    """
    df_hist = tradier_model.get_historical_options(
        symbol, expiry, strike, put, chain_id
    )
    if raw:
        print_rich_table(
            df_hist,
            headers=[x.title() for x in df_hist.columns],
            title="Historical Option Prices",
        )
    # bool indexes the list: False -> "call", True -> "put"
    op_type = ["call", "put"][put]
    # Shared mplfinance styling for both the standalone and external-axes paths
    candle_chart_kwargs = {
        "type": "candle",
        "style": theme.mpf_style,
        "volume": True,
        "xrotation": theme.xticks_rotation,
        "scale_padding": {"left": 0.3, "right": 1.2, "top": 0.8, "bottom": 0.8},
        "update_width_config": {
            "candle_linewidth": 0.6,
            "candle_width": 0.8,
            "volume_linewidth": 0.8,
            "volume_width": 0.8,
        },
        "datetime_format": "%Y-%b-%d",
    }
    if external_axes is None:
        # Standalone mode: let mplfinance create and return the figure
        candle_chart_kwargs["returnfig"] = True
        candle_chart_kwargs["figratio"] = (10, 7)
        candle_chart_kwargs["figscale"] = 1.10
        candle_chart_kwargs["figsize"] = plot_autoscale()
        fig, ax = mpf.plot(df_hist, **candle_chart_kwargs)
        fig.suptitle(
            f"Historical {strike} {op_type.title()}",
            x=0.055,
            y=0.965,
            horizontalalignment="left",
        )
        lambda_long_number_format_y_axis(df_hist, "volume", ax)
        theme.visualize_output(force_tight_layout=False)
    elif is_valid_axes_count(external_axes, 2):
        # Caller-supplied axes: first for candles, second for volume
        (ax1, ax2) = external_axes
        candle_chart_kwargs["ax"] = ax1
        candle_chart_kwargs["volume"] = ax2
        mpf.plot(df_hist, **candle_chart_kwargs)
    else:
        return
    console.print()
    if export:
        export_data(
            export,
            os.path.dirname(os.path.abspath(__file__)),
            "hist",
            df_hist,
        )
import logging
from typing import Dict, List, Optional
from matplotlib import pyplot as plt
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import is_valid_axes_count, plot_autoscale
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import calculator_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def view_calculator(
    strike: float = 10,
    premium: float = 1,
    put: bool = False,
    sell: bool = False,
    external_axes: Optional[List[plt.Axes]] = None,
    **kwargs: Dict[str, int],
):
    """Plot the profit/loss profile at expiry for a single option position.

    Parameters
    ----------
    strike: float
        Strike price
    premium: float
        Premium
    put: bool
        Whether option is put
    sell: bool
        Whether selling option
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    kwargs: Dict[str, int]
        Extra keyword arguments forwarded to calculator_model.pnl_calculator
    """
    price_at_expiry, pnl, break_even = calculator_model.pnl_calculator(
        strike, premium, put, sell, **kwargs
    )
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # P&L curve with profit shaded green and losses shaded red
    ax.plot(price_at_expiry, pnl, alpha=0.1, c="k")
    ax.fill_between(
        price_at_expiry, 0, pnl, where=(pnl > 0), facecolor="green", alpha=0.5
    )
    ax.fill_between(
        price_at_expiry, 0, pnl, where=(pnl < 0), facecolor="red", alpha=0.5
    )
    ax.axvline(x=break_even, lw=3, alpha=0.6, label=f"Breakeven: ${break_even}")
    ax.axvline(x=strike, lw=3, alpha=0.6, label=f"Strike: ${strike}")
    # One contract covers 100 shares, so premium is scaled by 100; a short
    # position caps profit at premium received, a long caps loss at premium paid
    if sell:
        ax.axhline(
            y=100 * premium,
            c="seagreen",
            lw=3,
            alpha=0.6,
            label=f"Max Profit: ${100 * premium}",
        )
    else:
        ax.axhline(
            y=-100 * premium,
            c="firebrick",
            lw=3,
            alpha=0.6,
            label=f"Max Loss: ${-100 * premium}",
        )
    ax.set_xlabel("Price at Expiry")
    ax.set_ylabel("Profit")
    ax.set_title(
        f"Profit for {['Buying', 'Selling'][sell]} {['Call', 'Put'][put]} option"
    )
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
    print_string = f"""Strike: ${strike}
Premium: ${premium}
Breakeven price: ${break_even}\n"""
    if sell:
        print_string += f"""Max profit: ${100 * premium}
Max loss: Unlimited\n"""
    else:
        print_string += f"""Max profit: Unlimited
Max loss: ${-100 * premium}\n"""
    console.print(print_string)
__docformat__ = "numpy"
import logging
import os
import re
from bisect import bisect_left
from datetime import date, datetime, timedelta
from typing import Any, Dict, List, Optional
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import seaborn as sns
import yfinance as yf
from openpyxl import Workbook
from scipy.stats import binom
import openbb_terminal.config_plot as cfp
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.core.config.paths import MISCELLANEOUS_DIRECTORY
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
excel_columns,
export_data,
get_rf,
is_valid_axes_count,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import op_helpers, yfinance_model
from openbb_terminal.stocks.options.yfinance_model import (
generate_data,
get_option_chain,
get_price,
)
# pylint: disable=C0302
logger = logging.getLogger(__name__)
def header_fmt(header: str) -> str:
    """
    Formats strings to appear as titles

    Splits camelCase words, converts underscores to spaces, and title-cases
    the result, e.g. "impliedVolatility_call" -> "Implied Volatility Call".

    Parameters
    ----------
    header: str
        The string to be formatted

    Returns
    -------
    new_header: str
        The clean string to use as a header
    """
    # Insert a space at every lowercase/digit -> uppercase boundary. The old
    # re.findall("[A-Z][^A-Z]*") approach silently dropped any leading
    # lowercase word ("lastTradeDate" became "Trade Date").
    new_header = re.sub(r"(?<=[a-z0-9])(?=[A-Z])", " ", header)
    new_header = new_header.replace("_", " ")
    return new_header.title()
@log_start_end(log=logger)
def display_chains(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    calls_only: bool = False,
    puts_only: bool = False,
    export: str = "",
):
    """Display option chains for given ticker and expiration

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Expiration for option chain
    min_sp: float
        Min strike (-1 for no lower bound)
    max_sp: float
        Max strike (-1 for no upper bound)
    calls_only: bool
        Flag to get calls only
    puts_only: bool
        Flag to get puts only
    export: str
        Format to export data
    """
    # Logic for converting calls/puts into "get calls/puts".
    # If both flags are set, puts win (the second if overrides the first).
    call_bool = True
    put_bool = True
    if calls_only:
        call_bool = True
        put_bool = False
    if puts_only:
        call_bool = False
        put_bool = True
    option_chains = yfinance_model.get_option_chain_expiry(
        symbol=symbol,
        expiry=expiry,
        calls=call_bool,
        puts=put_bool,
        min_sp=min_sp,
        max_sp=max_sp,
    ).fillna("-")
    if option_chains.empty:
        console.print("[red]Option chains not found.[/red]")
        return
    if calls_only or puts_only:
        # Single-sided chain: columns are already nicely named
        title = "Call " if calls_only else "Put "
        print_rich_table(
            option_chains,
            title=f"{symbol} {title} Option Chain\nYahoo (15 min delayed)",
            floatfmt=[
                ".2f",
                ".2f",
                ".2f",
                ".2f",
                ".0f",
                ".0f",
                ".3f",
                ".3f",
                ".3f",
                ".3f",
            ],
            headers=[
                "Strike",
                "Last Price",
                "Bid",
                "Ask",
                "Volume",
                "Open Interest",
                "IV",
                "Delta",
                "Gamma",
                "Theta",
            ],
        )
    else:
        # Neither flag set: both sides were fetched, so interleave the call and
        # put columns mirrored around the strike column.
        # (Previously this branch was guarded by `calls_only and puts_only`,
        # which can never coexist with the *_call/*_put columns it indexes,
        # so the combined table was never shown.)
        option_chains = option_chains[
            [
                "impliedVolatility_call",
                "Theta_call",
                "Gamma_call",
                "Delta_call",
                "volume_call",
                "openInterest_call",
                "bid_call",
                "ask_call",
                "lastPrice_call",
                "strike",
                "lastPrice_put",
                "ask_put",
                "bid_put",
                "openInterest_put",
                "volume_put",
                "Delta_put",
                "Gamma_put",
                "Theta_put",
                "impliedVolatility_put",
            ]
        ]
        # In order to add color to call/put, the numbers will have to be strings.
        # So floatfmt will not work in print_rich_table, so lets format them now.
        float_fmt = [
            ".3f",
            ".3f",
            ".3f",
            ".3f",
            ".0f",
            ".0f",
            ".2f",
            ".2f",
            ".2f",
            ".2f",
            ".2f",
            ".2f",
            ".2f",
            ".0f",
            ".0f",
            ".3f",
            ".3f",
            ".3f",
            ".3f",
        ]
        # pylint: disable=W0640
        for idx, fmt in enumerate(float_fmt):
            option_chains.iloc[:, idx] = option_chains.iloc[:, idx].apply(
                lambda x: str("{:" + fmt + "}").format(float(x)) if x != "-" else x
            )
        # pylint: enable=W0640
        # Make anything _call green and anything _put red
        for col in option_chains.columns:
            if col.endswith("_call"):
                option_chains[col] = option_chains[col].apply(
                    lambda x: f"[green]{x}[/green]"
                )
            if col.endswith("_put"):
                option_chains[col] = option_chains[col].apply(
                    lambda x: f"[red]{x}[/red]"
                )
        print_rich_table(
            option_chains,
            title=f"{symbol} Option Chain for {expiry}\nYahoo (15 min delayed)",
            headers=[header_fmt(x) for x in option_chains.columns],
        )
    console.print("Greeks calculated by OpenBB")
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "chains_yf",
        option_chains,
    )
@log_start_end(log=logger)
def plot_oi(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    calls_only: bool = False,
    puts_only: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot put and call open interest per strike for one expiration.

    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiration date for the options
    min_sp: float
        Minimum strike to show; -1 defaults to 75% of the spot price
    max_sp: float
        Maximum strike to show; -1 defaults to 125% of the spot price
    calls_only: bool
        Show only the call side
    puts_only: bool
        Show only the put side
    export: str
        Format to export the raw chain data to
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    options = yfinance_model.get_option_chain(symbol, expiry)
    op_helpers.export_yf_options(export, options, "oi_yf")
    spot = float(yf.Ticker(symbol).info["regularMarketPrice"])
    # Default the visible strike window to +/- 25% around spot
    min_strike = 0.75 * spot if min_sp == -1 else min_sp
    max_strike = 1.25 * spot if max_sp == -1 else max_sp
    if calls_only and puts_only:
        console.print("Both flags selected, please select one", "\n")
        return
    # Open interest in thousands of contracts, indexed by strike
    call_oi = options.calls.set_index("strike")["openInterest"] / 1000
    put_oi = options.puts.set_index("strike")["openInterest"] / 1000
    oi_by_strike = pd.merge(
        call_oi, put_oi, left_index=True, right_index=True
    ).rename(columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"})
    max_pain = op_helpers.calculate_max_pain(oi_by_strike)
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Identical styling for both series; puts drawn first, then calls
    series_kwargs = {"x": "strike", "y": "openInterest", "ax": ax, "marker": "o", "ls": "-"}
    if not calls_only:
        put_oi.plot(label="Puts", **series_kwargs)
    if not puts_only:
        call_oi.plot(label="Calls", **series_kwargs)
    ax.axvline(spot, lw=2, ls="--", label="Current Price", alpha=0.7)
    ax.axvline(max_pain, lw=3, label=f"Max Pain: {max_pain}", alpha=0.7)
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Open Interest [1k] ")
    ax.set_xlim(min_strike, max_strike)
    ax.legend(fontsize="x-small")
    ax.set_title(f"Open Interest for {symbol.upper()} expiring {expiry}")
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def plot_vol(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    calls_only: bool = False,
    puts_only: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot put and call volume per strike for one expiration.

    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiration date for the options
    min_sp: float
        Minimum strike to show; -1 defaults to 75% of the spot price
    max_sp: float
        Maximum strike to show; -1 defaults to 125% of the spot price
    calls_only: bool
        Show only the call side
    puts_only: bool
        Show only the put side
    export: str
        Format to export the raw chain data to
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    options = yfinance_model.get_vol(symbol, expiry)
    spot = float(yf.Ticker(symbol).info["regularMarketPrice"])
    # Default the visible strike window to +/- 25% around spot
    min_strike = 0.75 * spot if min_sp == -1 else min_sp
    max_strike = 1.25 * spot if max_sp == -1 else max_sp
    if calls_only and puts_only:
        console.print("Both flags selected, please select one", "\n")
        return
    # Volume in thousands of contracts, indexed by strike
    call_vol = options.calls.set_index("strike")["volume"] / 1000
    put_vol = options.puts.set_index("strike")["volume"] / 1000
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Identical styling for both series; puts drawn first, then calls
    series_kwargs = {"x": "strike", "y": "volume", "ax": ax, "marker": "o", "ls": "-"}
    if not calls_only:
        put_vol.plot(label="Puts", **series_kwargs)
    if not puts_only:
        call_vol.plot(label="Calls", **series_kwargs)
    ax.axvline(spot, lw=2, ls="--", label="Current Price", alpha=0.7)
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Volume [1k] ")
    ax.set_xlim(min_strike, max_strike)
    ax.legend(fontsize="x-small")
    ax.set_title(f"Volume for {symbol.upper()} expiring {expiry}")
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    op_helpers.export_yf_options(export, options, "vol_yf")
@log_start_end(log=logger)
def plot_volume_open_interest(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    min_vol: float = -1,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot volume and open interest

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Option expiration
    min_sp: float
        Min strike price; -1 means "not supplied"
    max_sp: float
        Max strike price; -1 means "not supplied"
    min_vol: float
        Min volume to consider; -1 means "not supplied"
    export: str
        Format for exporting data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    options = yfinance_model.get_volume_open_interest(symbol, expiry)
    calls = options.calls
    puts = options.puts
    current_price = float(yf.Ticker(symbol).info["regularMarketPrice"])
    # Process Calls Data: aggregate volume and open interest per strike
    df_calls = calls.pivot_table(
        index="strike", values=["volume", "openInterest"], aggfunc="sum"
    ).reindex()
    df_calls["strike"] = df_calls.index
    df_calls["type"] = "calls"
    df_calls["openInterest"] = df_calls["openInterest"]
    df_calls["volume"] = df_calls["volume"]
    # "oi+v" is the total bar length; volume is later over-drawn on top of it
    df_calls["oi+v"] = df_calls["openInterest"] + df_calls["volume"]
    df_calls["spot"] = round(current_price, 2)
    # Process Puts Data: same aggregation, but volume and open interest are
    # negated so the put bars extend to the left of the origin
    df_puts = puts.pivot_table(
        index="strike", values=["volume", "openInterest"], aggfunc="sum"
    ).reindex()
    df_puts["strike"] = df_puts.index
    df_puts["type"] = "puts"
    df_puts["openInterest"] = df_puts["openInterest"]
    df_puts["volume"] = -df_puts["volume"]
    df_puts["openInterest"] = -df_puts["openInterest"]
    df_puts["oi+v"] = df_puts["openInterest"] + df_puts["volume"]
    df_puts["spot"] = round(current_price, 2)
    # Open interest per strike (in thousands) feeds the max-pain calculation
    call_oi = calls.set_index("strike")["openInterest"] / 1000
    put_oi = puts.set_index("strike")["openInterest"] / 1000
    df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
    df_opt = df_opt.rename(
        columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"}
    )
    max_pain = op_helpers.calculate_max_pain(df_opt)
    if min_vol == -1 and min_sp == -1 and max_sp == -1:
        # If no argument provided, we use the percentile 50 to get 50% of upper volume data
        volume_percentile_threshold = 50
        min_vol_calls = np.percentile(df_calls["oi+v"], volume_percentile_threshold)
        min_vol_puts = np.percentile(df_puts["oi+v"], volume_percentile_threshold)
        # Restrict calls to strikes present on both sides before filtering
        df_calls = df_calls.loc[df_calls.index.intersection(df_puts.index)]
        # NOTE(review): the put-side mask (df_puts["oi+v"] < min_vol_puts) is
        # applied to df_calls rows via index alignment; the put oi+v values are
        # negative, so "< min_vol_puts" keeps the MORE active put strikes.
        # Presumably intentional — confirm against the original intent.
        df_calls = (
            df_calls[df_calls["oi+v"] > min_vol_calls]
            .drop(["strike"], axis=1)
            .reset_index()
            .merge(
                df_calls[df_puts["oi+v"] < min_vol_puts][
                    ["openInterest", "volume", "type", "oi+v", "spot"]
                ].reset_index()
            )
            .set_index("strike")
        )
        df_calls["strike"] = df_calls.index
        df_puts = df_puts.loc[df_puts.index.intersection(df_calls.index)]
        # Default strike window: +/- 25% around spot
        df_calls = df_calls[df_calls["strike"] > 0.75 * current_price]
        df_calls = df_calls[df_calls["strike"] < 1.25 * current_price]
        df_puts = df_puts[df_puts["strike"] > 0.75 * current_price]
        df_puts = df_puts[df_puts["strike"] < 1.25 * current_price]
    else:
        df_calls = df_calls.loc[df_calls.index.intersection(df_puts.index)]
        if min_vol > -1:
            # Same masking pattern as above, but with the user-supplied floor
            df_calls = (
                df_calls[df_calls["oi+v"] > min_vol]
                .drop(["strike"], axis=1)
                .reset_index()
                .merge(
                    df_calls[df_puts["oi+v"] < min_vol][
                        ["openInterest", "volume", "type", "oi+v", "spot"]
                    ].reset_index()
                )
                .set_index("strike")
            )
            df_calls["strike"] = df_calls.index
            df_puts = df_puts.loc[df_puts.index.intersection(df_calls.index)]
        # Explicit strike bounds when given; otherwise +/- 25% around spot
        if min_sp > -1:
            df_calls = df_calls[df_calls["strike"] > min_sp]
            df_puts = df_puts[df_puts["strike"] > min_sp]
        else:
            df_calls = df_calls[df_calls["strike"] > 0.75 * current_price]
            df_puts = df_puts[df_puts["strike"] > 0.75 * current_price]
        if max_sp > -1:
            df_calls = df_calls[df_calls["strike"] < max_sp]
            df_puts = df_puts[df_puts["strike"] < max_sp]
        else:
            df_calls = df_calls[df_calls["strike"] < 1.25 * current_price]
            df_puts = df_puts[df_puts["strike"] < 1.25 * current_price]
    if df_calls.empty and df_puts.empty:
        console.print(
            "The filtering applied is too strong, there is no data available for such conditions.\n"
        )
        return
    # Initialize the matplotlib figure
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # make x axis symmetric
    axis_origin = max(abs(max(df_puts["oi+v"])), abs(max(df_calls["oi+v"])))
    ax.set_xlim(-axis_origin, +axis_origin)
    # Layered horizontal bars: the full oi+v bar is drawn first, then the
    # volume portion is painted over it in a darker color
    g = sns.barplot(
        x="oi+v",
        y="strike",
        data=df_calls,
        label="Calls: Open Interest",
        color="lightgreen",
        orient="h",
    )
    g = sns.barplot(
        x="volume",
        y="strike",
        data=df_calls,
        label="Calls: Volume",
        color="green",
        orient="h",
    )
    g = sns.barplot(
        x="oi+v",
        y="strike",
        data=df_puts,
        label="Puts: Open Interest",
        color="pink",
        orient="h",
    )
    g = sns.barplot(
        x="volume",
        y="strike",
        data=df_puts,
        label="Puts: Volume",
        color="red",
        orient="h",
    )
    # draw spot line
    s = [float(strike.get_text()) for strike in ax.get_yticklabels()]
    spot_index = bisect_left(s, current_price)  # find where the spot is on the graph
    spot_line = ax.axhline(spot_index, ls="--", alpha=0.3)
    # draw max pain line
    max_pain_index = bisect_left(s, max_pain)
    max_pain_line = ax.axhline(max_pain_index, ls="-", alpha=0.3, color="red")
    max_pain_line.set_linewidth(5)
    # format ticklabels without - for puts
    g.set_xticks(g.get_xticks())
    xlabels = [f"{x:,.0f}".replace("-", "") for x in g.get_xticks()]
    g.set_xticklabels(xlabels)
    ax.set_title(
        f"{symbol} volumes for {expiry} \n(open interest displayed only during market hours)",
    )
    ax.invert_yaxis()
    handles, _ = ax.get_legend_handles_labels()
    handles.append(spot_line)
    handles.append(max_pain_line)
    # create legend labels + add to graph
    labels = [
        "Calls open interest",
        "Calls volume ",
        "Puts open interest",
        "Puts volume",
        "Current stock price",
        f"Max pain = {max_pain}",
    ]
    ax.legend(fontsize="xx-small", handles=handles[:], labels=labels, loc="lower left")
    sns.despine(left=True, bottom=True)
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    op_helpers.export_yf_options(export, options, "voi_yf")
@log_start_end(log=logger)
def plot_plot(
    symbol: str,
    expiry: str,
    put: bool = False,
    x: str = "s",
    y: str = "iv",
    custom: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Generate a graph custom graph based on user input

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Option expiration
    put: bool
        Plot puts instead of calls
    x: str
        Variable to display on the x axis, choose from:
        ltd, s, lp, b, a, c, pc, v, oi, iv
    y: str
        Variable to display on the y axis, choose from:
        ltd, s, lp, b, a, c, pc, v, oi, iv
    custom: str
        Type of plot; "smile" forces strike vs. implied volatility
    export: str
        Type of data to export
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # Short-code -> option-chain column name
    convert = {
        "ltd": "lastTradeDate",
        "s": "strike",
        "lp": "lastPrice",
        "b": "bid",
        "a": "ask",
        "c": "change",
        "pc": "percentChange",
        "v": "volume",
        "oi": "openInterest",
        "iv": "impliedVolatility",
    }
    if custom == "smile":
        x = "strike"
        y = "impliedVolatility"
    else:
        if x is None:
            console.print("[red]Invalid option sent for x-axis[/red]\n")
            return
        if y is None:
            console.print("[red]Invalid option sent for y-axis[/red]\n")
            return
        if x in convert:
            x = convert[x]
        else:
            # Print the *invalid* value before overwriting it with the default
            # (previously the assignment came first, so the message always
            # claimed 'strike' was invalid).
            console.print(
                f"[red]'{x}' is not a valid option. Defaulting to `strike`.[/red]\n"
            )
            x = "strike"
        if y in convert:
            y = convert[y]
        else:
            console.print(
                f"[red]'{y}' is not a valid option. Defaulting to `impliedVolatility`.[/red]\n"
            )
            y = "impliedVolatility"
    varis = op_helpers.opt_chain_cols
    chain = yfinance_model.get_option_chain(symbol, expiry)
    values = chain.puts if put else chain.calls
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    x_data = values[x]
    y_data = values[y]
    ax.plot(x_data, y_data, "--bo")
    option = "puts" if put else "calls"
    ax.set_title(
        f"{varis[y]['label']} vs. {varis[x]['label']} for {symbol} {option} on {expiry}"
    )
    ax.set_ylabel(varis[y]["label"])
    ax.set_xlabel(varis[x]["label"])
    # Date-typed axes get a daily locator/formatter; otherwise apply whatever
    # formatter op_helpers.opt_chain_cols declares for the column (if any)
    if varis[x]["format"] == "date":
        ax.get_xaxis().set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
        ax.get_xaxis().set_major_locator(mdates.DayLocator(interval=1))
    elif varis[x]["format"]:
        ax.get_xaxis().set_major_formatter(varis[x]["format"])
    if varis[y]["format"] == "date":
        ax.get_yaxis().set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
        ax.get_yaxis().set_major_locator(mdates.DayLocator(interval=1))
    elif varis[y]["format"]:
        ax.get_yaxis().set_major_formatter(varis[y]["format"])
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "plot")
@log_start_end(log=logger)
def plot_payoff(
    current_price: float,
    options: List[Dict[Any, Any]],
    underlying: float,
    symbol: str,
    expiry: str,
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Generate a graph showing the option payoff diagram.

    Parameters
    ----------
    current_price : float
        Current price of the underlying asset
    options : List[Dict[Any, Any]]
        Option positions to include (format consumed by generate_data)
    underlying : float
        Underlying position, passed through to generate_data
    symbol : str
        Ticker symbol
    expiry : str
        Option expiration date
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    prices, payoff_before, payoff_after = generate_data(
        current_price, options, underlying
    )
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # The after-premium series is optional; draw both curves when present
    if payoff_after:
        ax.plot(prices, payoff_before, label="Payoff Before Premium")
        ax.plot(prices, payoff_after, label="Payoff After Premium")
    else:
        ax.plot(prices, payoff_before, label="Payoff")
    ax.set_title(f"Option Payoff Diagram for {symbol} on {expiry}")
    ax.set_ylabel("Profit")
    ax.set_xlabel("Underlying Asset Price at Expiration")
    ax.legend()
    ax.xaxis.set_major_formatter("${x:.2f}")
    ax.yaxis.set_major_formatter("${x:.2f}")
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def show_parity(
    symbol: str,
    expiry: str,
    put: bool = False,
    ask: bool = False,
    mini: Optional[float] = None,
    maxi: Optional[float] = None,
    export: str = "",
) -> None:
    """Prints options and whether they are under or over priced [Source: Yahoo Finance]

    Uses put-call parity to derive each option's fair value from its
    counterpart and shows market price minus parity value per strike.

    Parameters
    ----------
    symbol : str
        Ticker symbol to get expirations for
    expiry : str
        Expiration to use for options
    put : bool
        Whether to use puts or calls
    ask : bool
        Whether to use ask or lastPrice
    mini : Optional[float]
        Minimum strike price to show (defaults to the 25th strike percentile)
    maxi : Optional[float]
        Maximum strike price to show (defaults to the 75th strike percentile)
    export : str
        Export data
    """
    # Risk-free growth over the days left until expiration
    r_date = datetime.strptime(expiry, "%Y-%m-%d").date()
    delta = (r_date - date.today()).days
    rate = ((1 + get_rf()) ** (delta / 365)) - 1
    stock = get_price(symbol)
    # Present value of dividends expected before expiry, assuming a quarterly
    # (91-day) cadence at the average of the most recent payouts
    div_info = yfinance_model.get_dividend(symbol)
    div_dts = div_info.index.values.tolist()
    if div_dts:
        last_div = pd.to_datetime(div_dts[-1])
        if len(div_dts) > 3:
            avg_div = np.mean(div_info.to_list()[-4:])
        else:
            avg_div = np.mean(div_info.to_list())
        next_div = last_div + timedelta(days=91)
        dividends = []
        while next_div < datetime.strptime(expiry, "%Y-%m-%d"):
            day_dif = (next_div - datetime.now()).days
            dividends.append((avg_div, day_dif))
            next_div += timedelta(days=91)
        # Discount each projected dividend back to today
        div_pvs = [x[0] / ((1 + get_rf()) ** (x[1] / 365)) for x in dividends]
        pv_dividend = sum(div_pvs)
    else:
        pv_dividend = 0
    chain = get_option_chain(symbol, expiry)
    name = "ask" if ask else "lastPrice"
    o_type = "put" if put else "call"
    calls = chain.calls[["strike", name]].copy()
    calls = calls.rename(columns={name: "callPrice"})
    puts = chain.puts[["strike", name]].copy()
    puts = puts.rename(columns={name: "putPrice"})
    opts = pd.merge(calls, puts, on="strike")
    opts = opts.dropna()
    # Drop strikes where either side has a zero price (no usable quote)
    opts = opts.loc[opts["callPrice"] * opts["putPrice"] != 0]
    # Parity: C = P + S - K/(1+r) - PV(div);  P = K/(1+r) + C - S + PV(div)
    opts["callParity"] = (
        opts["putPrice"] + stock - (opts["strike"] / (1 + rate)) - pv_dividend
    )
    opts["putParity"] = (
        (opts["strike"] / (1 + rate)) + opts["callPrice"] - stock + pv_dividend
    )
    diff = o_type + " Difference"
    opts[diff] = opts[o_type + "Price"] - opts[o_type + "Parity"]
    opts["distance"] = abs(stock - opts["strike"])
    filtered = opts.copy()
    # Default the strike window to the interquartile range
    if mini is None:
        mini = filtered.strike.quantile(0.25)
    if maxi is None:
        maxi = filtered.strike.quantile(0.75)
    filtered = filtered.loc[filtered["strike"] >= mini]
    filtered = filtered.loc[filtered["strike"] <= maxi]
    show = filtered[["strike", diff]].copy()
    if ask:
        console.print("Warning: Options with no current ask price not shown.\n")
    print_rich_table(
        show,
        headers=[x.title() for x in show.columns],
        show_index=False,
        title=f"{symbol} Parity",
    )
    console.print(
        "[yellow]Warning: Low volume options may be difficult to trade.[/yellow]"
    )
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "parity",
        show,
    )
@log_start_end(log=logger)
def risk_neutral_vals(
    symbol: str,
    expiry: str,
    data: pd.DataFrame,
    put: bool = False,
    mini: float = None,
    maxi: float = None,
    risk: float = None,
) -> None:
    """Prints current options prices and risk neutral values [Source: Yahoo Finance]

    Parameters
    ----------
    symbol: str
        Ticker symbol to get expirations for
    expiry: str
        Expiration to use for options
    data: pd.DataFrame
        Estimates for stocks prices and probabilities
    put: bool
        Whether to use puts or calls
    mini: float
        Minimum strike price to show (defaults to the 25th strike percentile)
    maxi: float
        Maximum strike price to show (defaults to the 75th strike percentile)
    risk: float
        The risk-free rate for the asset; None uses the current risk-free rate
    """
    full_chain = get_option_chain(symbol, expiry)
    chain = full_chain.puts if put else full_chain.calls
    # Days remaining until expiration, used to discount the payoff
    days_left = (datetime.strptime(expiry, "%Y-%m-%d").date() - date.today()).days
    if risk is None:
        risk = get_rf()
    rows = [
        [
            row["strike"],
            row["lastPrice"],
            op_helpers.rn_payoff(row["strike"], data, put, days_left, risk),
        ]
        for _, row in chain.iterrows()
    ]
    new_df = pd.DataFrame(rows, columns=["Strike", "Last Price", "Value"], dtype=float)
    new_df["Difference"] = new_df["Last Price"] - new_df["Value"]
    if mini is None:
        mini = new_df.Strike.quantile(0.25)
    if maxi is None:
        maxi = new_df.Strike.quantile(0.75)
    new_df = new_df[(new_df["Strike"] >= mini) & (new_df["Strike"] <= maxi)]
    print_rich_table(
        new_df,
        headers=[x.title() for x in new_df.columns],
        show_index=False,
        title="Risk Neutral Values",
    )
@log_start_end(log=logger)
def plot_expected_prices(
    und_vals: List[List[float]],
    p: float,
    symbol: str,
    expiry: str,
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Plot the binomial-model probability of each terminal underlying price.

    Parameters
    ----------
    und_vals : List[List[float]]
        The expected underlying values at the expiration date
    p : float
        The probability of the stock price moving upward each round
    symbol : str
        The ticker symbol of the option's underlying asset
    expiry : str
        The expiration for the option
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    terminal_prices = und_vals[-1]
    n_steps = len(terminal_prices)
    # NOTE(review): assumes the terminal price column is ordered by descending
    # up-move count, so the up-move sequence is walked in reverse to align each
    # binomial probability (as a percentage) with its price — confirm against
    # yfinance_model.get_binom
    probs = [100 * binom.pmf(k, n_steps, p) for k in reversed(range(n_steps))]
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.set_title(f"Probabilities for ending prices of {symbol} on {expiry}")
    ax.xaxis.set_major_formatter("${x:1.2f}")
    ax.yaxis.set_major_formatter(mtick.PercentFormatter())
    ax.plot(terminal_prices, probs)
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def export_binomial_calcs(
    up: float,
    prob_up: float,
    discount: float,
    und_vals: List[List[float]],
    opt_vals: List[List[float]],
    days: int,
    symbol: str,
) -> None:
    """Create an excel spreadsheet with binomial tables for underlying asset value and option value

    Parameters
    ----------
    up : float
        The stock's increase on an upward move
    prob_up : float
        The probability of an upward move
    discount : float
        The daily discount rate
    und_vals : List[List[float]]
        The underlying asset values at each step
    opt_vals : List[List[float]]
        The values for the option at each step
    days : int
        The number of days until the option expires
    symbol : str
        The ticker symbol for the company
    """
    # Spreadsheet column letters (A, B, ..., AA, ...) used to address cells
    opts = excel_columns()
    wb = Workbook()
    ws = wb.active
    # Model parameters in the header rows
    ws["A1"] = "Up Move"
    ws["B1"] = up
    ws["A2"] = "Down Move"
    ws["B2"] = 1 / up  # down factor is the reciprocal of the up factor
    ws["D1"] = "Prob Up"
    ws["E1"] = prob_up
    ws["D2"] = "Prob Down"
    ws["E2"] = 1 - prob_up
    ws["D3"] = "Discount"
    ws["E3"] = discount
    # Underlying tree: one column per time step, values starting at row 5
    ws["A4"] = "Binomial Tree for Underlying Values"
    for i, _ in enumerate(und_vals):
        for j, _ in enumerate(und_vals[i]):
            ws[f"{opts[i]}{j+5}"] = und_vals[i][j]
    # Option-value tree is laid out below the underlying tree, offset by `days`
    ws[f"A{days+7}"] = "Binomial Tree for Option Values"
    for i, _ in enumerate(opt_vals):
        for j, _ in enumerate(opt_vals[i]):
            ws[f"{opts[i]}{j+8+days}"] = opt_vals[i][j]
    trypath = str(
        MISCELLANEOUS_DIRECTORY
        / "exports"
        / "stocks"
        / "options"
        / f"{symbol} {datetime.now()}.xlsx"
    )
    wb.save(trypath)
    console.print(
        f"Analysis ran for {symbol}\nPlease look in {trypath} for the file.\n"
    )
@log_start_end(log=logger)
def show_binom(
    symbol: str,
    expiry: str,
    strike: float = 0,
    put: bool = False,
    europe: bool = False,
    export: str = "",
    plot: bool = False,
    vol: float = None,
) -> None:
    """Value an option with a binomial tree and print the result.

    Parameters
    ----------
    symbol : str
        The ticker symbol of the option's underlying asset
    expiry : str
        The expiration for the option
    strike : float
        The strike price for the option
    put : bool
        Value a put instead of a call
    europe : bool
        Value a European option instead of an American option
    export : str
        When truthy, export the binomial trees to an excel spreadsheet
    plot : bool
        Show a graph of expected ending prices
    vol : float
        The annualized volatility for the underlying asset; None defers to the
        model's own estimate
    """
    up, prob_up, discount, und_vals, opt_vals, days = yfinance_model.get_binom(
        symbol, expiry, strike, put, europe, vol
    )
    if export:
        export_binomial_calcs(up, prob_up, discount, und_vals, opt_vals, days, symbol)
    if plot:
        plot_expected_prices(und_vals, prob_up, symbol, expiry)
    side = "put" if put else "call"
    # The tree root opt_vals[0][0] is the option's present value
    console.print(
        f"{symbol} {side} at ${strike:.2f} expiring on {expiry} is worth ${opt_vals[0][0]:.2f}\n"
    )
@log_start_end(log=logger)
def display_vol_surface(
    symbol: str,
    export: str = "",
    z: str = "IV",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display vol surface

    Parameters
    ----------
    symbol : str
        Ticker symbol to get surface for
    export : str
        Format to export data
    z : str
        The variable for the Z axis: "IV", "OI" or "LP"
    external_axes: Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list), by default None
    """
    data = yfinance_model.get_iv_surface(symbol)
    if data.empty:
        console.print(f"No options data found for {symbol}.\n")
        return
    # Validate the z-axis selector up front. The old if/elif chain left
    # Z and label unbound for unknown values, raising NameError below.
    z_to_col = {
        "IV": ("impliedVolatility", "Volatility"),
        "OI": ("openInterest", "Open Interest"),
        "LP": ("lastPrice", "Last Price"),
    }
    if z not in z_to_col:
        console.print(f"[red]Invalid z axis '{z}'. Choose from IV, OI, LP.[/red]\n")
        return
    column, label = z_to_col[z]
    X = data.dte
    Y = data.strike
    Z = data[column]
    if external_axes is None:
        fig = plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax = plt.axes(projection="3d")
    else:
        ax = external_axes[0]
    ax.plot_trisurf(X, Y, Z, cmap="jet", linewidth=0.2)
    ax.set_xlabel("DTE")
    ax.set_ylabel("Strike")
    ax.set_zlabel(z)
    if external_axes is None:
        fig.suptitle(f"{label} Surface for {symbol.upper()}")
        theme.visualize_output(force_tight_layout=False)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "vsurf",
        data,
    )
@log_start_end(log=logger)
def show_greeks(
    symbol: str,
    expiry: str,
    div_cont: float = 0,
    rf: float = None,
    opt_type: int = 1,
    mini: float = None,
    maxi: float = None,
    show_all: bool = False,
) -> None:
    """
    Shows the greeks for a given option

    Parameters
    ----------
    symbol: str
        The ticker symbol value of the option
    expiry: str
        The date of expiration, format "YYYY-MM-DD", i.e. 2010-12-31.
    div_cont: float
        The dividend continuous rate
    rf: float
        The risk-free rate
    opt_type: Union[1, -1]
        The option type 1 is for call and -1 is for put
    mini: float
        The minimum strike price to include in the table
    maxi: float
        The maximum strike price to include in the table
    show_all: bool
        Whether to show all greeks (adds Rho, Phi, Charm, Vanna, Vomma)
    """
    greeks = yfinance_model.get_greeks(
        symbol, expiry, div_cont, rf, opt_type, mini, maxi, show_all
    )
    # Per-column float formats matching get_greeks' column order:
    # 1 decimal, then 4, then 6 for each of the four base greek columns
    column_formatting = [".1f", ".4f"] + [".6f"] * 4
    if show_all:
        # Five extra columns: Rho, Phi, Charm, Vanna, Vomma
        column_formatting += [".6f"] * 5
    print_rich_table(
        greeks,
        headers=list(greeks.columns),
        show_index=False,
        title=f"{symbol} Greeks",
        floatfmt=column_formatting,
    )
    return None
__docformat__ = "numpy"
import os
from datetime import datetime, timedelta
from math import e, log
from typing import Union
import numpy as np
import pandas as pd
from scipy.stats import norm
from openbb_terminal.helper_funcs import export_data
from openbb_terminal.rich_config import console
def get_dte_from_expiration(date: str) -> float:
    """Fractional days from now until 4 PM on the given expiration date.
    Intended as a helper for Nasdaq results, whose dates look like
    "January 11, 2023" (``%B %d, %Y``).
    Parameters
    ----------
    date: str
        Expiration date in ``%B %d, %Y`` format
    Returns
    -------
    float
        Days to expiration as a decimal (negative once expired)
    """
    seconds_per_day = 60 * 60 * 24
    # Expiration is measured to 4 PM on the expiration day.
    expiry_at_close = datetime.strptime(date, "%B %d, %Y") + timedelta(hours=16)
    remaining = expiry_at_close - datetime.now()
    return remaining.total_seconds() / seconds_per_day
def get_loss_at_strike(strike: float, chain: pd.DataFrame) -> float:
    """Total dollar loss to option holders if the underlying expires at ``strike``.
    Parameters
    ----------
    strike : float
        Hypothetical expiration price
    chain : pd.DataFrame
        Chain indexed by strike with ``OI_call`` and ``OI_put`` columns
    Returns
    -------
    float
        Combined loss across in-the-money calls and puts
    """
    # Calls struck below the price and puts struck above it are in the money.
    below = chain.index < strike
    above = chain.index > strike
    call_loss = (
        (strike - chain.index[below]).values * chain["OI_call"].values[below]
    ).sum()
    put_loss = (
        (chain.index[above] - strike).values * chain["OI_put"].values[above]
    ).sum()
    return call_loss + put_loss
def calculate_max_pain(chain: pd.DataFrame) -> Union[int, float]:
    """Strike at which total option-holder loss is smallest ("max pain").
    Parameters
    ----------
    chain : pd.DataFrame
        Chain indexed by strike with ``OI_call`` and ``OI_put`` columns;
        a ``loss`` column is added to it as a side effect.
    Returns
    -------
    Union[int, float]
        The max pain strike, or ``np.nan`` when required columns are missing
    """
    if "OI_call" not in chain.columns or "OI_put" not in chain.columns:
        console.print("Incorrect columns. Unable to parse max pain")
        return np.nan
    # Evaluate the aggregate loss at every strike on the chain.
    chain["loss"] = [
        get_loss_at_strike(candidate, chain) for candidate in np.array(chain.index)
    ]
    return chain["loss"].idxmin()
def convert(orig: str, to: str) -> float:
    """Convert a formatted number string to a float
    Parameters
    ----------
    orig : str
        String to convert; "%", "+" and "," characters are stripped first
    to : str
        Target interpretation: "%" treats the string as a percentage and
        returns its decimal value; "," parses it as a plain number
    Returns
    -------
    number : float
        Decimal value of string
    Raises
    ------
    ValueError
        If ``to`` is neither "%" nor ","
    """
    clean = orig.replace("%", "").replace("+", "").replace(",", "")
    if to == "%":
        return float(clean) / 100
    if to == ",":
        return float(clean)
    raise ValueError("Invalid to format, please use '%' or ','.")
def rn_payoff(x: float, df: pd.DataFrame, put: bool, delta: int, rf: float) -> float:
    """The risk neutral payoff for a stock
    Parameters
    ----------
    x : float
        Strike price
    df : pd.DataFrame
        Dataframe of stocks prices and probabilities ("Price" and "Chance" columns)
    put : bool
        Whether the asset is a put or a call
    delta : int
        Difference between today's date and expirations date in days
    rf : float
        The current risk-free rate
    Returns
    -------
    number : float
        Risk neutral value of option
    """
    # Intrinsic payoff at each candidate price; note df is mutated in place.
    if put:
        df["Gain"] = np.where(x > df["Price"], x - df["Price"], 0)
    else:
        df["Gain"] = np.where(x < df["Price"], df["Price"] - x, 0)
    # Probability-weighted payoff, discounted back over `delta` days.
    df["Vals"] = df["Chance"] * df["Gain"]
    risk_free = (1 + rf) ** (delta / 365)
    return sum(df["Vals"]) / risk_free
def export_yf_options(export: str, options, file_name: str):
    """Export the calls and puts of a yfinance Options object.
    Parameters
    ----------
    export: str
        Format to export file
    options: Options
        The yfinance Options object
    file_name: str
        Base name for the exported files; "_calls"/"_puts" is appended
    """
    base_path = os.path.dirname(os.path.abspath(__file__))
    for side in ("calls", "puts"):
        export_data(
            export,
            base_path,
            f"{file_name}_{side}",
            getattr(options, side),
        )
# Display formatting ("format") and human-readable labels ("label") for the
# raw yfinance option-chain columns, used when rendering chains.
opt_chain_cols = {
    "lastTradeDate": {"format": "date", "label": "Last Trade Date"},
    "strike": {"format": "${x:.2f}", "label": "Strike"},
    "lastPrice": {"format": "${x:.2f}", "label": "Last Price"},
    "bid": {"format": "${x:.2f}", "label": "Bid"},
    "ask": {"format": "${x:.2f}", "label": "Ask"},
    "change": {"format": "${x:.2f}", "label": "Change"},
    "percentChange": {"format": "{x:.2f}%", "label": "Percent Change"},
    "volume": {"format": "{x:.2f}", "label": "Volume"},
    "openInterest": {"format": "", "label": "Open Interest"},
    "impliedVolatility": {"format": "{x:.2f}", "label": "Implied Volatility"},
}
# pylint: disable=R0903
class Chain:
    """Uniform ``calls``/``puts`` accessors over per-source option chains."""

    def __init__(self, df: pd.DataFrame, source: str = "tradier"):
        if source == "tradier":
            self.calls = df[df["option_type"] == "call"]
            self.puts = df[df["option_type"] == "put"]
        elif source == "nasdaq":
            # Nasdaq ships calls and puts side by side, prefixing columns
            # with "c_" / "p_"; split them while keeping the shared columns.
            shared_cols = ["expiryDate", "strike"]
            self.calls = df[
                shared_cols + [col for col in df.columns if col.startswith("c_")]
            ]
            self.puts = df[
                shared_cols + [col for col in df.columns if col.startswith("p_")]
            ]
        else:
            self.calls = None
            self.puts = None
class Option:
    """Closed-form Black-Scholes-Merton option pricing and greeks.
    Uses a continuous dividend yield; ``opt_type`` selects call (+1) or
    put (-1) via the sign trick in the standard formulas.
    """
    def __init__(
        self,
        s: float,
        k: float,
        rf: float,
        div_cont: float,
        expiry: float,
        vol: float,
        opt_type: int = 1,
    ):
        """
        Class for getting the greeks of options. Inspiration from:
        http://www.smileofthales.com/computation/option-pricing-python-inheritance/
        Parameters
        ----------
        s : float
            The underlying asset price
        k : float
            The option strike price
        rf : float
            The risk-free rate
        div_cont : float
            The dividend continuous rate
        expiry : float
            The number of days until expiration
        vol : float
            The underlying volatility for an option
        opt_type : int
            put == -1; call == +1
        """
        self.Type = int(opt_type)
        self.price = float(s)
        self.strike = float(k)
        self.risk_free = float(rf)
        self.div_cont = float(div_cont)
        # Days are converted to years for the closed-form formulas.
        self.exp_time = float(expiry) / 365.0
        self._sigma = float(vol)
        # sigma * sqrt(T), the d1 denominator; kept in sync by the sigma setter.
        self.sigmaT = self._sigma * self.exp_time**0.5
    @property
    def d1(self):
        # BSM d1 = [ln(S/K) + (r - q + sigma^2/2) T] / (sigma sqrt(T))
        return (
            log(self.price / self.strike)
            + (self.risk_free - self.div_cont + 0.5 * (self.sigma**2)) * self.exp_time
        ) / self.sigmaT
    @property
    def d2(self):
        # d2 = d1 - sigma sqrt(T)
        return self.d1 - self.sigmaT
    @property
    def sigma(self):
        # Annualized volatility.
        return self._sigma
    @sigma.setter
    def sigma(self, val):
        # Changing volatility must also refresh the cached sigma * sqrt(T).
        self._sigma = val
        self.sigmaT = val * self.exp_time**0.5
    def Premium(self):
        """Theoretical option value; the Type factor flips the formula for puts."""
        tmpprem = self.Type * (
            self.price
            * e ** (-self.div_cont * self.exp_time)
            * norm.cdf(self.Type * self.d1)
            - self.strike
            * e ** (-self.risk_free * self.exp_time)
            * norm.cdf(self.Type * self.d2)
        )
        return tmpprem
    # 1st order greeks
    def Delta(self):
        """Sensitivity of the option price to the underlying price."""
        dfq = np.exp(-self.div_cont * self.exp_time)
        if self.Type == 1:
            return dfq * norm.cdf(self.d1)
        return dfq * (norm.cdf(self.d1) - 1)
    def Vega(self):
        """Vega for 1% change in vol"""
        dfq = np.exp(-self.div_cont * self.exp_time)
        return 0.01 * self.price * dfq * norm.pdf(self.d1) * self.exp_time**0.5
    def Theta(self, time_factor=1.0 / 365.0):
        """Theta, by default for 1 calendar day change"""
        df = np.exp(-self.risk_free * self.exp_time)
        dfq = np.exp(-self.div_cont * self.exp_time)
        tmptheta = time_factor * (
            -0.5
            * self.price
            * dfq
            * norm.pdf(self.d1)
            * self.sigma
            / (self.exp_time**0.5)
            + self.Type
            * (
                self.div_cont * self.price * dfq * norm.cdf(self.Type * self.d1)
                - self.risk_free * self.strike * df * norm.cdf(self.Type * self.d2)
            )
        )
        return tmptheta
    def Rho(self):
        """Sensitivity to the risk-free rate, scaled for a 1% rate change."""
        df = np.exp(-self.risk_free * self.exp_time)
        return (
            self.Type
            * self.strike
            * self.exp_time
            * df
            * 0.01
            * norm.cdf(self.Type * self.d2)
        )
    def Phi(self):
        """Sensitivity to the dividend yield, scaled for a 1% change."""
        dfq = np.exp(-self.div_cont * self.exp_time)
        return (
            0.01
            * -self.Type
            * self.exp_time
            * self.price
            * dfq
            * norm.cdf(self.Type * self.d1)
        )
    # 2nd order greeks
    def Gamma(self):
        """Rate of change of delta with respect to the underlying price."""
        dfq = np.exp(-self.div_cont * self.exp_time)
        return dfq * norm.pdf(self.d1) / (self.price * self.sigmaT)
    def Charm(self, time_factor=1.0 / 365.0):
        """Calculates Charm, by default for 1 calendar day change"""
        dfq = np.exp(-self.div_cont * self.exp_time)
        cdf = norm.cdf(self.Type * self.d1)
        return (
            time_factor
            * -dfq
            * (
                norm.pdf(self.d1)
                * (
                    (self.risk_free - self.div_cont) / (self.sigmaT)
                    - self.d2 / (2 * self.exp_time)
                )
                + (self.Type * -self.div_cont) * cdf
            )
        )
    def Vanna(self, change: float):
        """
        Vanna for a given percent change in volatility
        Parameters
        ----------
        change : float
            The change in volatility
        Returns
        ----------
        num : float
            The Vanna
        """
        return (
            change
            * -(e ** (-self.div_cont * self.exp_time))
            * self.d2
            / self.sigma
            * norm.pdf(self.d1)
        )
    def Vomma(self, change):
        """
        Vomma for a given percent change in volatility
        Parameters
        ----------
        change : float
            The change in volatility
        Returns
        ----------
        num : float
            The Vomma
        """
        return (
            change
            * np.exp(-self.div_cont * self.exp_time)
            * self.d1
            * self.d2
            * np.sqrt(self.exp_time)
            * self.price
            * norm.pdf(self.d1)
            / self._sigma
        )
import logging
from typing import Union
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks.options import (
chartexchange_model,
nasdaq_model,
tradier_model,
yfinance_model,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_full_option_chain(
    symbol: str, source: str = "Nasdaq", expiration: Union[str, None] = None
) -> pd.DataFrame:
    """Fetch a full option chain (no greek data) for a stock from one source.
    Parameters
    ----------
    symbol : str
        Symbol to get chain for
    source : str, optional
        Source to get data from, by default "Nasdaq"
    expiration : Union[str, None], optional
        Date to get chain for. By default returns all dates
    Returns
    -------
    pd.DataFrame
        Dataframe of full option chain; empty for an unknown source.
    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> aapl_option_chain = openbb.stocks.options.chains("AAPL", source = "Nasdaq")
    To get a specific expiration date, use the expiration parameter
    >>> aapl_chain_date = openbb.stocks.options.chains("AAPL", expiration="2023-07-21", source="Nasdaq")
    """
    if source in ("Tradier", "YahooFinance"):
        model = tradier_model if source == "Tradier" else yfinance_model
        chain = model.get_full_option_chain(symbol)
        # These sources return every expiration; filter locally if asked.
        return chain[chain.expiration == expiration] if expiration else chain
    if source == "Nasdaq":
        # Nasdaq exposes a dedicated endpoint for a single expiration.
        if expiration:
            return nasdaq_model.get_chain_given_expiration(symbol, expiration)
        return nasdaq_model.get_full_option_chain(symbol)
    logger.info("Invalid Source")
    return pd.DataFrame()
@log_start_end(log=logger)
def get_option_expirations(symbol: str, source: str = "Nasdaq") -> list:
    """Get Option Chain Expirations
    Parameters
    ----------
    symbol : str
        Symbol to get chain for
    source : str, optional
        Source to get data from, by default "Nasdaq"
    Returns
    -------
    list
        Available expiration dates; empty for an unknown source
    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> SPX_expirations = openbb.stocks.options.expirations("SPX", source = "Tradier")
    """
    if source == "Tradier":
        return tradier_model.option_expirations(symbol)
    if source == "YahooFinance":
        return yfinance_model.option_expirations(symbol)
    if source == "Nasdaq":
        return nasdaq_model.get_expirations(symbol)
    logger.info("Invalid Source")
    # Honor the declared return type: previously this returned an empty
    # DataFrame, which breaks callers expecting a list.
    return []
def hist(
    symbol: str,
    exp: str,
    strike: Union[int, float, str],
    call: bool = True,
    source: str = "ChartExchange",
) -> pd.DataFrame:
    """Get historical option pricing.
    Parameters
    ----------
    symbol : str
        Symbol to get data for
    exp : str
        Expiration date
    strike : Union[int, float, str]
        Strike price
    call : bool, optional
        Flag to indicate a call, by default True
    source : str, optional
        Source to get data from. Can be ChartExchange or Tradier, by default "ChartExchange"
    Returns
    -------
    pd.DataFrame
        DataFrame of historical option pricing; empty for an unknown source
    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> aapl_150_call = openbb.stocks.options.hist("AAPL", "2022-11-18", 150, call=True, source="ChartExchange")
    Because this generates a dataframe, we can easily plot the close price for a SPY put:
    (Note that Tradier requires an API key)
    >>> openbb.stocks.options.hist("SPY", "2022-11-18", 400, call=False, source="Tradier").plot(y="close")
    """
    # Sources are matched case-insensitively.
    if source.lower() == "chartexchange":
        return chartexchange_model.get_option_history(symbol, exp, call, strike)
    if source.lower() == "tradier":
        # The Tradier model takes a put flag, hence the negation of `call`.
        return tradier_model.get_historical_options(symbol, exp, strike, not call)
    return pd.DataFrame()
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
import openbb_terminal.config_plot as cfp
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
is_valid_axes_count,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.stocks.options import nasdaq_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_oi(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot call and put open interest by strike for one expiration.
    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiry date for options
    min_sp: float
        Min strike to consider; -1 defaults to 75% of the last price
    max_sp: float
        Max strike to consider; -1 defaults to 125% of the last price
    raw: bool
        Flag to display raw data
    export: str
        Format to export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    option_chain = nasdaq_model.get_chain_given_expiration(symbol, expiry)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "oi_nasdaq",
        option_chain,
    )
    current_price = nasdaq_model.get_last_price(symbol)
    # Default the strike window to +/- 25% around the last price.
    min_strike = 0.75 * current_price if min_sp == -1 else min_sp
    max_strike = 1.25 * current_price if max_sp == -1 else max_sp
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.plot(
        option_chain.strike,
        option_chain["c_Openinterest"] / 1000,
        ls="-",
        marker="o",
        label="Calls",
    )
    ax.plot(
        option_chain.strike,
        option_chain["p_Openinterest"] / 1000,
        ls="-",
        marker="o",
        label="Puts",
    )
    ax.axvline(current_price, lw=2, ls="--", label="Current Price", alpha=0.7)
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Open Interest (1k) ")
    ax.set_xlim(min_strike, max_strike)
    # BUG FIX: the keyword was misspelled "fontsite", which matplotlib rejects.
    ax.legend(loc="best", fontsize="x-small")
    ax.set_title(f"Open Interest for {symbol.upper()} expiring {expiry}")
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    if raw:
        to_print = option_chain[["c_Openinterest", "strike", "p_Openinterest"]]
        print_rich_table(
            to_print[(to_print.strike < max_strike) & (to_print.strike > min_strike)],
            headers=to_print.columns,
            title=f"Open Interest for {symbol} expiring on {expiry}.",
        )
@log_start_end(log=logger)
def display_volume(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot call and put volume by strike for one expiration.
    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiry date for options
    min_sp: float
        Min strike to consider; -1 defaults to 75% of the last price
    max_sp: float
        Max strike to consider; -1 defaults to 125% of the last price
    raw: bool
        Flag to display raw data
    export: str
        Format to export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    option_chain = nasdaq_model.get_chain_given_expiration(symbol, expiry)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        # Export under a volume-specific tag; "oi_nasdaq" here was a
        # copy-paste slip from display_oi.
        "vol_nasdaq",
        option_chain,
    )
    current_price = nasdaq_model.get_last_price(symbol)
    # Default the strike window to +/- 25% around the last price.
    min_strike = 0.75 * current_price if min_sp == -1 else min_sp
    max_strike = 1.25 * current_price if max_sp == -1 else max_sp
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.plot(
        option_chain.strike,
        option_chain["c_Volume"] / 1000,
        ls="-",
        marker="o",
        label="Calls",
    )
    ax.plot(
        option_chain.strike,
        option_chain["p_Volume"] / 1000,
        ls="-",
        marker="o",
        label="Puts",
    )
    ax.axvline(current_price, lw=2, ls="--", label="Current Price", alpha=0.7)
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Volume (1k) ")
    ax.set_xlim(min_strike, max_strike)
    ax.legend(loc="best", fontsize="x-small")
    ax.set_title(f"Volume for {symbol.upper()} expiring {expiry}")
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
    if raw:
        to_print = option_chain[["c_Volume", "strike", "p_Volume"]]
        print_rich_table(
            to_print[(to_print.strike < max_strike) & (to_print.strike > min_strike)],
            headers=to_print.columns,
            title=f"Volume for {symbol} expiring on {expiry}.",
        )
@log_start_end(log=logger)
def display_volume_and_oi(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plot volume and open interest
    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiry date for options
    min_sp: float
        Min strike to consider; -1 defaults to 75% of the last price
    max_sp: float
        Max strike to consider; -1 defaults to 125% of the last price
    raw: bool
        Flag to display raw data
    export: str
        Format to export file
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    option_chain = nasdaq_model.get_chain_given_expiration(symbol, expiry)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "voi_nasdaq",
        option_chain,
    )
    current_price = nasdaq_model.get_last_price(symbol)
    # When no strike bounds are given, default to +/- 25% around the last price.
    if min_sp == -1:
        min_strike = 0.75 * current_price
    else:
        min_strike = min_sp
    if max_sp == -1:
        max_strike = 1.25 * current_price
    else:
        max_strike = max_sp
    if raw:
        to_print = option_chain[
            ["c_Openinterest", "c_Volume", "strike", "p_Volume", "p_Openinterest"]
        ]
        print_rich_table(
            to_print[(to_print.strike < max_strike) & (to_print.strike > min_strike)],
            headers=to_print.columns,
            title=f"Volume and Open Interest for {symbol} expiring on {expiry}.",
        )
    # Work on a copy for plotting: puts are negated so they draw below the
    # axis, and both sides are rescaled to thousands.
    option_chain = option_chain.copy()[
        ["c_Volume", "c_Openinterest", "strike", "p_Openinterest", "p_Volume"]
    ]
    option_chain[["p_Openinterest", "p_Volume"]] = (
        option_chain[["p_Openinterest", "p_Volume"]] * -1 / 1000
    )
    option_chain[["c_Openinterest", "c_Volume"]] = (
        option_chain[["c_Openinterest", "c_Volume"]] / 1000
    )
    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    ax.bar(
        option_chain.strike,
        option_chain.c_Openinterest,
        color="green",
        label="Calls: OI",
    )
    ax.bar(
        option_chain.strike,
        option_chain.c_Volume,
        color="lightgreen",
        label="Calls: Vol",
    )
    ax.bar(
        option_chain.strike, option_chain.p_Openinterest, color="red", label="Puts: OI"
    )
    ax.bar(option_chain.strike, option_chain.p_Volume, color="pink", label="Puts:Vol")
    ax.axvline(
        current_price, lw=2, ls="--", label=f"Current Price: {current_price}", alpha=0.7
    )
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Volume or OI (1k)")
    ax.set_xlim(min_strike, max_strike)
    ax.legend(loc="best", fontsize="xx-small")
    ax.set_title(f"Volume and Open Interest for {symbol.upper()} expiring {expiry}")
    theme.style_primary_axis(ax)
    if external_axes is None:
        theme.visualize_output()
@log_start_end(log=logger)
def display_chains(symbol: str, expiry: str, export: str = ""):
    """Show the option chain table for a given expiration.
    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        Expiry date for options
    export: str
        Format to export data
    """
    chain_df = nasdaq_model.get_chain_given_expiration(symbol, expiry)
    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "chain_nasdaq",
        chain_df,
    )
    print_rich_table(chain_df, headers=chain_df.columns)
__docformat__ = "numpy"
import logging
from typing import Tuple
import numpy as np
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def unusual_options(limit: int = 100) -> Tuple[pd.DataFrame, pd.Timestamp]:
    """Get unusual option activity from fdscanner.com
    Parameters
    ----------
    limit: int
        Number to show
    Returns
    -------
    Tuple[pd.DataFrame, pd.Timestamp]
        Dataframe containing options information, Timestamp indicated when data
        was updated from website. On a request error the second element is the
        string "request error" instead of a Timestamp.
    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> unu_df = openbb.stocks.options.unu()
    """
    # The API pages 20 results at a time.
    pages = np.arange(0, limit // 20 + 1)
    data_list = []
    for page_num in pages:
        r = requests.get(
            f"https://app.fdscanner.com/api2/unusualvolume?p=0&page_size=20&page={int(page_num)}",
            headers={"User-Agent": get_user_agent()},
            timeout=10,  # avoid hanging indefinitely on a stalled connection
        )
        if r.status_code != 200:
            console.print("Error in fdscanner request")
            return pd.DataFrame(), "request error"
        data_list.append(r.json())
    # Build rows directly instead of maintaining nine parallel lists.
    rows = []
    for data in data_list:
        for entry in data["data"]:
            rows.append(
                {
                    "Ticker": entry["tk"],
                    "Exp": entry["expiry"],
                    "Strike": float(entry["s"]),
                    "Type": "Put" if entry["t"] == "P" else "Call",
                    "Vol/OI": entry["vol/oi"],
                    "Vol": entry["v"],
                    "OI": entry["oi"],
                    "Bid": entry["b"],
                    "Ask": entry["a"],
                }
            )
    # Shift the UTC epoch timestamp back 5 hours to align with NYSE (US/Eastern)
    last_updated = pd.to_datetime(
        data_list[-1]["last_updated"], unit="s"
    ) - pd.Timedelta(hours=5)
    # Explicit columns keep the schema even when no rows were returned.
    df = pd.DataFrame(
        rows,
        columns=[
            "Ticker",
            "Exp",
            "Strike",
            "Type",
            "Vol/OI",
            "Vol",
            "OI",
            "Bid",
            "Ask",
        ],
    )
    return df, last_updated
__docformat__ = "numpy"
import logging
import math
import warnings
from datetime import date, datetime, timedelta
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import yfinance as yf
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_rf
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import op_helpers
from openbb_terminal.stocks.options.op_helpers import Option
logger = logging.getLogger(__name__)
# Raw yfinance chain columns kept by get_full_option_chain, and the renames
# applied to them before the call/put suffixes are added.
option_chain_cols = [
    "strike",
    "lastPrice",
    "bid",
    "ask",
    "volume",
    "openInterest",
    "impliedVolatility",
]
option_chain_dict = {"openInterest": "openinterest", "impliedVolatility": "iv"}
def get_full_option_chain(symbol: str) -> pd.DataFrame:
    """Get all options for given ticker [Source: Yahoo Finance]
    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    Returns
    -------
    pd.DataFrame
        Option chain with call ("_c") and put ("_p") columns merged on strike,
        one set of rows per expiration date
    """
    ticker = yf.Ticker(symbol)
    dates = ticker.options
    options = pd.DataFrame()
    for _date in dates:
        # Fetch the chain once per expiration (previously it was fetched
        # twice: once for calls and once for puts).
        chain = ticker.option_chain(_date)
        calls = chain.calls[option_chain_cols].rename(columns=option_chain_dict)
        puts = chain.puts[option_chain_cols].rename(columns=option_chain_dict)
        # Suffix non-strike columns so calls/puts can share one row per strike.
        calls.columns = [x + "_c" if x != "strike" else x for x in calls.columns]
        puts.columns = [x + "_p" if x != "strike" else x for x in puts.columns]
        temp = pd.merge(calls, puts, how="outer", on="strike")
        temp["expiration"] = _date
        options = pd.concat([options, temp], axis=0).reset_index(drop=True)
    return options
# pylint: disable=W0640
@log_start_end(log=logger)
def get_option_chain_expiry(
    symbol: str,
    expiry: str,
    min_sp: float = -1,
    max_sp: float = -1,
    calls: bool = True,
    puts: bool = True,
) -> pd.DataFrame:
    """Get full option chains with calculated greeks
    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Expiration date for chain in format YYY-mm-dd
    min_sp: float
        Minimum strike to keep; -1 (default) uses the 25th percentile
    max_sp: float
        Maximum strike to keep; -1 (default) uses the 75th percentile
    calls: bool
        Flag to get calls
    puts: bool
        Flag to get puts
    Returns
    -------
    pd.DataFrame
        DataFrame of option chain. When both sides are requested they are
        outer-merged on strike with "_call"/"_put" column suffixes.
    """
    try:
        yf_ticker = yf.Ticker(symbol)
        options = yf_ticker.option_chain(expiry)
    except ValueError:
        console.print(f"[red]{symbol} options for {expiry} not found.[/red]")
        return pd.DataFrame()
    last_price = yf_ticker.info["regularMarketPrice"]
    # Columns we want to get
    yf_option_cols = [
        "strike",
        "lastPrice",
        "bid",
        "ask",
        "volume",
        "openInterest",
        "impliedVolatility",
    ]
    # Get call and put dataframes if the booleans are true
    put_df = options.puts[yf_option_cols].copy() if puts else pd.DataFrame()
    call_df = options.calls[yf_option_cols].copy() if calls else pd.DataFrame()
    # so that the loop below doesn't break if only one call/put is supplied
    df_list, option_factor = [], []
    if puts:
        df_list.append(put_df)
        option_factor.append(-1)
    if calls:
        df_list.append(call_df)
        option_factor.append(1)
    # Rows with zero implied volatility cannot be priced; drop them first.
    df_list = [x[x["impliedVolatility"] > 0].copy() for x in df_list]
    # Add in greeks to each df
    # Time to expiration (in days, measured to 4 PM on the expiry date):
    dt = (
        datetime.strptime(expiry, "%Y-%m-%d") + timedelta(hours=16) - datetime.now()
    ).total_seconds() / (60 * 60 * 24)
    rf = get_rf()
    # Note the way the Option class is defined, put has a -1 input and call has a +1 input
    for df, option_type in zip(df_list, option_factor):
        df["Delta"] = df.apply(
            lambda x: Option(
                last_price, x.strike, rf, 0, dt, x.impliedVolatility, option_type
            ).Delta(),
            axis=1,
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            df["Gamma"] = df.apply(
                lambda x: Option(
                    last_price, x.strike, rf, 0, dt, x.impliedVolatility, option_type
                ).Gamma(),
                axis=1,
            )
            df["Theta"] = df.apply(
                lambda x: Option(
                    last_price, x.strike, rf, 0, dt, x.impliedVolatility, option_type
                ).Theta(),
                axis=1,
            )
    # NOTE(review): calls=False and puts=False leaves options_df unbound and
    # raises NameError below — consider an early return for that case.
    if len(df_list) == 1:
        options_df = df_list[0]
    if len(df_list) == 2:
        options_df = pd.merge(
            left=df_list[1],
            right=df_list[0],
            on="strike",
            how="outer",
            suffixes=["_call", "_put"],
        )
    # If min/max strike aren't provided, just get the middle 50% of strikes
    if min_sp == -1:
        min_strike = np.percentile(options_df["strike"], 25)
    else:
        min_strike = min_sp
    if max_sp == -1:
        max_strike = np.percentile(options_df["strike"], 75)
    else:
        max_strike = max_sp
    options_df = options_df[
        (options_df.strike >= min_strike) & (options_df.strike <= max_strike)
    ]
    return options_df
@log_start_end(log=logger)
def option_expirations(symbol: str):
    """List available option expiration dates for a ticker.
    Parameters
    ----------
    symbol: str
        Ticker symbol to get expirations for
    Returns
    -------
    List[str]
        Available expiration dates; empty when none are found
    """
    expirations = list(yf.Ticker(symbol).options)
    if not expirations:
        console.print("No expiration dates found for ticker. \n")
    return expirations
@log_start_end(log=logger)
def get_option_chain(symbol: str, expiry: str):
    """Fetch the yfinance option chain for one expiration.
    Parameters
    ----------
    symbol: str
        Ticker symbol to get options for
    expiry: str
        Date to get options for. YYYY-MM-DD
    Returns
    -------
    chains: yf.ticker.Options
        Options chain; an empty ``op_helpers.Chain`` when the expiry is invalid
    """
    try:
        return yf.Ticker(symbol).option_chain(expiry)
    except Exception:
        console.print(f"[red]Error: Expiration {expiry} cannot be found.[/red]")
        return op_helpers.Chain(pd.DataFrame(), "yahoo")
@log_start_end(log=logger)
def get_dividend(symbol: str) -> pd.Series:
    """Fetch the dividend payment history for a ticker.
    Parameters
    ----------
    symbol: str
        Ticker symbol to get dividends for
    Returns
    -------
    pd.Series
        Dividend history from yfinance
    """
    return yf.Ticker(symbol).dividends
@log_start_end(log=logger)
def get_x_values(current_price: float, options: List[Dict[str, int]]) -> List[float]:
    """Build 101 evenly spaced candidate expiration prices.
    Without options the window spans 50%-150% of the current price; with
    options it is padded 20% beyond the extreme strikes and current price.
    """
    if options:
        strikes = [opt["strike"] for opt in options]
        upper = max(current_price, max(strikes)) * 1.2
        lower = min(current_price, min(strikes)) * 0.8
    else:
        lower = current_price * 0.5
        upper = current_price * 1.5
    span = upper - lower
    return [(step / 100) * span + lower for step in range(101)]
def get_y_values(
    base: float,
    price: float,
    options: List[Dict[Any, Any]],
    underlying: int,
) -> float:
    """Profit/loss of a position if the underlying finishes at ``price``.
    Each option leg contributes its intrinsic value times its ``sign``;
    ``underlying`` shares contribute the move from ``base``.
    """
    payoff = 0
    for leg in options:
        kind = leg["type"]
        if kind == "Call":
            payoff += leg["sign"] * max(price - leg["strike"], 0)
        elif kind == "Put":
            payoff += leg["sign"] * max(leg["strike"] - price, 0)
    return (price - base) * underlying + payoff
@log_start_end(log=logger)
def generate_data(
    current_price: float, options: List[Dict[str, int]], underlying: int
) -> Tuple[List[float], List[float], List[float]]:
    """Return candidate prices plus P&L before and after premiums paid.
    The third list is empty when the net premium is zero.
    """
    # Drop empty placeholder legs before any computation.
    legs = [leg for leg in options if leg]
    price_grid = get_x_values(current_price, legs)
    premiums = sum(leg["cost"] for leg in legs)
    gross = [get_y_values(current_price, p, legs, underlying) for p in price_grid]
    if premiums == 0:
        return price_grid, gross, []
    net = [value - premiums for value in gross]
    return price_grid, gross, net
@log_start_end(log=logger)
def get_price(symbol: str) -> float:
    """Get the most recent closing price for a ticker.
    Parameters
    ----------
    symbol : str
        The ticker symbol to get the price for
    Returns
    -------
    float
        Latest close from yfinance history
    """
    history = yf.Ticker(symbol).history()
    return history["Close"].iloc[-1]
@log_start_end(log=logger)
def get_info(symbol: str):
    """Get the yfinance info mapping for a ticker.
    Parameters
    ----------
    symbol : str
        The ticker symbol to look up
    Returns
    -------
    dict
        The info for a given ticker
    """
    return yf.Ticker(symbol).info
@log_start_end(log=logger)
def get_closing(symbol: str) -> pd.Series:
    """Get one year of daily closing prices for a ticker.
    Parameters
    ----------
    symbol : str
        The ticker symbol to get the prices for
    Returns
    -------
    pd.Series
        Closing prices over the past year
    """
    return yf.Ticker(symbol).history(period="1y")["Close"]
@log_start_end(log=logger)
def get_dte(date_value: str) -> int:
    """Whole days from now until a YYYY-MM-DD expiration date."""
    expiration = datetime.strptime(date_value, "%Y-%m-%d")
    return (expiration - datetime.now()).days
@log_start_end(log=logger)
def get_iv_surface(symbol: str) -> pd.DataFrame:
    """Gets IV surface for calls and puts for ticker
    Parameters
    ----------
    symbol: str
        Stock ticker symbol to get
    Returns
    -------
    pd.DataFrame
        Dataframe of strike, impliedVolatility, openInterest, lastPrice and dte
    """
    stock = yf.Ticker(symbol)
    dates = stock.options
    columns = ["strike", "impliedVolatility", "openInterest", "lastPrice"]
    frames = []
    for date_value in dates:
        # Fetch the chain once per expiration (previously it was fetched
        # twice: once for calls and once for puts).
        chain = stock.option_chain(date_value)
        dte = get_dte(date_value)
        for side in (chain.calls, chain.puts):
            df = side[columns].copy()
            df["dte"] = dte
            frames.append(df)
    if not frames:
        # Mirror the previous behavior of returning an empty frame when the
        # ticker has no listed expirations.
        return pd.DataFrame()
    return pd.concat(frames, axis=0)
@log_start_end(log=logger)
def get_binom(
    symbol: str,
    expiry: str,
    strike: float = 0,
    put: bool = False,
    europe: bool = False,
    vol: float = None,
):
    """Gets binomial pricing for options

    Prices an option with a Cox-Ross-Rubinstein-style binomial tree built on
    trading days (weekend dates are skipped when growing the tree).

    Parameters
    ----------
    symbol : str
        The ticker symbol of the option's underlying asset
    expiry : str
        The expiration for the option
    strike : float
        The strike price for the option
    put : bool
        Value a put instead of a call
    europe : bool
        Value a European option instead of an American option
    vol : float
        The annualized volatility for the underlying asset

    Returns
    -------
    up : float
        Per-step multiplicative up-move factor
    prob_up : float
        Risk-neutral probability of an up move
    discount : float
        Per-step discount factor
    und_vals : List[List[float]]
        Binomial tree of underlying prices, root layer first
    opt_vals : List[List[float]]
        Binomial tree of option values, root layer first
    days : int
        Calendar days until expiration
    """
    # Base variables to calculate values
    info = get_info(symbol)
    price = info["regularMarketPrice"]
    if vol is None:
        # Fall back to realized vol: stdev of daily price ratios, annualized
        closings = get_closing(symbol)
        vol = (closings / closings.shift()).std() * (252**0.5)
    div_yield = (
        info["trailingAnnualDividendYield"]
        if info["trailingAnnualDividendYield"] is not None
        else 0
    )
    delta_t = 1 / 252  # one trading day per tree step
    rf = get_rf()
    exp_date = datetime.strptime(expiry, "%Y-%m-%d").date()
    today = date.today()
    days = (exp_date - today).days
    # Binomial pricing specific variables
    up = math.exp(vol * (delta_t**0.5))
    down = 1 / up
    prob_up = (math.exp((rf - div_yield) * delta_t) - down) / (up - down)
    prob_down = 1 - prob_up
    discount = math.exp(delta_t * rf)
    und_vals: List[List[float]] = [[price]]
    # Binomial tree for underlying values
    for i in range(days):
        cur_date = today + timedelta(days=i + 1)
        if cur_date.weekday() < 5:
            # Trading day: move every node up and append one extra down node
            last = und_vals[-1]
            new = [x * up for x in last]
            new.append(last[-1] * down)
            und_vals.append(new)
    # Binomial tree for option values: start from the payoff at expiration
    if put:
        opt_vals = [[max(strike - x, 0) for x in und_vals[-1]]]
    else:
        opt_vals = [[max(x - strike, 0) for x in und_vals[-1]]]
    # Roll the tree back one layer at a time; j indexes the matching
    # underlying layer counted from the end (und_vals[-j])
    j = 2
    while len(opt_vals[0]) > 1:
        new_vals = []
        for i in range(len(opt_vals[0]) - 1):
            if europe:
                # European: discounted risk-neutral expectation only
                value = (
                    opt_vals[0][i] * prob_up + opt_vals[0][i + 1] * prob_down
                ) / discount
            else:
                # American: also allow early exercise at this node
                if put:
                    value = max(
                        (opt_vals[0][i] * prob_up + opt_vals[0][i + 1] * prob_down)
                        / discount,
                        strike - und_vals[-j][i],
                    )
                else:
                    value = max(
                        (opt_vals[0][i] * prob_up + opt_vals[0][i + 1] * prob_down)
                        / discount,
                        und_vals[-j][i] - strike,
                    )
            new_vals.append(value)
        opt_vals.insert(0, new_vals)
        j += 1
    return up, prob_up, discount, und_vals, opt_vals, days
@log_start_end(log=logger)
def get_greeks(
    symbol: str,
    expire: str,
    div_cont: float = 0,
    rf: float = None,
    opt_type: int = 1,
    mini: float = None,
    maxi: float = None,
    show_all: bool = False,
) -> pd.DataFrame:
    """
    Gets the greeks for a given option

    Parameters
    ----------
    symbol: str
        The ticker symbol value of the option
    expire: str
        The date of expiration
    div_cont: float
        The dividend continuous rate
    rf: float
        The risk-free rate
    opt_type: Union[1, -1]
        The option type 1 is for call and -1 is for put
    mini: float
        The minimum strike price to include in the table
    maxi: float
        The maximum strike price to include in the table
    show_all: bool
        Whether to show all greeks

    Returns
    -------
    pd.DataFrame
        One row per strike with implied vol and the requested greeks
    """
    spot = get_price(symbol)
    full_chain = get_option_chain(symbol, expire)
    chain = full_chain.calls if opt_type == 1 else full_chain.puts

    # Default the strike window to the interquartile range of the chain
    if mini is None:
        mini = chain.strike.quantile(0.25)
    if maxi is None:
        maxi = chain.strike.quantile(0.75)
    chain = chain[(chain["strike"] >= mini) & (chain["strike"] <= maxi)]

    risk_free = get_rf() if rf is None else rf
    expire_dt = datetime.strptime(expire, "%Y-%m-%d")
    # Time to expiration in days, shifted to the 4pm close
    dif = (expire_dt - datetime.now() + timedelta(hours=16)).total_seconds() / (
        60 * 60 * 24
    )

    columns = ["Strike", "Implied Vol", "Delta", "Gamma", "Vega", "Theta"]
    if show_all:
        columns = columns + ["Rho", "Phi", "Charm", "Vanna", "Vomma"]

    rows = []
    for _, row in chain.iterrows():
        iv = row["impliedVolatility"]
        opt = Option(spot, row["strike"], risk_free, div_cont, dif, iv, opt_type)
        entry = [
            row["strike"],
            iv,
            opt.Delta(),
            opt.Gamma(),
            opt.Vega(),
            opt.Theta(),
        ]
        if show_all:
            entry.extend(
                [opt.Rho(), opt.Phi(), opt.Charm(), opt.Vanna(0.01), opt.Vomma(0.01)]
            )
        rows.append(entry)

    return pd.DataFrame(rows, columns=columns)
@log_start_end(log=logger)
def get_vol(
    symbol: str,
    expiry: str,
) -> pd.DataFrame:
    """Fetch the option chain used by the volume view

    Note: despite being named for plotting, this function does not plot
    anything; it simply returns the chain for the requested expiration.

    Parameters
    ----------
    symbol: str
        Ticker symbol
    expiry: str
        expiration date for options

    Returns
    -------
    Option chain for the given expiration, as returned by get_option_chain
    (annotated as pd.DataFrame; exact type depends on the yfinance helper)
    """
    options = get_option_chain(symbol, expiry)
    return options
@log_start_end(log=logger)
def get_volume_open_interest(
    symbol: str,
    expiry: str,
) -> pd.DataFrame:
    """Fetch the option chain used by the volume/open-interest view

    Note: despite being named for plotting, this function does not plot
    anything; it simply returns the chain for the requested expiration.

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Option expiration

    Returns
    -------
    Option chain for the given expiration, as returned by get_option_chain
    (annotated as pd.DataFrame; exact type depends on the yfinance helper)
    """
    options = get_option_chain(symbol, expiry)
    return options
__docformat__ = "numpy"
import logging
from typing import List, Optional
import pandas as pd
import requests
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
# Base per-contract fields returned by the Tradier chains endpoint
option_columns = [
    "symbol",
    "bid",
    "ask",
    "strike",
    "bidsize",
    "asksize",
    "volume",
    "open_interest",
    "option_type",
]
# Greek fields nested under each contract's "greeks" key
greek_columns = ["delta", "gamma", "theta", "vega", "ask_iv", "bid_iv", "mid_iv"]
# Full schema of a processed option chain (see process_chains)
df_columns = option_columns + greek_columns
# Columns shown by default when displaying a chain
default_columns = [
    "mid_iv",
    "vega",
    "delta",
    "gamma",
    "theta",
    "volume",
    "open_interest",
    "bid",
    "ask",
]
@log_start_end(log=logger)
@check_api_key(["API_TRADIER_TOKEN"])
def get_historical_options(
    symbol: str,
    expiry: str,
    strike: float = 0,
    put: bool = False,
    chain_id: Optional[str] = None,
) -> pd.DataFrame:
    """
    Gets historical option pricing. This inputs either ticker, expiration, strike or the OCC chain ID and processes
    the request to tradier for historical premiums.

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Option expiration date
    strike: int
        Option strike price
    put: bool
        Is this a put option?
    chain_id: Optional[str]
        OCC chain ID

    Returns
    -------
    df_hist: pd.DataFrame
        Dataframe of historical option prices, indexed by date; empty on error
    """
    if not chain_id:
        op_type = "put" if put else "call"
        chain = get_option_chains(symbol, expiry)
        try:
            # Resolve the OCC symbol matching the requested strike and side
            symbol = chain[(chain.strike == strike) & (chain.option_type == op_type)][
                "symbol"
            ].values[0]
        except IndexError:
            error = f"Strike: {strike}, Option type: {op_type} not found"
            logger.exception(error)
            console.print(f"{error}\n")
            return pd.DataFrame()
    else:
        symbol = chain_id
    response = requests.get(
        "https://sandbox.tradier.com/v1/markets/history",
        # Pass the symbol directly; the original wrapped it in a set literal
        params={"symbol": symbol, "interval": "daily"},
        headers={
            "Authorization": f"Bearer {cfg.API_TRADIER_TOKEN}",
            "Accept": "application/json",
        },
    )
    if response.status_code != 200:
        console.print("Error with request")
        return pd.DataFrame()
    data = response.json()["history"]
    if not data:
        console.print("No historical data available")
        return pd.DataFrame()
    df_hist = pd.DataFrame(data["day"])
    df_hist = df_hist.set_index("date")
    df_hist.index = pd.DatetimeIndex(df_hist.index)
    return df_hist
# pylint: disable=no-else-return
# Subset of chain columns used for trimmed-down displays
option_cols = [
    "strike",
    "bid",
    "ask",
    "volume",
    "open_interest",
    "mid_iv",
]
# Rename map applied when presenting the columns above
option_col_map = {"open_interest": "openinterest", "mid_iv": "iv"}
@log_start_end(log=logger)
@check_api_key(["API_TRADIER_TOKEN"])
def get_full_option_chain(symbol: str) -> pd.DataFrame:
    """Get option chains for every available expiration of a ticker

    Parameters
    ----------
    symbol: str
        Ticker symbol to get chains for

    Returns
    -------
    pd.DataFrame
        Dataframe of all option chains, one row per contract, with
        expiration, Strike and Type columns
    """
    expirations = option_expirations(symbol)
    # One chain per expiration, concatenated into a single frame
    options_dfs: List[pd.DataFrame] = []
    for expiry in expirations:
        options_dfs.append(get_option_chains(symbol, expiry))
    options_df = pd.concat(options_dfs)
    options_df.set_index(keys="symbol", inplace=True)
    # Decompose the OCC option symbol (e.g. AAPL230616C00150000) into
    # ticker / expiration / call-put flag / strike components
    option_df_index = pd.Series(options_df.index).str.extractall(
        r"^(?P<Ticker>\D*)(?P<Expiration>\d*)(?P<Type>\D*)(?P<Strike>\d*)"
    )
    option_df_index.reset_index(inplace=True)
    option_df_index = pd.DataFrame(
        option_df_index, columns=["Ticker", "Expiration", "Strike", "Type"]
    )
    # Strike/type parsed from the symbol are overwritten with the exact
    # values reported by the API to avoid scaling/format issues
    option_df_index["Strike"] = options_df["strike"].values
    option_df_index["Type"] = options_df["option_type"].values
    option_df_index["Expiration"] = pd.DatetimeIndex(
        data=option_df_index["Expiration"], yearfirst=True
    ).strftime("%Y-%m-%d")
    option_df_index["Type"] = pd.DataFrame(option_df_index["Type"]).replace(
        to_replace=["put", "call"], value=["Put", "Call"]
    )
    options_df_columns = list(options_df.columns)
    # Rebuild the chain on a (Ticker, Expiration, Strike, Type) MultiIndex
    option_df_index.set_index(
        keys=["Ticker", "Expiration", "Strike", "Type"], inplace=True
    )
    options_df = pd.DataFrame(
        data=options_df.values, index=option_df_index.index, columns=options_df_columns
    )
    # Human-friendly column names for display
    options_df.rename(
        columns={
            "bid": "Bid",
            "ask": "Ask",
            "strike": "Strike",
            "bidsize": "Bid Size",
            "asksize": "Ask Size",
            "volume": "Volume",
            "open_interest": "OI",
            "delta": "Delta",
            "gamma": "Gamma",
            "theta": "Theta",
            "vega": "Vega",
            "ask_iv": "Ask IV",
            "bid_iv": "Bid IV",
            "mid_iv": "IV",
        },
        inplace=True,
    )
    options_columns = [
        "Volume",
        "OI",
        "IV",
        "Delta",
        "Gamma",
        "Theta",
        "Vega",
        "Bid Size",
        "Bid",
        "Ask",
        "Ask Size",
        "Bid IV",
        "Ask IV",
    ]
    options = pd.DataFrame(options_df, columns=options_columns)
    options = options.reset_index()
    # Ticker is redundant (single-symbol request); drop it from the output
    options.drop(labels=["Ticker"], inplace=True, axis=1)
    options.rename(columns={"Expiration": "expiration"}, inplace=True)
    return options
@log_start_end(log=logger)
@check_api_key(["API_TRADIER_TOKEN"])
def option_expirations(symbol: str) -> List[str]:
    """Get available expiration dates for given ticker

    Parameters
    ----------
    symbol: str
        Ticker symbol to get expirations for

    Returns
    -------
    List[str]
        List of available expiration dates; empty on any failure
    """
    response = requests.get(
        "https://sandbox.tradier.com/v1/markets/options/expirations",
        params={"symbol": symbol, "includeAllRoots": "true", "strikes": "false"},
        headers={
            "Authorization": f"Bearer {cfg.API_TRADIER_TOKEN}",
            "Accept": "application/json",
        },
    )
    if response.status_code != 200:
        console.print("Tradier request failed. Check token. \n")
        return []
    try:
        return response.json()["expirations"]["date"]
    except TypeError:
        logging.exception("Error in tradier JSON response. Check loaded ticker.")
        console.print("Error in tradier JSON response. Check loaded ticker.\n")
        return []
@log_start_end(log=logger)
@check_api_key(["API_TRADIER_TOKEN"])
def get_option_chains(symbol: str, expiry: str) -> pd.DataFrame:
    """Display option chains [Source: Tradier]"

    Parameters
    ----------
    symbol : str
        Ticker to get options for
    expiry : str
        Expiration date in the form of "YYYY-MM-DD"

    Returns
    -------
    pd.DataFrame
        Dataframe with options for the given Symbol and Expiration date
    """
    response = requests.get(
        "https://sandbox.tradier.com/v1/markets/options/chains",
        params={"symbol": symbol, "expiration": expiry, "greeks": "true"},
        headers={
            "Authorization": f"Bearer {cfg.API_TRADIER_TOKEN}",
            "Accept": "application/json",
        },
    )
    if response.status_code != 200:
        console.print("Error in request. Check API_TRADIER_TOKEN\n")
        return pd.DataFrame()
    return process_chains(response)
@log_start_end(log=logger)
def process_chains(response: requests.models.Response) -> pd.DataFrame:
    """Function to take in the requests.get and return a DataFrame

    Parameters
    ----------
    response: requests.models.Response
        This is the response from tradier api.

    Returns
    -------
    opt_chain: pd.DataFrame
        Dataframe with all available options
    """
    json_response = response.json()
    options = json_response["options"]["option"]
    # Build all rows first and construct the DataFrame once: appending
    # row-by-row via .loc on a DataFrame is quadratic in the chain size
    rows = []
    for option in options:
        # Start every row with empty strings so missing fields stay blank
        d = {col: "" for col in df_columns}
        # populate main dictionary values
        for col in option_columns:
            if col in option:
                d[col] = option[col]
        # greeks are nested one level down and may be absent entirely
        if option["greeks"]:
            for col in greek_columns:
                if col in option["greeks"]:
                    d[col] = option["greeks"][col]
        rows.append(d)
    return pd.DataFrame(rows, columns=df_columns)
@log_start_end(log=logger)
@check_api_key(["API_TRADIER_TOKEN"])
def last_price(symbol: str):
    """Makes api request for last price

    Parameters
    ----------
    symbol: str
        Ticker symbol

    Returns
    -------
    float:
        Last price (0 when the quote has no last value); None on request error
    """
    response = requests.get(
        "https://sandbox.tradier.com/v1/markets/quotes",
        params={"symbols": symbol, "includeAllRoots": "true", "strikes": "false"},
        headers={
            "Authorization": f"Bearer {cfg.API_TRADIER_TOKEN}",
            "Accept": "application/json",
        },
    )
    if response.status_code != 200:
        console.print("Error getting last price")
        return None
    quote = response.json()["quotes"]["quote"]["last"]
    return 0 if quote is None else float(quote)
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
import mplfinance as mpf
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
is_valid_axes_count,
lambda_long_number_format_y_axis,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import chartexchange_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def plot_chart(
    df: pd.DataFrame,
    candle_chart_kwargs: dict,
    option_type: str,
    symbol: str,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Render a candlestick chart of historical option quotes.

    Parameters
    ----------
    df : pd.DataFrame
        OHLC + Volume history indexed by date
    candle_chart_kwargs : dict
        Keyword arguments forwarded to mplfinance.plot (mutated in place)
    option_type : str
        "call" or "put"; only used in the figure title
    symbol : str
        Ticker symbol; only used in the figure title
    external_axes : Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list); when None a new
        themed figure is created and shown
    """
    if not external_axes:
        # Build a standalone figure styled by the terminal theme
        candle_chart_kwargs["returnfig"] = True
        candle_chart_kwargs["figratio"] = (10, 7)
        candle_chart_kwargs["figscale"] = 1.10
        candle_chart_kwargs["figsize"] = plot_autoscale()
        fig, ax = mpf.plot(df, **candle_chart_kwargs)
        fig.suptitle(
            f"Historical quotes for {symbol} {option_type}",
            x=0.055,
            y=0.965,
            horizontalalignment="left",
        )
        lambda_long_number_format_y_axis(df, "Volume", ax)
        theme.visualize_output(force_tight_layout=False)
        ax[0].legend()
    elif is_valid_axes_count(external_axes, 1):
        # Caller supplied a single axis: draw into it without extra styling
        (ax1,) = external_axes
        candle_chart_kwargs["ax"] = ax1
        mpf.plot(df, **candle_chart_kwargs)
    else:
        return
@log_start_end(log=logger)
def display_raw(
    symbol: str = "GME",
    expiry: str = "2021-02-05",
    call: bool = True,
    price: float = 90,
    limit: int = 10,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
) -> None:
    """Return raw stock data[chartexchange]

    Parameters
    ----------
    symbol : str
        Ticker symbol for the given option
    expiry : str
        The expiry of expiration, format "YYYY-MM-DD", i.e. 2010-12-31.
    call : bool
        Whether the underlying asset should be a call or a put
    price : float
        The strike of the expiration
    limit : int
        Number of rows to show
    export : str
        Export data as CSV, JSON, XLSX
    external_axes: Optional[List[plt.Axes]]
        External axes (1 axis is expected in the list), by default None
    """
    history = chartexchange_model.get_option_history(symbol, expiry, call, price)[::-1]
    if history.empty:
        console.print("[red]No data found[/red]\n")
        return

    history["Date"] = pd.to_datetime(history["Date"])
    history = history.set_index("Date")

    option_type = "call" if call else "put"
    candle_chart_kwargs = {
        "type": "candle",
        "style": theme.mpf_style,
        "volume": True,
        "xrotation": theme.xticks_rotation,
        "scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
        "update_width_config": {
            "candle_linewidth": 0.6,
            "candle_width": 0.8,
            "volume_linewidth": 0.8,
            "volume_width": 0.8,
        },
        "warn_too_much_data": 10000,
        "datetime_format": "%Y-%b-%d",
    }

    # This plot has 2 axes
    plot_chart(
        df=history,
        candle_chart_kwargs=candle_chart_kwargs,
        option_type=option_type,
        symbol=symbol,
        external_axes=external_axes,
    )

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "hist",
        history,
    )
    print_rich_table(
        history.head(limit),
        headers=list(history.columns),
        show_index=True,
        title=f"{symbol.upper()} raw data",
    )
import logging
from typing import Union
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options.op_helpers import convert
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_option_history(
    symbol: str = "GME",
    date: str = "2021-02-05",
    call: bool = True,
    price: Union[str, Union[int, float]] = "90",
) -> pd.DataFrame:
    """Historic prices for a specific option [chartexchange]

    Parameters
    ----------
    symbol : str
        Ticker symbol to get historical data from
    date : str
        Expiration date as a string in YYYY-MM-DD format (dashes are
        stripped when building the request URL)
    call : bool
        Whether to show a call or a put
    price : Union[str, Union[int, float]]
        Strike price for a specific option

    Returns
    -------
    pd.DataFrame
        Historic information for an option; empty on any scrape failure
    """
    url = (
        f"https://chartexchange.com/symbol/opra-{symbol.lower()}{date.replace('-', '')}"
    )
    url += f"{'c' if call else 'p'}{float(price):g}/historical/"

    data = requests.get(url, headers={"User-Agent": get_user_agent()}).content
    soup = BeautifulSoup(data, "html.parser")
    table = soup.find("div", attrs={"style": "display: table; font-size: 0.9em; "})
    if not table:
        return pd.DataFrame()
    rows = table.find_all("div", attrs={"style": "display: table-row;"})
    if not rows:
        console.print("No data for this option\n")
        return pd.DataFrame()

    # Skip the header row; each remaining row becomes a list of cell texts
    clean_rows = [[cell.text for cell in row.find_all("div")] for row in rows[1:]]

    df = pd.DataFrame()
    df["Date"] = [row[0] for row in clean_rows]
    # (output column, source cell index, separator stripped by `convert`)
    for name, idx, sep in [
        ("Open", 2, ","),
        ("High", 3, ","),
        ("Low", 4, ","),
        ("Close", 5, ","),
        ("Change", 6, "%"),
        ("Volume", 7, ","),
        ("Open Interest", 8, ","),
        ("Change Since", 9, "%"),
    ]:
        df[name] = [convert(row[idx], sep) for row in clean_rows]
    return df
__docformat__ = "numpy"
import logging
from datetime import datetime
from typing import List
import numpy as np
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options.op_helpers import get_dte_from_expiration as get_dte
logger = logging.getLogger(__name__)
# pylint: disable=unsupported-assignment-operation
@log_start_end(log=logger)
def get_full_option_chain(symbol: str) -> pd.DataFrame:
    """Get the full option chain for symbol over all expirations

    Parameters
    ----------
    symbol: str
        Symbol to get options for.  Can be a stock, etf or index.

    Returns
    -------
    pd.DataFrame
        Dataframe of option chain with unexpired contracts only
    """
    # Nasdaq requires an asset code, so instead of making user supply one, just loop through all
    for asset in ["stocks", "index", "etf"]:
        url = (
            f"https://api.nasdaq.com/api/quote/{symbol}/option-chain?assetclass={asset}&"
            "fromdate=2010-09-09&todate=2030-09-09&excode=oprac&callput=callput&money=all&type=all"
        )
        # I have had issues with nasdaq requests, and this user agent seems to work in US and EU
        response_json = requests.get(
            url,
            headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)"
                " AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6 Safari/605.1.15"
            },
        ).json()
        if response_json["status"]["rCode"] == 200:
            df = pd.DataFrame(response_json["data"]["table"]["rows"]).drop(
                columns=["c_colour", "p_colour", "drillDownURL"]
            )
            # Expiration group is only set on the first row of each group;
            # forward-fill so each contract carries its expiration
            # (fillna(method="ffill") is deprecated in newer pandas)
            df["expirygroup"] = df["expirygroup"].replace("", np.nan).ffill()
            # Make numeric
            columns_w_types = {
                "c_Last": float,
                "c_Change": float,
                "c_Bid": float,
                "c_Ask": float,
                "c_Volume": int,
                "c_Openinterest": int,
                "strike": float,
                "p_Last": float,
                "p_Change": float,
                "p_Bid": float,
                "p_Ask": float,
                "p_Volume": int,
                "p_Openinterest": int,
            }
            # Strip thousands separators before casting
            for key in columns_w_types:
                df[key] = df[key].replace(",", "", regex=True)
            df = (
                df.fillna(np.nan)
                .dropna(axis=0)
                .replace("--", 0)
                .astype(columns_w_types)
            )
            # Keep only contracts that have not yet expired
            df = df[df["expirygroup"].apply(get_dte) > 0]
            return df

    console.print(f"[red]{symbol} Option Chain not found.[/red]\n")
    return pd.DataFrame()
@log_start_end(log=logger)
def get_expirations(symbol: str) -> List[str]:
    """Get available expirations

    Parameters
    ----------
    symbol : str
        Ticker symbol to get expirations for

    Returns
    -------
    List[str]
        List of expiration dates in YYYY-MM-DD format
    """
    chain = get_full_option_chain(symbol)
    if chain.empty:
        return []
    # Drop empty strings, then convert 'January 11, 1993' -> '1993-01-11'
    return [
        datetime.strptime(group, "%B %d, %Y").strftime("%Y-%m-%d")
        for group in chain.expirygroup.unique()
        if group
    ]
@log_start_end(log=logger)
def get_chain_given_expiration(symbol: str, expiration: str) -> pd.DataFrame:
    """Get option chain for symbol at a given expiration

    Parameters
    ----------
    symbol: str
        Symbol to get chain for
    expiration: str
        Expiration to get chain for

    Returns
    -------
    pd.DataFrame
        Dataframe of option chain
    """
    # Nasdaq requires an asset class; loop through them until one responds
    for asset in ["stocks", "index", "etf"]:
        url = (
            f"https://api.nasdaq.com/api/quote/{symbol}/option-chain?assetclass={asset}&"
            f"fromdate={expiration}&todate={expiration}&excode=oprac&callput=callput&money=all&type=all"
        )
        response_json = requests.get(
            url,
            headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)"
                " AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6 Safari/605.1.15"
            },
        ).json()
        if response_json["status"]["rCode"] == 200:
            df = (
                pd.DataFrame(
                    response_json.get("data", {}).get("table", {}).get("rows", {})
                )
                .drop(columns=["c_colour", "p_colour", "drillDownURL", "expirygroup"])
                .fillna(np.nan)
                .dropna(axis=0)
            )
            # Make numeric
            columns_w_types = {
                "c_Last": float,
                "c_Change": float,
                "c_Bid": float,
                "c_Ask": float,
                "c_Volume": int,
                "c_Openinterest": int,
                "strike": float,
                "p_Last": float,
                "p_Change": float,
                "p_Bid": float,
                "p_Ask": float,
                "p_Volume": int,
                "p_Openinterest": int,
            }
            # Strip thousands separators before casting
            for key in columns_w_types:
                df[key] = df[key].replace(",", "", regex=True)
            df = df.replace("--", 0).astype(columns_w_types)
            return df

    console.print(f"[red]{symbol} Option Chain not found.[/red]\n")
    return pd.DataFrame()
@log_start_end(log=logger)
def get_last_price(symbol: str) -> float:
    """Get the last price from nasdaq

    Parameters
    ----------
    symbol: str
        Symbol to get quote for

    Returns
    -------
    float
        Last price, or NaN when no asset class yields a quote
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)"
        " AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6 Safari/605.1.15"
    }
    for asset in ["stocks", "index", "etf"]:
        url = f"https://api.nasdaq.com/api/quote/{symbol}/info?assetclass={asset}"
        response_json = requests.get(url, headers=headers).json()
        if response_json["status"]["rCode"] == 200:
            raw_price = response_json["data"]["primaryData"]["lastSalePrice"]
            # Quote arrives formatted like "$1,234.56"
            return float(raw_price.strip("$").replace(",", ""))

    console.print(f"[red]Last price for {symbol} not found[/red]\n")
    return np.nan
# Ugh this doesn't get the full chain
# TODO: apply CRR binomial tree to backtrack IV for greeks
@log_start_end(log=logger)
def get_option_greeks(symbol: str, expiration: str) -> pd.DataFrame:
    """Get option greeks from nasdaq

    Parameters
    ----------
    symbol: str
        Symbol to get
    expiration: str
        Option expiration

    Returns
    -------
    pd.DataFrame
        Dataframe with option greeks; empty when nothing is found
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)"
        " AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6 Safari/605.1.15"
    }
    for asset in ["stocks", "index", "etf"]:
        url_greeks = f"https://api.nasdaq.com/api/quote/{symbol}/option-chain/greeks?assetclass={asset}&date={expiration}"
        response_json = requests.get(url_greeks, headers=headers).json()
        if response_json["status"]["rCode"] == 200:
            return pd.DataFrame(response_json["data"]["table"]["rows"]).drop(
                columns="url"
            )

    console.print(f"[red]Greeks not found for {symbol} on {expiration}[/red].")
    return pd.DataFrame()
__docformat__ = "numpy"
import argparse
import logging
from typing import List
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import MenuText, console
from openbb_terminal.stocks.options import yfinance_view
logger = logging.getLogger(__name__)
class PricingController(BaseController):
    """Controller for user-supplied price scenarios used in option valuation."""

    CHOICES_COMMANDS = [
        "add",
        "rmv",
        "show",
        "rnval",
    ]
    PATH = "/stocks/options/pricing/"
    CHOICES_GENERATION = True

    def __init__(
        self,
        ticker: str,
        selected_date: str,
        prices: pd.DataFrame,
        queue: List[str] = None,
    ):
        """Constructor

        Parameters
        ----------
        ticker : str
            Loaded stock ticker
        selected_date : str
            Selected option expiration
        prices : pd.DataFrame
            DataFrame with "Price" and "Chance" columns of price scenarios
        queue : List[str]
            Command queue forwarded to the base controller
        """
        super().__init__(queue)

        self.ticker = ticker
        self.selected_date = selected_date
        self.prices = prices

        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            self.completer = NestedCompleter.from_nested_dict(choices)

    def print_help(self):
        """Print help"""
        mt = MenuText("stocks/options/pricing/")
        mt.add_param("_ticker", self.ticker or "")
        mt.add_param("_expiry", self.selected_date or "")
        mt.add_raw("\n")
        mt.add_cmd("add")
        mt.add_cmd("rmv")
        mt.add_raw("\n")
        mt.add_cmd("show")
        mt.add_cmd("rnval")
        console.print(text=mt.menu_text, menu="Stocks - Options - Pricing")

    def custom_reset(self):
        """Class specific component of reset command"""
        if self.ticker:
            if self.selected_date:
                return [
                    "stocks",
                    f"load {self.ticker}",
                    "options",
                    f"exp -d {self.selected_date}",
                    "pricing",
                ]
            return ["stocks", f"load {self.ticker}", "options", "payoff"]
        return []

    @log_start_end(log=logger)
    def call_add(self, other_args: List[str]):
        """Process add command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="add",
            description="Adds a price to the list",
        )
        parser.add_argument(
            "-p",
            "--price",
            type=float,
            required="-h" not in other_args,
            dest="price",
            help="Projected price of the stock at the expiration date",
        )
        parser.add_argument(
            "-c",
            "--chance",
            type=float,
            required="-h" not in other_args,
            dest="chance",
            help="Chance that the stock is at a given projected price",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-p")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Replace any existing entry for the same price instead of duplicating
            if ns_parser.price in self.prices["Price"].to_list():
                df = self.prices[(self.prices["Price"] != ns_parser.price)]
            else:
                df = self.prices

            new = {"Price": ns_parser.price, "Chance": ns_parser.chance}
            # DataFrame.append was removed in pandas 2.0; use concat instead
            df = pd.concat([df, pd.DataFrame([new])], ignore_index=True)
            self.prices = df.sort_values("Price")

    @log_start_end(log=logger)
    def call_rmv(self, other_args: List[str]):
        """Process rmv command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rmv",
            description="Removes a price from the list",
        )
        parser.add_argument(
            "-p",
            "--price",
            type=float,
            required="-h" not in other_args and "-a" not in other_args,
            dest="price",
            help="Price you want to remove from the list",
        )
        parser.add_argument(
            "-a",
            "--all",
            action="store_true",
            default=False,
            dest="all",
            help="Remove all prices from the list",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-p")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if ns_parser.all:
                self.prices = pd.DataFrame(columns=["Price", "Chance"])
            else:
                self.prices = self.prices[(self.prices["Price"] != ns_parser.price)]

    @log_start_end(log=logger)
    def call_show(self, other_args):
        """Process show command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="show",
            description="Display prices",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            print_rich_table(
                self.prices,
                headers=list(self.prices.columns),
                show_index=False,
                title=f"Estimated price(s) of {self.ticker} at {self.selected_date}",
            )

    @log_start_end(log=logger)
    def call_rnval(self, other_args: List[str]):
        """Process rnval command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="rnval",
            description="The risk neutral value of the options",
        )
        parser.add_argument(
            "-p",
            "--put",
            action="store_true",
            default=False,
            help="Show puts instead of calls",
        )
        parser.add_argument(
            "-m",
            "--min",
            type=float,
            default=None,
            dest="mini",
            help="Minimum strike price shown",
        )
        parser.add_argument(
            "-M",
            "--max",
            type=float,
            default=None,
            dest="maxi",
            help="Maximum strike price shown",
        )
        parser.add_argument(
            "-r",
            "--risk",
            type=float,
            default=None,
            dest="risk",
            help="The risk-free rate to use",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    # Scenario chances must form a probability distribution
                    if sum(self.prices["Chance"]) == 1:
                        yfinance_view.risk_neutral_vals(
                            self.ticker,
                            self.selected_date,
                            self.prices,
                            ns_parser.put,
                            ns_parser.mini,
                            ns_parser.maxi,
                            ns_parser.risk,
                        )
                    else:
                        console.print("Total chances must equal one\n")
                else:
                    console.print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                console.print("No ticker loaded. First use `load <ticker>`\n")
__docformat__ = "numpy"
import logging
import pandas as pd
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options.hedge import hedge_model
logger = logging.getLogger(__name__)
def add_and_show_greeks(
    price: float = 100,
    implied_volatility: float = 20,
    strike: float = 120,
    days: float = 30,
    sign: int = 1,
):
    """Determine the delta, gamma and vega value of the portfolio and/or options and show them.

    Parameters
    ----------
    price: float
        The price.
    implied_volatility: float
        The implied volatility.
    strike: float
        The strike price.
    days: float
        The amount of days until expiration. Use annual notation thus a month would be 30 / 360.
    sign: int
        Whether you have a long (1) or short (-1) position

    Returns
    -------
    delta: float
    gamma: float
    vega: float
    """
    # Compute the greeks of the hedge option
    delta, gamma, vega = hedge_model.add_hedge_option(
        price, implied_volatility, strike, days, sign
    )

    # Display the computed greeks next to the inputs that produced them
    table = pd.DataFrame(
        [delta, gamma, vega, implied_volatility, strike],
        index=["Delta", "Gamma", "Vega", "Implied Volatility", "Strike Price"],
        columns=["Positions"],
    )
    print_rich_table(table, show_index=True, headers=list(table.columns))

    return delta, gamma, vega
def show_calculated_hedge(
    portfolio_option_amount: float = 100,
    side: str = "Call",
    greeks: dict = None,
    sign: int = 1,
):
    """Determine the hedge position and the weights within each option and
    underlying asset to hold a neutral portfolio and show them

    Parameters
    ----------
    portfolio_option_amount: float
        Number to show
    side: str
        Whether you have a Call or Put instrument
    greeks: dict
        Dictionary containing delta, gamma and vega values for the portfolio and option A and B. Structure is
        as follows: {'Portfolio': {'Delta': VALUE, 'Gamma': VALUE, 'Vega': VALUE}} etc.
        When omitted, a small illustrative example dictionary is used.
    sign: int
        Whether you have a long (1) or short (-1) position

    Returns
    -------
    A table with the neutral portfolio weights.
    """
    # A mutable default argument is shared between calls; build the example
    # dictionary inside the function body instead
    if greeks is None:
        greeks = {
            "Portfolio": {"Delta": 1, "Gamma": 9.1268e-05, "Vega": 5.4661},
            "Option A": {"Delta": 1, "Gamma": 9.1268e-05, "Vega": 5.4661},
            "Option B": {"Delta": 1, "Gamma": 9.1268e-05, "Vega": 5.4661},
        }

    # Calculate hedge position
    (
        weight_option_a,
        weight_option_b,
        weight_shares,
        is_singular,
    ) = hedge_model.calc_hedge(portfolio_option_amount, side, greeks, sign)

    if sum([weight_option_a, weight_option_b, weight_shares]):
        # Show the weights that would create a neutral portfolio
        positions = pd.DataFrame(
            [weight_option_a, weight_option_b, weight_shares],
            index=["Weight Option A", "Weight Option B", "Weight Shares"],
            columns=["Positions"],
        )

        print_rich_table(
            positions,
            title="Neutral Portfolio Weights",
            headers=list(positions.columns),
            show_index=True,
        )

        if is_singular:
            console.print(
                "[red]Warning\n[/red]"
                "The selected combination of options yields multiple solutions.\n"
                "This is the first feasible solution, possibly not the best one."
            )
    else:
        console.print(
            "[red]Due to there being multiple solutions (Singular Matrix) the current options "
            "combination can not be solved. Please input different options.[/red]"
        )
__docformat__ = "numpy"
import argparse
import logging
from datetime import datetime
from typing import Dict, List
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.custom_prompt_toolkit import NestedCompleter
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import print_rich_table
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import MenuText, console
from openbb_terminal.stocks.options.hedge import hedge_view
from openbb_terminal.stocks.options.hedge.hedge_model import add_hedge_option
from openbb_terminal.stocks.options.yfinance_model import get_option_chain, get_price
from openbb_terminal.stocks.options.yfinance_view import plot_payoff
from openbb_terminal.stocks import stocks_helper
# pylint: disable=R0902
logger = logging.getLogger(__name__)
class HedgeController(BaseController):
    """Hedge Controller class.

    Interactive menu that builds a delta/gamma/vega-neutral hedge for an
    underlying option position by combining up to two options ("Option A"
    and "Option B") from the loaded ticker's chain for one expiration.
    """

    CHOICES_COMMANDS = [
        "list",
        "add",
        "rmv",
        "pick",
        "sop",
        "plot",
    ]
    PATH = "/stocks/options/hedge/"
    CHOICES_GENERATION = True

    def __init__(self, ticker: str, expiration: str, queue: List[str] = None):
        """Constructor"""
        super().__init__(queue)

        self.underlying_asset_position: str = ""
        self.chain = get_option_chain(ticker, expiration)

        # Each entry: (strike, implied volatility, last price, currency).
        self.calls = list(
            zip(
                self.chain.calls["strike"].tolist(),
                self.chain.calls["impliedVolatility"].tolist(),
                self.chain.calls["lastPrice"].tolist(),
                self.chain.calls["currency"].tolist(),
            )
        )
        self.puts = list(
            zip(
                self.chain.puts["strike"].tolist(),
                self.chain.puts["impliedVolatility"].tolist(),
                self.chain.puts["lastPrice"].tolist(),
                # Bug fix: read the currency from the puts side of the chain
                # (this previously reused the calls' currency column).
                self.chain.puts["currency"].tolist(),
            )
        )

        self.PICK_CHOICES = [
            f"{strike} {position} {side}"
            for strike in range(int(self.calls[0][0]), int(self.calls[-1][0]), 5)
            for position in ["Long", "Short"]
            for side in ["Call", "Put"]
        ]

        self.ticker = ticker
        self.current_price: float = get_price(ticker)
        self.expiration = expiration
        self.implied_volatility = self.chain.calls["impliedVolatility"]
        self.options: Dict = {"Portfolio": {}, "Option A": {}, "Option B": {}}
        self.underlying = 0.0
        self.side: str = ""
        self.amount = 0.0
        self.strike = 0.0

        self.call_index_choices = range(len(self.calls))
        self.put_index_choices = range(len(self.puts))
        self.greeks: Dict = {"Portfolio": {}, "Option A": {}, "Option B": {}}

        if session and obbff.USE_PROMPT_TOOLKIT:
            choices: dict = self.choices_default
            # This menu contains dynamic choices that may change during runtime
            self.choices = choices
            self.completer = NestedCompleter.from_nested_dict(choices)

    def update_runtime_choices(self):
        """Update runtime choices"""
        if self.options and session and obbff.USE_PROMPT_TOOLKIT:
            self.choices["rmv"] = {c: None for c in ["Option A", "Option B"]}
            self.choices["rmv"]["--all"] = {}
            self.choices["rmv"]["-a"] = "--all"

            self.completer = NestedCompleter.from_nested_dict(self.choices)

    def _show_option_positions(self, detailed: bool = True):
        """Print the currently selected option positions as a rich table.

        Parameters
        ----------
        detailed: bool
            Include the cost and currency columns when True.
        """
        rows = []
        for values in self.options.values():
            # Skip empty slots (e.g. "Portfolio", or a removed option).
            if values:
                hold = "Long" if values["sign"] == 1 else "Short"
                row = [
                    values["type"],
                    hold,
                    values["strike"],
                    values["implied_volatility"],
                ]
                if detailed:
                    row += [values["cost"], values["currency"]]
                rows.append(row)

        columns = ["Type", "Hold", "Strike", "Implied Volatility"]
        if detailed:
            columns += ["Cost", "Currency"]

        # Build the frame once from collected rows; DataFrame.append was
        # removed in pandas 2.0.
        positions = pd.DataFrame(rows, columns=columns)
        print_rich_table(
            positions,
            title="Current Option Positions",
            headers=list(positions.columns),
            show_index=False,
        )

    def print_help(self):
        """Print help"""
        mt = MenuText("stocks/options/hedge/")
        mt.add_param("_ticker", self.ticker or "")
        mt.add_param("_expiry", self.expiration or "")
        mt.add_raw("\n")
        mt.add_cmd("pick")
        mt.add_raw("\n")
        mt.add_param("_underlying", self.underlying_asset_position)
        mt.add_raw("\n")
        mt.add_cmd("list")
        mt.add_cmd("add", "Delta" in self.greeks["Portfolio"])
        mt.add_cmd(
            "rmv",
            "Delta" in self.greeks["Option A"] or "Delta" in self.greeks["Option B"],
        )
        mt.add_cmd(
            "sop",
            "Delta" in self.greeks["Option A"] or "Delta" in self.greeks["Option B"],
        )
        mt.add_cmd(
            "plot",
            "Delta" in self.greeks["Option A"] or "Delta" in self.greeks["Option B"],
        )
        console.print(text=mt.menu_text, menu="Stocks - Options - Hedge")

    def custom_reset(self):
        """Class specific component of reset command"""
        if self.ticker:
            if self.expiration:
                return [
                    "stocks",
                    f"load {self.ticker}",
                    "options",
                    f"exp -d {self.expiration}",
                    "hedge",
                ]
            return ["stocks", f"load {self.ticker}", "options", "hedge"]
        return []

    @log_start_end(log=logger)
    def call_list(self, other_args):
        """Lists available calls and puts"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="list",
            description="""Lists available calls and puts.""",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            calls = pd.DataFrame([call[0] for call in self.calls])
            puts = pd.DataFrame([put[0] for put in self.puts])
            options = pd.concat([calls, puts], axis=1).fillna("-")

            print_rich_table(
                options,
                title="Available Calls and Puts",
                headers=["Calls", "Puts"],
                show_index=True,
                index_name="Identifier",
            )

    @log_start_end(log=logger)
    def call_add(self, other_args: List[str]):
        """Process add command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="add",
            description="""Add options to the diagram.""",
        )
        parser.add_argument(
            "-p",
            "--put",
            dest="put",
            action="store_true",
            help="Buy a put instead of a call",
            default=False,
        )
        parser.add_argument(
            "-s",
            "--short",
            dest="short",
            action="store_true",
            help="Short the option instead of buying it",
            default=False,
        )
        # Valid identifiers depend on whether a put or a call is requested.
        option_type = (
            self.put_index_choices if "-p" in other_args else self.call_index_choices
        )
        parser.add_argument(
            "-i",
            "--identifier",
            dest="identifier",
            help="The identifier of the option as found in the list command",
            required="-h" not in other_args and "-k" not in other_args,
            type=int,
            choices=option_type,
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-i")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if not self.greeks["Portfolio"]:
                console.print(
                    "Please set the Underlying Asset Position by using the 'pick' command.\n"
                )
            else:
                opt_type = "Put" if ns_parser.put else "Call"
                sign = -1 if ns_parser.short else 1
                options_list = self.puts if ns_parser.put else self.calls

                if ns_parser.identifier < len(options_list):
                    strike, implied_volatility, cost, currency = options_list[
                        ns_parser.identifier
                    ]

                    option = {
                        "type": opt_type,
                        "sign": sign,
                        "strike": strike,
                        "implied_volatility": implied_volatility,
                        "cost": cost,
                        "currency": currency,
                    }

                    # Greek calculations use +1 for calls and -1 for puts.
                    side = 1 if opt_type == "Call" else -1

                    date_obj = datetime.strptime(self.expiration, "%Y-%m-%d")
                    days = float((date_obj - datetime.now()).days + 1)
                    if days == 0.0:
                        days = 0.01

                    # Fill Option A first, then Option B; refuse a third option.
                    if "Delta" not in self.greeks["Option A"]:
                        slot = "Option A"
                    elif "Delta" not in self.greeks["Option B"]:
                        slot = "Option B"
                    else:
                        console.print(
                            "[red]The functionality only accepts two options. Therefore, please remove an "
                            "option with 'rmv' before continuing.[/red]\n"
                        )
                        return

                    self.options[slot] = option
                    (
                        self.greeks[slot]["Delta"],
                        self.greeks[slot]["Gamma"],
                        self.greeks[slot]["Vega"],
                    ) = hedge_view.add_and_show_greeks(
                        self.current_price,
                        implied_volatility,
                        strike,
                        days / 365,
                        side,
                    )

                    console.print("")
                    self._show_option_positions(detailed=True)

                    if (
                        "Delta" in self.greeks["Option A"]
                        and "Delta" in self.greeks["Option B"]
                    ):
                        hedge_view.show_calculated_hedge(
                            self.amount, option["type"], self.greeks, sign
                        )

                    self.update_runtime_choices()
                else:
                    console.print("Please use a valid index\n")

    @log_start_end(log=logger)
    def call_rmv(self, other_args: List[str]):
        """Process rmv command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            prog="rmv",
            description="""Remove one of the options to be shown in the hedge.""",
        )
        parser.add_argument(
            "-o",
            "--option",
            dest="option",
            help="index of the option to remove",
            nargs="+",
        )
        parser.add_argument(
            "-a",
            "--all",
            dest="all",
            action="store_true",
            help="remove all of the options",
            default=False,
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-o")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if not self.options["Option A"] and not self.options["Option B"]:
                console.print("Please add Options by using the 'add' command.\n")
            else:
                if ns_parser.all:
                    # Reset both option slots. The greeks must be cleared as
                    # well, otherwise `add` and the menu still treat the
                    # removed options as present. Keep the "Portfolio" key so
                    # the dict shape matches __init__.
                    self.options = {"Portfolio": {}, "Option A": {}, "Option B": {}}
                    self.greeks["Option A"] = {}
                    self.greeks["Option B"] = {}
                else:
                    option_name = " ".join(ns_parser.option)

                    if option_name in self.options:
                        self.options[option_name] = {}
                        self.greeks[option_name] = {}
                        self.update_runtime_choices()
                    else:
                        console.print(f"{option_name} is not an option.")

                if self.options["Option A"] or self.options["Option B"]:
                    self._show_option_positions(detailed=False)
                else:
                    console.print(
                        "No options have been selected, removing them is not possible\n"
                    )

    @log_start_end(log=logger)
    def call_pick(self, other_args: List[str]):
        """Process pick command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="pick",
            description="This function plots option hedge diagrams",
        )
        parser.add_argument(
            "-p",
            "--pick",
            dest="pick",
            type=str.lower,
            choices=stocks_helper.format_parse_choices(self.PICK_CHOICES),
            help="Choose what you would like to pick",
            required="-h" not in other_args,
        )
        parser.add_argument(
            "-a",
            "--amount",
            dest="amount",
            default=1000,
            help="Choose the amount invested",
        )
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-p")
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            pick = stocks_helper.map_parse_choices(self.PICK_CHOICES)[ns_parser.pick]
            strike_type, underlying_type, side_type = pick.split(" ")
            amount_type = ns_parser.amount

            self.underlying_asset_position = (
                f"{underlying_type} {side_type} {amount_type} @ {strike_type}"
            )

            self.underlying = -1 if underlying_type == "Short" else 1

            if side_type == "Put":
                self.side = "Put"
                side = -1
            else:
                self.side = "Call"
                side = 1

            self.amount = float(amount_type)
            self.strike = float(strike_type)

            date_obj = datetime.strptime(self.expiration, "%Y-%m-%d")
            days = float((date_obj - datetime.now()).days + 1)
            if days == 0.0:
                days = 0.01

            # Positional lookup of the chosen strike; .iloc avoids relying on
            # the chain frames having a default RangeIndex. If the strike is
            # not found, index stays -1 and the last row is used (unchanged
            # from the original behavior).
            chain_side = self.chain.puts if side == -1 else self.chain.calls
            index = -1
            for i in range(len(chain_side["strike"])):
                if chain_side["strike"].iloc[i] == self.strike:
                    index = i
                    break
            implied_volatility = chain_side["impliedVolatility"].iloc[index]

            (
                self.greeks["Portfolio"]["Delta"],
                self.greeks["Portfolio"]["Gamma"],
                self.greeks["Portfolio"]["Vega"],
            ) = add_hedge_option(
                self.current_price,
                implied_volatility,
                float(self.strike),
                days / 365,
                side,
            )

    @log_start_end(log=logger)
    def call_sop(self, other_args):
        """Process sop command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="sop",
            description="Displays selected option",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if not self.options["Option A"] and not self.options["Option B"]:
                console.print("Please add Options by using the 'add' command.\n")
            else:
                self._show_option_positions(detailed=True)

                if (
                    "Delta" in self.greeks["Option A"]
                    and "Delta" in self.greeks["Option B"]
                ):
                    hedge_view.show_calculated_hedge(
                        self.amount,
                        self.options["Option A"]["type"],
                        self.greeks,
                        self.options["Option A"]["sign"],
                    )

    @log_start_end(log=logger)
    def call_plot(self, other_args):
        """Process plot command"""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="plot",
            description="This function plots option payoff diagrams",
        )
        ns_parser = self.parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if not self.options["Option A"] and not self.options["Option B"]:
                console.print("Please add Options by using the 'add' command.\n")
            else:
                plot_payoff(
                    self.current_price,
                    [self.options["Option A"], self.options["Option B"]],
                    self.underlying,
                    self.ticker,
                    self.expiration,
                )
__docformat__ = "numpy"
import math
import numpy as np
from scipy.stats import norm
# Based on article of Roman Paolucci: https://towardsdatascience.com/algorithmic-portfolio-hedging-9e069aafff5a
def calc_hedge(
    portfolio_option_amount: float = 100,
    side: str = "Call",
    greeks: dict = None,
    sign: int = 1,
):
    """Determine the hedge position and the weights within each option and
    underlying asset to hold a neutral portfolio

    Parameters
    ----------
    portfolio_option_amount: float
        Number to show
    side: str
        Whether you have a Call or Put instrument
    greeks: dict
        Dictionary containing delta, gamma and vega values for the portfolio and option A and B. Structure is
        as follows: {'Portfolio': {'Delta': VALUE, 'Gamma': VALUE, 'Vega': VALUE}} etc.
        Defaults to an example set of greeks when omitted.
    sign: int
        Whether you have a long (1) or short (-1) position

    Returns
    -------
    option A weight: float
    option B weight: float
    underlying shares weight: float
        Residual delta (rounded) once the two option weights are applied.
    is_singular: boolean
        True when the option pair yields a singular system and a
        least-squares solution was used instead of an exact inverse.
    """
    # Avoid a mutable default argument; build the documented example greeks
    # only when the caller does not supply any.
    if greeks is None:
        greeks = {
            "Portfolio": {"Delta": 1, "Gamma": 9.1268e-05, "Vega": 5.4661},
            "Option A": {"Delta": 1, "Gamma": 9.1268e-05, "Vega": 5.4661},
            "Option B": {"Delta": 1, "Gamma": 9.1268e-05, "Vega": 5.4661},
        }

    # Shortnames for delta, gamma and vega of portfolio
    portfolio_option_delta = greeks["Portfolio"]["Delta"]
    portfolio_option_gamma = greeks["Portfolio"]["Gamma"]
    portfolio_option_vega = greeks["Portfolio"]["Vega"]

    # Shortnames for delta, gamma and vega of option A
    option_a_delta = greeks["Option A"]["Delta"]
    option_a_gamma = greeks["Option A"]["Gamma"]
    option_a_vega = greeks["Option A"]["Vega"]

    # Shortnames for delta, gamma and vega of option B
    option_b_delta = greeks["Option B"]["Delta"]
    option_b_gamma = greeks["Option B"]["Gamma"]
    option_b_vega = greeks["Option B"]["Vega"]

    # Delta will be positive for long call and short put positions, negative
    # for short call and long put positions.
    delta_multiplier = 1
    # Gamma is always positive for long positions and negative for short positions.
    gamma_multiplier = 1
    # Vega will be positive for long positions and negative for short positions.
    vega_multiplier = 1

    # Initialize variable
    short = False

    # Short position
    if sign == -1:
        short = True
        gamma_multiplier = -1
        vega_multiplier = -1
        if side == "Call":
            # Short call position
            delta_multiplier = -1
    elif side == "Put":
        if sign == 1:
            # Long put position
            delta_multiplier = -1

    # Solve the 2x2 gamma/vega system for the two option weights.
    options_array = np.array(
        [[option_a_gamma, option_b_gamma], [option_a_vega, option_b_vega]]
    )
    portfolio_greeks = [
        [portfolio_option_gamma * portfolio_option_amount],
        [portfolio_option_vega * portfolio_option_amount],
    ]
    singular = False
    try:
        inv = np.linalg.inv(np.round(options_array, 2))
    except np.linalg.LinAlgError:
        # Singular matrix: fall back to a least-squares pseudo-inverse.
        # rcond=None uses the machine-precision cutoff (and silences the
        # FutureWarning emitted when rcond is omitted on older numpy).
        options_array = np.round(options_array, 2)
        a = options_array.shape[0]
        identity = np.eye(a, a)
        inv = np.linalg.lstsq(options_array, identity, rcond=None)[0]
        singular = True
    weights = np.dot(inv, portfolio_greeks)

    portfolio_greeks = [
        [portfolio_option_delta * delta_multiplier * portfolio_option_amount],
        [portfolio_option_gamma * gamma_multiplier * portfolio_option_amount],
        [portfolio_option_vega * vega_multiplier * portfolio_option_amount],
    ]
    options_array = np.array(
        [
            [option_a_delta, option_b_delta],
            [option_a_gamma, option_b_gamma],
            [option_a_vega, option_b_vega],
        ]
    )
    if not short:
        neutral = np.round(
            np.dot(np.round(options_array, 2), weights) - portfolio_greeks
        )
    else:
        neutral = np.round(
            np.dot(np.round(options_array, 2), weights) + portfolio_greeks
        )
    return weights[0][0], weights[1][0], neutral[0][0], singular
def add_hedge_option(
    price: float = 100,
    implied_volatility: float = 20,
    strike: float = 120,
    days: float = 30,
    sign: int = 1,
) -> tuple:
    """Determine the delta, gamma and vega value of the portfolio and/or options.

    Parameters
    ----------
    price: float
        The price.
    implied_volatility: float
        The implied volatility.
    strike: float
        The strike price.
    days: float
        The amount of days until expiration. Use annual notation thus a month would be 30 / 360.
    sign: int
        Whether you have a long (1) or short (-1) position

    Returns
    -------
    delta: float
    gamma: float
    vega: float
    """
    # Bundle the three greek calculations for a single option position;
    # the risk-free rate is fixed at zero throughout this module.
    return (
        calc_delta(price, implied_volatility, strike, days, 0, sign),
        calc_gamma(price, implied_volatility, strike, days, 0),
        calc_vega(price, implied_volatility, strike, days, 0),
    )
def calc_delta(
    asset_price: float = 100,
    asset_volatility: float = 20,
    strike_price: float = 120,
    time_to_expiration: float = 30,
    risk_free_rate: float = 0,
    sign: int = 1,
):
    """Black-Scholes delta: the first-order partial derivative of the option
    value with respect to the underlying asset price.

    Multiplying delta by a +-$1 change in the underlying asset, holding all
    other parameters constant, gives the new value of the option. Delta is
    positive for long call and short put positions, negative for short call
    and long put positions.

    Parameters
    ----------
    asset_price: int
        The price.
    asset_volatility: float
        The implied volatility.
    strike_price: float
        The strike price.
    time_to_expiration: float
        The amount of days until expiration. Use annual notation thus a month would be 30 / 360.
    risk_free_rate: float
        The risk free rate.
    sign: int
        Whether you have a long (1) or short (-1) position

    Returns
    -------
    delta: float
        Returns the value for the delta.
    """
    discount = math.exp(-risk_free_rate * time_to_expiration)
    d1 = (
        math.log(asset_price / (discount * strike_price))
        + 0.5 * (asset_volatility * asset_volatility) * time_to_expiration
    ) / (asset_volatility * (time_to_expiration**0.5))
    long_call_delta = norm.cdf(d1)
    # A long (sign == 1) position keeps the call delta; otherwise shift by -1.
    return long_call_delta if sign == 1 else long_call_delta - 1
def calc_gamma(
    asset_price: float = 100,
    asset_volatility: float = 20,
    strike_price: float = 120,
    time_to_expiration: float = 30,
    risk_free_rate: float = 0,
):
    """The second-order partial-derivative with respect to the underlying asset of the Black-Scholes equation
    is known as gamma. Gamma refers to how the option’s delta changes when there is a change in the underlying
    asset price. Multiplying gamma by a +-$1 change in the underlying asset, holding all other parameters constant,
    will give you the new value of the option’s delta. Essentially, gamma is telling us the rate of change of delta
    given a +-1 change in the underlying asset price. Gamma is always positive for long positions and
    negative for short positions.

    Parameters
    ----------
    asset_price: int
        The price.
    asset_volatility: float
        The implied volatility.
    strike_price: float
        The strike price.
    time_to_expiration: float
        The amount of days until expiration. Use annual notation thus a month would be 30 / 360.
    risk_free_rate: float
        The risk free rate.

    Returns
    -------
    gamma: float
        Returns the value for the gamma.
    """
    b = math.exp(-risk_free_rate * time_to_expiration)
    x1 = (
        math.log(asset_price / (b * strike_price))
        + 0.5 * (asset_volatility * asset_volatility) * time_to_expiration
    )
    x1 = x1 / (asset_volatility * (time_to_expiration**0.5))
    # Bug fix: Black-Scholes gamma uses the standard normal *density* N'(d1),
    # not the cumulative distribution N(d1).
    z1 = norm.pdf(x1)
    gamma = z1 / (asset_price * asset_volatility * math.sqrt(time_to_expiration))
    return gamma
def calc_vega(
    asset_price: float = 100,
    asset_volatility: float = 20,
    strike_price: float = 120,
    time_to_expiration: float = 30,
    risk_free_rate: float = 0,
):
    """The first-order partial-derivative with respect to the underlying asset volatility of
    the Black-Scholes equation is known as vega. Vega refers to how the option value
    changes when there is a change in the underlying asset volatility. Multiplying vega by
    a +-1% change in the underlying asset volatility, holding all other parameters constant, will give
    you the new value of the option. Vega will be positive for long positions and negative for short positions.

    Parameters
    ----------
    asset_price: int
        The price.
    asset_volatility: float
        The implied volatility.
    strike_price: float
        The strike price.
    time_to_expiration: float
        The amount of days until expiration. Use annual notation thus a month would be 30 / 360.
    risk_free_rate: float
        The risk free rate.

    Returns
    -------
    vega: float
        Returns the value for the vega (per 1% volatility move).
    """
    b = math.exp(-risk_free_rate * time_to_expiration)
    x1 = (
        math.log(asset_price / (b * strike_price))
        + 0.5 * (asset_volatility * asset_volatility) * time_to_expiration
    )
    x1 = x1 / (asset_volatility * (time_to_expiration**0.5))
    # Bug fix: Black-Scholes vega uses the standard normal *density* N'(d1),
    # not the cumulative distribution N(d1).
    z1 = norm.pdf(x1)
    vega = asset_price * z1 * math.sqrt(time_to_expiration)
    # Scale to a 1% (rather than 1.0) change in volatility.
    return vega / 100
__docformat__ = "numpy"
import ast
import configparser
import logging
from pathlib import Path
from typing import Dict, Tuple, Union

import pandas as pd
import requests
import yfinance as yf

from openbb_terminal.core.config.paths import USER_PRESETS_DIRECTORY
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import yfinance_model
logger = logging.getLogger(__name__)
# Valid values for the screener's "order-by" filter ("<field>_desc" /
# "<field>_asc"); used to validate presets and to quote these tokens when the
# filter payload is built for the api.syncretism.io endpoint.
accepted_orders = [
    "e_desc",
    "e_asc",
    "iv_desc",
    "iv_asc",
    "md_desc",
    "md_asc",
    "lp_desc",
    "lp_asc",
    "oi_asc",
    "oi_desc",
    "v_desc",
    "v_asc",
]
@log_start_end(log=logger)
def get_historical_greeks(
    symbol: str,
    expiry: str,
    strike: Union[str, float],
    chain_id: str = "",
    put: bool = False,
) -> pd.DataFrame:
    """Get historical option greeks

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Option expiration date
    strike: Union[str, float]
        Strike price to look for
    chain_id: str
        OCC option symbol.  Overwrites other inputs
    put: bool
        Is this a put option?

    Returns
    -------
    df: pd.DataFrame
        Dataframe containing historical greeks
    """
    if isinstance(strike, str):
        try:
            strike = float(strike)
        except ValueError:
            console.print(
                f"[red]Strike of {strike} cannot be converted to a number.[/red]\n"
            )
            return pd.DataFrame()
    if not chain_id:
        options = yfinance_model.get_option_chain(symbol, expiry)
        options = options.puts if put else options.calls

        selection = options.loc[options.strike == strike, "contractSymbol"]
        try:
            chain_id = selection.values[0]
        except IndexError:
            console.print(f"[red]Strike price of {strike} not found.[/red]\n")
            return pd.DataFrame()

    # Timeout so a hung endpoint cannot block the terminal indefinitely.
    r = requests.get(
        f"https://api.syncretism.io/ops/historical/{chain_id}", timeout=10
    )

    if r.status_code != 200:
        console.print("Error in request.")
        return pd.DataFrame()

    # Build one record per history entry instead of nine parallel lists.
    index = []
    rows = []
    for entry in r.json():
        index.append(pd.to_datetime(entry["timestamp"], unit="s"))
        rows.append(
            {
                "iv": entry["impliedVolatility"],
                "gamma": entry["gamma"],
                "delta": entry["delta"],
                "theta": entry["theta"],
                "rho": entry["rho"],
                "vega": entry["vega"],
                "premium": entry["premium"],
                "price": entry["regularMarketPrice"],
            }
        )

    # Explicit column list keeps the column set (and order) stable even when
    # the API returns no history.
    return pd.DataFrame(
        rows,
        index=index,
        columns=["iv", "gamma", "delta", "theta", "rho", "vega", "premium", "price"],
    )
@log_start_end(log=logger)
def get_preset_choices() -> Dict:
    """
    Return a dict containing keys as name of preset and
    filepath as value
    """
    user_presets = USER_PRESETS_DIRECTORY / "stocks" / "options"
    default_presets = Path(__file__).parent.parent / "presets"

    # User presets first; bundled defaults override on a name collision,
    # matching the original dict.update() order.
    choices: Dict = {}
    for folder in (user_presets, default_presets):
        for ini_file in folder.glob("*.ini"):
            choices[ini_file.name] = ini_file
    return choices
@log_start_end(log=logger)
def get_screener_output(preset: str) -> Tuple[pd.DataFrame, str]:
    """Screen options based on preset filters

    Parameters
    ----------
    preset: str
        Chosen preset (name of an .ini file from get_preset_choices)

    Returns
    -------
    Tuple[pd.DataFrame, str]
        DataFrame with screener data or empty if errors, String containing error message if supplied
    """
    # Mapping from API field names to the short column headers shown in the UI.
    d_cols = {
        "contractSymbol": "CS",
        "symbol": "S",
        "optType": "T",
        "strike": "Str",
        "expiration": "Exp ∨",
        "impliedVolatility": "IV",
        "lastPrice": "LP",
        "bid": "B",
        "ask": "A",
        "volume": "V",
        "openInterest": "OI",
        "yield": "Y",
        "monthlyyield": "MY",
        "regularMarketPrice": "SMP",
        "regularMarketDayLow": "SMDL",
        "regularMarketDayHigh": "SMDH",
        "lastTradeDate": "LU",
        "lastCrawl": "LC",
        "inTheMoney": "ITM",
        "pChange": "PC",
        "priceToBook": "PB",
    }

    preset_filter = configparser.RawConfigParser()
    preset_filter.optionxform = str  # type: ignore
    choices = get_preset_choices()
    if preset not in choices:
        return pd.DataFrame(), "No data found"
    preset_filter.read(choices[preset])

    d_filters = {k: v for k, v in dict(preset_filter["FILTER"]).items() if v}
    # Turn the python dict repr into the JSON-style payload the API expects:
    # single quotes become double quotes and order-by tokens get re-quoted.
    s_filters = str(d_filters)
    s_filters = (
        s_filters.replace(": '", ": ")
        .replace("',", ",")
        .replace("'}", "}")
        .replace("'", '"')
    )
    for order in accepted_orders:
        s_filters = s_filters.replace(f" {order}", f' "{order}"')

    errors = check_presets(d_filters)
    if errors:
        return pd.DataFrame(), errors

    link = "https://api.syncretism.io/ops"
    # Timeout so a hung endpoint cannot block the terminal indefinitely.
    res = requests.get(
        link, headers={"Content-type": "application/json"}, data=s_filters, timeout=10
    )
    if res.status_code != 200:
        return pd.DataFrame(), f"Request Error: {res.status_code}"

    df_res = pd.DataFrame(res.json())
    if df_res.empty:
        return df_res, f"No options data found for preset: {preset}"

    df_res = df_res.rename(columns=d_cols)[list(d_cols.values())[:17]]
    df_res["Exp ∨"] = df_res["Exp ∨"].apply(
        lambda x: pd.to_datetime(x, unit="s").strftime("%m-%d-%y")
    )
    df_res["LU"] = df_res["LU"].apply(
        lambda x: pd.to_datetime(x, unit="s").strftime("%m-%d-%y")
    )
    df_res["Y"] = df_res["Y"].round(3)
    df_res["MY"] = df_res["MY"].round(3)
    return df_res, ""
@log_start_end(log=logger)
def check_presets(preset_dict: dict) -> str:
    """Checks option screener preset values

    Parameters
    ----------
    preset_dict: dict
        Defined presets from configparser

    Returns
    -------
    error: str
        String of all errors accumulated
    """
    float_list = [
        "min-iv",
        "max-iv",
        "min-oi",
        "max-oi",
        "min-strike",
        "max-strike",
        "min-volume",
        "max-volume",
        "min-voi",
        "max-voi",
        "min-diff",
        "max-diff",
        "min-ask-bid",
        "max-ask-bid",
        "min-exp",
        "max-exp",
        "min-price",
        "max-price",
        "min-price-20d",
        "max-price-20d",
        "min-volume-20d",
        "max-volume-20d",
        "min-iv-20d",
        "max-iv-20d",
        "min-delta-20d",
        "max-delta-20d",
        "min-gamma-20d",
        "max-gamma-20d",
        "min-theta-20d",
        "max-theta-20d",
        "min-vega-20d",
        "max-vega-20d",
        "min-rho-20d",
        "max-rho-20d",
        "min-price-100d",
        "max-price-100d",
        "min-volume-100d",
        "max-volume-100d",
        "min-iv-100d",
        "max-iv-100d",
        "min-delta-100d",
        "max-delta-100d",
        "min-gamma-100d",
        "max-gamma-100d",
        "min-theta-100d",
        "max-theta-100d",
        "min-vega-100d",
        "max-vega-100d",
        "min-rho-100d",
        "max-rho-100d",
        "min-sto",
        "max-sto",
        "min-yield",
        "max-yield",
        "min-myield",
        "max-myield",
        "min-delta",
        "max-delta",
        "min-gamma",
        "max-gamma",
        "min-theta",
        "max-theta",
        "min-vega",
        "max-vega",
        "min-cap",
        "max-cap",
    ]
    bool_list = ["active", "stock", "etf", "puts", "calls", "itm", "otm", "exclude"]
    error = ""
    for key, value in preset_dict.items():
        if key in float_list:
            try:
                float(value)
                if value.startswith("."):
                    error += f"{key} : {value} needs to be formatted with leading 0\n"
            except Exception:
                error += f"{key} : {value}, should be float\n"
        elif key in bool_list:
            if value not in ["true", "false"]:
                error += f"{key} : {value},  Should be [true/false]\n"
        elif key == "tickers":
            for symbol in value.split(","):
                try:
                    # Preset tickers are stored as quoted strings. Parse the
                    # literal safely instead of calling eval() on file content,
                    # which would execute arbitrary expressions from the preset.
                    if yf.Ticker(ast.literal_eval(symbol)).info["regularMarketPrice"] is None:
                        error += f"{key} : {symbol} not found on yfinance"
                except (ValueError, SyntaxError):
                    error += f"{key} : {value}, {symbol} failed"
        elif key == "limit":
            try:
                int(value)
            except Exception:
                error += f"{key} : {value} , should be integer\n"
        elif key == "order-by":
            if value.replace('"', "") not in accepted_orders:
                error += f"{key} : {value} not accepted ordering\n"
    if error:
        logging.exception(error)
    return error
__docformat__ = "numpy"
import configparser
import logging
import os
from typing import List, Optional, Union
import matplotlib.pyplot as plt
from openbb_terminal import config_plot as cfp
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
is_valid_axes_count,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options.screen import syncretism_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def view_available_presets(preset: str):
    """View available presets.

    Parameters
    ----------
    preset: str
        Chosen preset
    """
    if not preset:
        console.print("Please provide a preset template.")
        return

    preset_filter = configparser.RawConfigParser()
    preset_filter.optionxform = str  # type: ignore
    preset_filter.read(syncretism_model.get_preset_choices()[preset])

    headers = ["FILTER"]
    for position, header in enumerate(headers):
        console.print(f" - {header} -")
        # Only show filters that actually carry a value.
        active = {k: v for k, v in preset_filter[header].items() if v}
        if active:
            width = len(max(active, key=len)) + 2
            for name, setting in active.items():
                console.print(f"{name}{' ' * (width - len(name))}: {setting}")
        if position < len(headers) - 1:
            console.print("\n")
@log_start_end(log=logger)
def view_screener_output(
    preset: str,
    limit: int = 20,
    export: str = "",
) -> List:
    """Print the output of screener

    Parameters
    ----------
    preset: str
        Chosen preset
    limit: int
        Number of randomly sorted rows to display
    export: str
        Format for export file

    Returns
    -------
    List
        List of tickers screened
    """
    screen_df, error_msg = syncretism_model.get_screener_output(preset)
    if error_msg:
        console.print(error_msg, "\n")
        return []

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "scr",
        screen_df,
    )

    if limit > 0:
        screen_df = screen_df.head(limit)

    print_rich_table(
        screen_df,
        headers=list(screen_df.columns),
        show_index=False,
        title="Screener Output",
    )
    # De-duplicated list of the screened ticker symbols.
    return list(set(screen_df["S"].values.tolist()))
# pylint:disable=too-many-arguments
@log_start_end(log=logger)
def view_historical_greeks(
    symbol: str,
    expiry: str,
    strike: Union[float, str],
    greek: str = "Delta",
    chain_id: str = "",
    put: bool = False,
    raw: bool = False,
    limit: Union[int, str] = 20,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots historical greeks for a given option. [Source: Syncretism]

    Parameters
    ----------
    symbol: str
        Stock ticker
    expiry: str
        Expiration date
    strike: Union[str, float]
        Strike price to consider
    greek: str
        Greek variable to plot
    chain_id: str
        OCC option chain. Overwrites other variables
    put: bool
        Is this a put option?
    raw: bool
        Print to console
    limit: int
        Number of rows to show in raw
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = syncretism_model.get_historical_greeks(symbol, expiry, strike, chain_id, put)

    # Nothing to do if the request failed or returned no rows
    # (this also makes the former late `df.empty` re-check unnecessary)
    if df is None or df.empty:
        return

    if isinstance(limit, str):
        try:
            limit = int(limit)
        except ValueError:
            console.print(
                f"[red]Could not convert limit of {limit} to a number.[/red]\n"
            )
            return

    if raw:
        print_rich_table(
            df.tail(limit),
            headers=list(df.columns),
            title="Historical Greeks",
            show_index=True,
        )

    # Either create our own axes or validate the single axis passed in
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    try:
        greek_df = df[greek.lower()]
    except KeyError:
        console.print(f"[red]Could not find greek {greek} in data.[/red]\n")
        return

    # Greek on the left axis, underlying price on a twin right axis
    im1 = ax.plot(df.index, greek_df, label=greek.title(), color=theme.up_color)
    ax.set_ylabel(greek)
    ax1 = ax.twinx()
    im2 = ax1.plot(df.index, df.price, label="Stock Price", color=theme.down_color)
    ax1.set_ylabel(f"{symbol} Price")
    ax.set_title(
        f"{(greek).capitalize()} historical for {symbol.upper()} {strike} {['Call','Put'][put]}"
    )
    ax.set_xlim(df.index[0], df.index[-1])

    # Single combined legend for both axes
    ims = im1 + im2
    labels = [lab.get_label() for lab in ims]
    ax.legend(ims, labels, loc=0)
    theme.style_twin_axes(ax, ax1)
    if not external_axes:
        theme.visualize_output()

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "grhist",
        df,
    )
__docformat__ = "numpy"
import logging
import pandas as pd
import numpy as np
import requests
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_EODHD_TOKEN"])
def get_financials(
    symbol: str, statement: str, quarterly: bool = False, ratios: bool = False
) -> pd.DataFrame:
    """Get ticker financial statements from eodhd

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    statement: str
        Financial statement data to retrieve, can be balance, income or cash
    quarterly:bool
        Flag to get quarterly reports, by default False
    ratios: bool
        Shows percentage change, by default False

    Returns
    -------
    pd.DataFrame
        Balance Sheets or Income Statements or cashflow
    """
    # Note the filing date is over 30 years so will always get as many as allowed
    request_url = (
        "https://eodhistoricaldata.com/api/fundamentals/"
        f"{symbol}?"
        f"api_token={cfg.API_EODHD_KEY}"
        f"&filter=Financials::{statement}"
        f"::{['yearly', 'quarterly'][quarterly]}"
    )
    # Explicit timeout so a stalled API call cannot hang the terminal
    r = requests.get(request_url, timeout=10)
    if r.status_code != 200:
        console.print("[red]Invalid API Key for eodhistoricaldata [/red]")
        console.print(
            "Get your Key here: https://eodhistoricaldata.com/r/?ref=869U7F4J\n"
        )
        return pd.DataFrame()

    r_json = r.json()
    df_financials = pd.DataFrame(r_json)
    # Metadata rows are not statement values; one drop call instead of three,
    # and errors="ignore" keeps a partial payload from raising a KeyError
    df_financials.drop(
        ["date", "filing_date", "currency_symbol"], inplace=True, errors="ignore"
    )
    df_financials = df_financials.fillna(value=np.nan)
    df_financials = df_financials.dropna(how="all")

    if ratios:
        # Percentage change across the five most recent periods
        df_financials = df_financials.iloc[:, :5]
        df_financials = df_financials.replace("-", "0")
        df_financials = df_financials.astype(float)
        df_financials = df_financials.pct_change(axis="columns", periods=-1).fillna(0)

    return df_financials
__docformat__ = "numpy"
import logging
from datetime import datetime
from typing import Any, Dict, List, Union
import numpy as np
import pandas as pd
import yfinance as yf
from openpyxl import Workbook
from openpyxl.styles import Font
from openpyxl.styles.numbers import FORMAT_PERCENTAGE_00
from sklearn.linear_model import LinearRegression
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_rf
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis import dcf_model, dcf_static
# pylint: disable=C0302
logger = logging.getLogger(__name__)
class CreateExcelFA:
    @log_start_end(log=logger)
    def __init__(
        self,
        symbol: str,
        audit: bool = False,
        ratios: bool = True,
        len_pred: int = 10,
        max_similars: int = 3,
        no_filter: bool = False,
        growth: bool = False,
    ):
        """
        Creates a detailed DCF for a given company

        Parameters
        ----------
        symbol : str
            The ticker symbol to create a DCF for
        audit : bool
            Whether or not to show that the balance sheet and income statement tie-out
        ratios : bool
            Whether to show ratios for the company and for similar companies
        len_pred : int
            The number of years to make predictions for before assuming a terminal value
        max_similars : int
            The maximum number of similar companies to show, will be less if there are not enough
        no_filter : bool
            Disable filtering of similar companies to being in the same market cap category
        growth : bool
            When true this turns the revenue assumption from linear regression to percentage growth
        """
        # Run configuration; len_data and rounding are filled in by get_data()
        self.info: Dict[str, Any] = {
            "len_data": 0,
            "len_pred": len_pred,
            "max_similars": max_similars,
            "rounding": 0,
            "symbol": symbol,
            "audit": audit,
            "ratios": ratios,
            "no_filter": no_filter,
        }
        # Index into dcf_static.letters used to label footnotes on the
        # Explanations sheet (advanced by get_growth/custom_exp)
        self.letter: int = 0
        self.growth = growth
        # Worksheet row where each statement section starts on the Financials sheet
        self.starts: Dict[str, int] = {"IS": 4, "BS": 18, "CF": 47}
        self.wb: Workbook = Workbook()
        # Sheet 1: financial statements, sheet 2: DCF, sheet 3: footnote text
        self.ws: Dict[int, Any] = {
            1: self.wb.active,
            2: self.wb.create_sheet("Free Cash Flows"),
            3: self.wb.create_sheet("Explanations"),
        }
        # Raw statement dataframes; get_data() also writes them into sheet 1
        self.df: Dict[str, pd.DataFrame] = {
            "BS": self.get_data("BS", self.starts["BS"], False),
            "IS": self.get_data("IS", self.starts["IS"], True),
            "CF": self.get_data("CF", self.starts["CF"], False),
        }
        # External market data: yfinance info dict, risk-free rate and the
        # Fama-French cost of equity (network calls happen here)
        self.data: Dict[str, Any] = {
            "now": datetime.now().strftime("%Y-%m-%d"),
            "info": yf.Ticker(symbol).info,
            "t_bill": get_rf(),
            "r_ff": dcf_model.get_fama_coe(self.info["symbol"]),
        }
    @log_start_end(log=logger)
    def create_workbook(self):
        """Build every sheet of the workbook and save it to a unique path."""
        self.ws[1].title = "Financials"
        self.ws[1].column_dimensions["A"].width = 25
        self.ws[2].column_dimensions["A"].width = 22
        # Uniform width for the data columns on the first two sheets
        for column in dcf_static.letters[1:21]:
            self.ws[1].column_dimensions[column].width = 14
        for column in dcf_static.letters[1:21]:
            self.ws[2].column_dimensions[column].width = 14
        self.ws[3].column_dimensions["A"].width = 3
        # Banner on every sheet
        for _, value in self.ws.items():
            self.create_header(value)
        self.df["BS"], self.df["IS"], self.df["CF"] = dcf_model.clean_dataframes(
            self.df["BS"], self.df["IS"], self.df["CF"]
        )
        self.add_estimates()
        self.create_dcf()
        if self.info["ratios"]:
            self.add_ratios()
        if self.info["audit"]:
            self.run_audit()
        # Probe for the first unused output path so prior runs are not overwritten
        i = 0
        while True:
            path = dcf_model.generate_path(i, self.info["symbol"], self.data["now"])
            path.parent.mkdir(parents=True, exist_ok=True)
            if not path.is_file():
                self.wb.save(path)
                console.print(f"Analysis for {self.info['symbol']} At:\n{path}.\n")
                break
            i += 1
    @log_start_end(log=logger)
    def get_data(self, statement: str, row: int, header: bool) -> pd.DataFrame:
        """Fetch one financial statement and write it onto the Financials sheet.

        Parameters
        ----------
        statement : str
            Statement key: "BS", "IS" or "CF"
        row : int
            Worksheet row where the statement's title is written
        header : bool
            Whether to also write the year headers above the data columns

        Returns
        -------
        pd.DataFrame
            The raw statement dataframe

        Raises
        ------
        ValueError
            If no dataframe could be generated for the ticker symbol
        """
        df, rounding, _ = dcf_model.create_dataframe(self.info["symbol"], statement)
        if df.empty:
            raise ValueError("Could not generate a dataframe for the ticker symbol")
        self.info["rounding"] = rounding
        # First statement fetched fixes the number of historical columns
        if not self.info["len_data"]:
            self.info["len_data"] = len(df.columns)
        self.ws[1][f"A{row}"] = dcf_static.statement_titles[statement]
        self.ws[1][f"A{row}"].font = dcf_static.bold_font
        rowI = row + 1
        names = df.index.values.tolist()
        # Write row labels; subtotal rows get bold font and a top border across
        # historical plus (except for CF) prediction columns
        for name in names:
            self.ws[1][f"A{rowI}"] = name
            if name in dcf_static.sum_rows:
                length = self.info["len_data"] + (
                    self.info["len_pred"] if statement != "CF" else 0
                )
                for i in range(length):
                    if statement == "CF" and name == "Net Income":
                        # Net Income is styled in the income statement already
                        pass
                    else:
                        self.ws[1][
                            f"{dcf_static.letters[i+1]}{rowI}"
                        ].font = dcf_static.bold_font
                        self.ws[1][
                            f"{dcf_static.letters[i+1]}{rowI}"
                        ].border = dcf_static.thin_border_top
            rowI += 1
        # Write values column by column, coercing strings like "1,234" to floats
        column = 1
        for key, value in df.items():
            rowI = row
            if header:
                dcf_model.set_cell(
                    self.ws[1],
                    f"{dcf_static.letters[column]}{rowI}",
                    float(key),
                    font=dcf_static.bold_font,
                )
            for item in value:
                rowI += 1
                try:
                    m = 0 if item is None else float(item.replace(",", ""))
                except ValueError:
                    # Non-numeric entries (e.g. "-") become 0
                    m = 0
                dcf_model.set_cell(
                    self.ws[1],
                    f"{dcf_static.letters[column]}{rowI}",
                    m,
                    num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
                )
            column += 1
        return df
    @log_start_end(log=logger)
    def add_estimates(self):
        """Fill the prediction columns of the Financials sheet.

        Writes the forecast-year headers, draws the green coefficient box, and
        then projects every statement line either with get_growth (regression
        or growth estimate) or get_sum (sum of other projected rows). Order
        matters: each get_sum consumes rows projected earlier.
        """
        last_year = self.df["BS"].columns[-1]  # Replace with columns in DF
        col = self.info["len_data"] + 1
        # Year headers for the forecast columns
        for i in range(self.info["len_pred"]):
            dcf_model.set_cell(
                self.ws[1],
                f"{dcf_static.letters[col+i]}4",
                int(last_year) + 1 + i,
                font=dcf_static.bold_font,
            )
        # Green two-column box that will hold the linear-model m and b values
        for i in range(41):
            col = self.info["len_pred"] + self.info["len_data"] + 3
            dcf_model.set_cell(
                self.ws[1],
                f"{dcf_static.letters[col]}{3+i}",
                fill=dcf_static.green_bg,
                border=dcf_static.thin_border_nr,
            )
            dcf_model.set_cell(
                self.ws[1],
                f"{dcf_static.letters[col+1]}{3+i}",
                fill=dcf_static.green_bg,
                border=dcf_static.thin_border_nl,
            )
        dcf_model.set_cell(
            self.ws[1],
            f"{dcf_static.letters[col]}3",
            "Linear model",
            alignment=dcf_static.center,
        )
        self.ws[1].merge_cells(
            f"{dcf_static.letters[col]}3:{dcf_static.letters[col+1]}3"
        )
        dcf_model.set_cell(self.ws[1], f"{dcf_static.letters[col]}4", "m")
        dcf_model.set_cell(self.ws[1], f"{dcf_static.letters[col+1]}4", "b")
        # --- Income statement projections ---
        rev_pred_type = "growth" if self.growth else "linear"
        self.get_growth("Date", "Revenue", pred_type=rev_pred_type)
        self.get_growth("Revenue", "Cost of Revenue")
        self.get_sum("Gross Profit", "Revenue", [], ["Cost of Revenue"])
        self.get_growth("Revenue", "Selling, General & Admin", True)
        self.get_growth("Revenue", "Research & Development", True)
        self.get_growth("Revenue", "Other Operating Expenses")
        self.get_sum(
            "Operating Income",
            "Gross Profit",
            [],
            [
                "Selling, General & Admin",
                "Research & Development",
                "Other Operating Expenses",
            ],
        )
        self.get_growth("Revenue", "Preferred Dividends")
        self.get_growth("Revenue", "Interest Expense / Income")
        self.get_growth("Revenue", "Other Expense / Income")
        self.get_growth("Operating Income", "Income Tax")
        self.get_sum(
            "Net Income",
            "Operating Income",
            [],
            ["Interest Expense / Income", "Other Expense / Income", "Income Tax"],
        )
        self.custom_exp(
            "Preferred Dividends",
            "Preferred Dividends are not important in a DCF so we do not attempt to predict them.",
        )
        # --- Balance sheet projections: assets ---
        self.get_growth("Revenue", "Cash & Equivalents", True)
        self.get_growth("Revenue", "Short-Term Investments", True)
        self.get_sum(
            "Cash & Cash Equivalents",
            "Cash & Equivalents",
            ["Short-Term Investments"],
            [],
        )
        self.get_growth("Revenue", "Receivables", True)
        self.get_growth("Revenue", "Inventory", True)
        self.get_growth("Revenue", "Other Current Assets")
        self.get_sum(
            "Total Current Assets",
            "Cash & Cash Equivalents",
            ["Receivables", "Inventory", "Other Current Assets"],
            [],
        )
        self.get_growth("Revenue", "Property, Plant & Equipment", True)
        self.get_growth("Revenue", "Long-Term Investments", True)
        self.get_growth("Revenue", "Goodwill and Intangibles", True)
        self.get_growth("Revenue", "Other Long-Term Assets")
        self.get_sum(
            "Total Long-Term Assets",
            "Property, Plant & Equipment",
            [
                "Long-Term Investments",
                "Goodwill and Intangibles",
                "Other Long-Term Assets",
            ],
            [],
        )
        self.get_sum(
            "Total Assets", "Total Current Assets", ["Total Long-Term Assets"], []
        )
        # --- Balance sheet projections: liabilities and equity ---
        self.get_growth("Revenue", "Accounts Payable")
        self.get_growth("Revenue", "Deferred Revenue")
        self.get_growth("Revenue", "Current Debt")
        self.get_growth("Revenue", "Other Current Liabilities")
        self.get_sum(
            "Total Current Liabilities",
            "Accounts Payable",
            ["Deferred Revenue", "Current Debt", "Other Current Liabilities"],
            [],
        )
        self.get_sum(
            "Long-Term Debt",
            "Total Assets",
            [],
            [
                "Total Current Liabilities",
                "Other Long-Term Liabilities",
                "Shareholders' Equity",
            ],
            text=(
                "This is the plug. For more information on plugs visit https://corporatefina"
                "nceinstitute.com/resources/questions/model-questions/financial-modeling-plug/"
            ),
        )  # This is the plug
        self.get_growth("Revenue", "Other Long-Term Liabilities")
        self.get_sum(
            "Total Long-Term Liabilities",
            "Long-Term Debt",
            ["Other Long-Term Liabilities"],
            [],
        )
        self.get_sum(
            "Total Liabilities",
            "Total Current Liabilities",
            ["Total Long-Term Liabilities"],
            [],
        )
        self.get_growth("Revenue", "Common Stock")
        col = self.info["len_data"] + 1
        rer = self.title_to_row("Retained Earnings")
        nir = self.title_to_row("Net Income")
        # Retained earnings roll forward: prior-year balance plus net income
        for i in range(self.info["len_pred"]):
            dcf_model.set_cell(
                self.ws[1],
                f"{dcf_static.letters[col+i]}{rer}",
                f"={dcf_static.letters[col+i]}{nir}+{dcf_static.letters[col+i-1]}{rer}",
                num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
            )
        self.get_growth("Revenue", "Comprehensive Income")
        self.get_sum(
            "Shareholders' Equity",
            "Common Stock",
            ["Retained Earnings", "Comprehensive Income"],
            [],
        )
        self.get_sum(
            "Total Liabilities and Equity",
            "Total Liabilities",
            ["Shareholders' Equity"],
            [],
        )
        dcf_model.set_cell(
            self.ws[1],
            "A65",
            (
                "Warning: Stock Analysis does not have all of the cash flow items included. Operating"
                ", Financing, and Investing Cash Flows may not add up to total cash flows."
            ),
            font=dcf_static.red,
        )
@log_start_end(log=logger)
def create_dcf(self):
self.ws[2]["A5"] = "Net Income"
self.ws[2]["A6"] = "Change in NWC"
self.ws[2]["A7"] = "Change in Capex"
self.ws[2]["A8"] = "Preferred Dividends"
self.ws[2]["A9"] = "Free Cash Flows"
r = 4
c1 = dcf_static.letters[self.info["len_pred"] + 3]
c2 = dcf_static.letters[self.info["len_pred"] + 4]
c3 = dcf_static.letters[self.info["len_pred"] + 5]
for i in range(self.info["len_pred"]):
j = 1 + i + self.info["len_data"]
cols = dcf_static.letters
dcf_model.set_cell(
self.ws[2],
f"{cols[1+i]}4",
f"=Financials!{cols[j]}4",
font=dcf_static.bold_font,
)
dcf_model.set_cell(
self.ws[2],
f"{cols[1+i]}5",
f"=Financials!{cols[j]}{self.title_to_row('Net Income')}",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(
self.ws[2],
f"{dcf_static.letters[1+i]}6",
(
f"=Financials!{cols[j]}{self.title_to_row('Total Current Assets')}"
f"-Financials!{cols[j-1]}{self.title_to_row('Total Current Assets')}"
f"-Financials!{cols[j]}{self.title_to_row('Total Current Liabilities')}"
f"+Financials!{cols[j-1]}{self.title_to_row('Total Current Liabilities')}"
),
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(
self.ws[2],
f"{dcf_static.letters[1+i]}7",
(
f"=Financials!{cols[j]}{self.title_to_row('Total Long-Term Assets')}"
f"-Financials!{cols[j-1]}{self.title_to_row('Total Long-Term Assets')}"
),
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(
self.ws[2],
f"{dcf_static.letters[1+i]}8",
f"=Financials!{cols[j]}{self.title_to_row('Preferred Dividends')}",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(
self.ws[2],
f"{cols[1+i]}9",
f"={cols[1+i]}5-{cols[1+i]}6-{cols[1+i]}7-{cols[1+i]}8",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
font=dcf_static.bold_font,
border=dcf_static.thin_border_top,
)
dcf_model.set_cell(
self.ws[2],
f"{cols[1+self.info['len_pred']]}9",
f"=({cols[self.info['len_pred']]}9*(1+{c2}"
f"{r+15}))/({c2}{r+11}-{c2}{r+15})",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(
self.ws[2],
f"{c1}{r-2}",
"Note: We do not allow r values to go below 0.5%.",
font=dcf_static.red,
)
self.ws[2].merge_cells(f"{c1}{r}:{c2}{r}")
for x in [c1, c2]:
dcf_model.set_cell(
self.ws[2], f"{x}{r}", border=dcf_static.thin_border_bottom
)
dcf_model.set_cell(
self.ws[2],
f"{c1}{r}",
"Discount Rate",
alignment=dcf_static.center,
border=dcf_static.thin_border_bottom,
)
# CAPM
dcf_model.set_cell(self.ws[2], f"{c1}{r+1}", "Risk Free Rate")
dcf_model.set_cell(
self.ws[2],
f"{c2}{r+1}",
float(self.data["t_bill"]) / 100,
num_form=FORMAT_PERCENTAGE_00,
)
self.custom_exp(r + 1, "Pulled from US Treasurey.", 2, f"{c3}")
dcf_model.set_cell(self.ws[2], f"{c1}{r+2}", "Market Rate")
dcf_model.set_cell(
self.ws[2], f"{c2}{r+2}", 0.08, num_form=FORMAT_PERCENTAGE_00
)
self.custom_exp(
r + 2, "Average return of the S&P 500 is 8% [Investopedia]", 2, f"{c3}"
)
dcf_model.set_cell(self.ws[2], f"{c1}{r+3}", "Beta")
if self.data["info"]["beta"] is None:
dcf_model.set_cell(self.ws[2], f"{c2}{r+3}", float(1))
self.custom_exp(
r + 3, "Warning: Beta not found. Assumed a beta of one.", 2, f"{c3}"
)
self.data["info"]["beta"] = 1
else:
dcf_model.set_cell(
self.ws[2], f"{c2}{r+3}", float(self.data["info"]["beta"])
)
self.custom_exp(r + 3, "Beta from yahoo finance", 2, f"{c3}")
dcf_model.set_cell(self.ws[2], f"{c1}{r+4}", "r (CAPM)")
dcf_model.set_cell(
self.ws[2],
f"{c2}{r+4}",
f"=max((({c2}{r+2}-{c2}{r+1})*{c2}{r+3})+{c2}{r+1},0.005)",
num_form=FORMAT_PERCENTAGE_00,
border=dcf_static.thin_border_top,
font=dcf_static.bold_font,
)
# Fama French
dcf_model.set_cell(self.ws[2], f"{c1}{r+7}", "Fama French")
dcf_model.set_cell(
self.ws[2],
f"{c2}{r+7}",
f"=max({self.data['r_ff']},0.005)",
num_form=FORMAT_PERCENTAGE_00,
)
self.custom_exp(
r + 7,
(
"Calculated using the Fama and French Three-Factor model. For more"
"information visit https://www.investopedia.com/terms/f/famaandfrenchthreefactormodel.asp."
),
2,
f"{c3}",
)
# Decide
for x in [c1, c2]:
dcf_model.set_cell(
self.ws[2], f"{x}{r+9}", border=dcf_static.thin_border_bottom
)
self.ws[2].merge_cells(f"{c1}{r+9}:{c2}{r+9}")
dcf_model.set_cell(
self.ws[2],
f"{c1}{r+9}",
"Choose model",
border=dcf_static.thin_border_bottom,
alignment=dcf_static.center,
num_form=FORMAT_PERCENTAGE_00,
)
dcf_model.set_cell(self.ws[2], f"{c1}{r+10}", "Model")
dcf_model.set_cell(self.ws[2], f"{c2}{r+10}", "Fama French")
dcf_model.set_cell(self.ws[2], f"{c3}{r+10}", "Type 'Fama French' or 'CAPM'")
dcf_model.set_cell(self.ws[2], f"{c1}{r+11}", "r")
dcf_model.set_cell(
self.ws[2],
f"{c2}{r+11}",
f'=if({c2}{r+10}="Fama French",{c2}{r+7},if({c2}{r+10}="CAPM",{c2}{r+4},"Invalid Selection"))',
num_form=FORMAT_PERCENTAGE_00,
)
dcf_model.set_cell(self.ws[2], f"{c1}{r+15}", "Long Term Growth")
dcf_model.set_cell(
self.ws[2],
f"{c2}{r+15}",
f"=min(0.04,{c2}{r+11}*0.9)",
num_form=FORMAT_PERCENTAGE_00,
)
dcf_model.set_cell(self.ws[2], "A11", "Value from Operations")
dcf_model.set_cell(
self.ws[2],
"B11",
f"=NPV({c2}{r+11},B9:{dcf_static.letters[self.info['len_pred']+1]}9)",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(self.ws[2], "A12", "Cash and Cash Equivalents")
dcf_model.set_cell(
self.ws[2],
"B12",
f"=financials!{dcf_static.letters[self.info['len_data']]}{self.title_to_row('Cash & Cash Equivalents')}",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(self.ws[2], "A13", "Intrinsic Value (sum)")
dcf_model.set_cell(
self.ws[2],
"B13",
"=B11+B12",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(self.ws[2], "A14", "Debt Obligations")
dcf_model.set_cell(
self.ws[2],
"B14",
f"=financials!{dcf_static.letters[self.info['len_data']]}"
+ f"{self.title_to_row('Total Long-Term Liabilities')}",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(self.ws[2], "A15", "Firm value without debt")
dcf_model.set_cell(
self.ws[2],
"B15",
(
f"=max(B13-B14,"
f"Financials!{dcf_static.letters[self.info['len_data']]}"
+ f"{self.title_to_row('Total Assets')}"
f"-Financials!{dcf_static.letters[self.info['len_data']]}"
+ f"{self.title_to_row('Total Liabilities')})"
),
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(
self.ws[2],
"C15",
*(
f"=if((B13-B14)>"
f"(Financials!{dcf_static.letters[self.info['len_data']]}"
+ f"{self.title_to_row('Total Assets')}"
f"-Financials!{dcf_static.letters[self.info['len_data']]}"
+ f"{self.title_to_row('Total Liabilities')}),"
'"","Note: Total assets minus total liabilities exceeds projected firm value without debt.'
+ ' Value shown is total assets minus total liabilities.")',
),
font=dcf_static.red,
)
dcf_model.set_cell(self.ws[2], "A16", "Shares Outstanding")
dcf_model.set_cell(
self.ws[2], "B16", int(self.data["info"]["sharesOutstanding"])
)
dcf_model.set_cell(self.ws[2], "A17", "Shares Price")
dcf_model.set_cell(
self.ws[2],
"B17",
f"=(B15*{self.info['rounding']})/B16",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
dcf_model.set_cell(self.ws[2], "A18", "Actual Price")
dcf_model.set_cell(
self.ws[2], "B18", float(self.data["info"]["regularMarketPrice"])
)
@log_start_end(log=logger)
def create_header(self, ws: Workbook):
for i in range(10):
dcf_model.set_cell(
ws, f"{dcf_static.letters[i]}1", border=dcf_static.thin_border
)
ws.merge_cells("A1:J1")
dcf_model.set_cell(
ws,
"A1",
f"OpenBB Terminal Analysis: {self.info['symbol'].upper()}",
font=Font(color="04cca8", size=20),
border=dcf_static.thin_border,
alignment=dcf_static.center,
)
dcf_model.set_cell(
ws, "A2", f"DCF for {self.info['symbol']} generated on {self.data['now']}"
)
    @log_start_end(log=logger)
    def run_audit(self):
        """Append tie-out checks below the statements on the Financials sheet.

        Each audit row recomputes a subtotal from its components minus the
        reported subtotal; every result should be 0 (up to rounding) if the
        statements are internally consistent.
        """
        start = 67
        for i, value in enumerate(dcf_static.sum_rows):
            dcf_model.set_cell(self.ws[1], f"A{start + i}", value)
        self.ws[1].merge_cells(f"A{start-2}:K{start-2}")
        dcf_model.set_cell(
            self.ws[1],
            f"A{start - 2}",
            "Financial Statement Audit",
            font=Font(color="FF0000"),
            alignment=dcf_static.center,
        )
        dcf_model.set_cell(
            self.ws[1],
            f"A{start - 1}",
            "Audit ensures data integrity. Numbers should be 0 (with slight rounding difference).",
        )
        # Income statement tie-outs
        self.get_sum(start, "Revenue", [], ["Cost of Revenue", "Gross Profit"], True)
        self.get_sum(
            start + 1,
            "Gross Profit",
            [],
            [
                "Selling, General & Admin",
                "Research & Development",
                "Other Operating Expenses",
                "Operating Income",
            ],
            True,
        )
        self.get_sum(
            start + 2,
            "Operating Income",
            [],
            [
                "Interest Expense / Income",
                "Other Expense / Income",
                "Income Tax",
                "Net Income",
            ],
            True,
        )
        # Balance sheet tie-outs
        self.get_sum(
            start + 3,
            "Cash & Equivalents",
            ["Short-Term Investments"],
            ["Cash & Cash Equivalents"],
            True,
        )
        self.get_sum(
            start + 4,
            "Cash & Cash Equivalents",
            ["Receivables", "Inventory", "Other Current Assets"],
            ["Total Current Assets"],
            True,
        )
        self.get_sum(
            start + 5,
            "Property, Plant & Equipment",
            [
                "Long-Term Investments",
                "Goodwill and Intangibles",
                "Other Long-Term Assets",
            ],
            ["Total Long-Term Assets"],
            True,
        )
        self.get_sum(
            start + 6,
            "Total Current Assets",
            ["Total Long-Term Assets"],
            ["Total Assets"],
            True,
        )
        self.get_sum(
            start + 7,
            "Accounts Payable",
            ["Deferred Revenue", "Current Debt", "Other Current Liabilities"],
            ["Total Current Liabilities"],
            True,
        )
        self.get_sum(
            start + 8,
            "Long-Term Debt",
            ["Other Long-Term Liabilities"],
            ["Total Long-Term Liabilities"],
            True,
        )
        self.get_sum(
            start + 9,
            "Total Current Liabilities",
            ["Total Long-Term Liabilities"],
            ["Total Liabilities"],
            True,
        )
        self.get_sum(
            start + 10,
            "Common Stock",
            ["Retained Earnings", "Comprehensive Income"],
            ["Shareholders' Equity"],
            True,
        )
        self.get_sum(
            start + 11,
            "Total Liabilities",
            ["Shareholders' Equity"],
            ["Total Liabilities and Equity"],
            True,
        )
        # Cash flow statement tie-outs
        self.get_sum(
            start + 12,
            "Net Income",
            [
                "Depreciation & Amortization",
                "Share-Based Compensation",
                "Other Operating Activities",
            ],
            ["Operating Cash Flow"],
            True,
        )
        self.get_sum(
            start + 13,
            "Capital Expenditures",
            ["Acquisitions", "Change in Investments", "Other Investing Activities"],
            ["Investing Cash Flow"],
            True,
        )
        self.get_sum(
            start + 14,
            "Dividends Paid",
            [
                "Share Issuance / Repurchase",
                "Debt Issued / Paid",
                "Other Financing Activities",
            ],
            ["Financing Cash Flow"],
            True,
        )
        self.get_sum(
            start + 15,
            "Operating Cash Flow",
            ["Investing Cash Flow", "Financing Cash Flow"],
            ["Net Cash Flow"],
            True,
        )
@log_start_end(log=logger)
def get_growth(
self, x_ind: str, y_ind: str, no_neg: bool = False, pred_type: str = "linear"
):
"""Add growth to a column. Usually this is linear but other options are allowed
x_ind: str
The x variable to use. This is unused if growth is the pred_type
y_ind: str
The y variable to use
no_neg: bool
Whether or not the value can have negative numbers (profit can be revenue cannot)
pred_type: str
This is assumed to be linear but growth is allowed as well
"""
x_type = "IS" if x_ind in self.df["IS"].index else "BS"
y_type = "IS" if y_ind in self.df["IS"].index else "BS"
x_df = self.df["IS"] if x_type == "IS" else self.df["BS"]
y_df = self.df["IS"] if y_type == "IS" else self.df["BS"]
analy_message = ""
row1 = (
y_df.index.get_loc(y_ind)
+ 1
+ (self.starts["IS"] if y_type == "IS" else self.starts["BS"])
)
col = self.info["len_pred"] + self.info["len_data"] + 3
if pred_type == "linear":
pre_x = (
x_df.columns.to_numpy()
if x_ind == "Date"
else x_df.loc[x_ind].to_numpy()
)
vfunc = np.vectorize(dcf_model.string_float)
pre_x = vfunc(pre_x)
if x_ind == "Date":
pre_x = pre_x - np.min(pre_x)
x = pre_x.reshape((-1, 1))
pre_y = y_df.loc[y_ind].to_numpy()
y = vfunc(pre_y)
model = LinearRegression().fit(x, y)
r_sq = model.score(x, y)
r = abs(r_sq**0.5)
if r > 0.9:
strength = "very strong"
elif r > 0.7:
strength = "strong"
elif r > 0.5:
strength = "moderate"
elif r > 0.3:
strength = "weak"
else:
strength = "very weak"
analy_message = (
f"The correlation between {x_ind.lower()} and {y_ind.lower()}"
f" is {strength} with a correlation coefficient of {r:.4f}."
)
dcf_model.set_cell(
self.ws[1], f"{dcf_static.letters[col]}{row1}", float(model.coef_)
)
dcf_model.set_cell(
self.ws[1],
f"{dcf_static.letters[col+1]}{row1}",
float(model.intercept_),
)
dcf_model.set_cell(
self.ws[1],
f"{dcf_static.letters[col+2]}{row1}",
dcf_static.letters[self.letter],
font=dcf_static.red,
)
dcf_model.set_cell(
self.ws[3],
f"A{self.letter+4}",
dcf_static.letters[self.letter],
font=dcf_static.red,
)
elif pred_type == "growth":
pre_y = y_df.loc[y_ind].to_numpy()
fin_y = []
for item in pre_y:
try:
fin_y.append(float(item))
except TypeError:
fin_y.append(0)
overall_growth = (fin_y[-1] - fin_y[0]) / fin_y[0]
yearly_growth = overall_growth / len(fin_y)
analy_message = (
f"Assuming growth percentage of {(yearly_growth * 100):.2f}%"
)
dcf_model.set_cell(
self.ws[1], f"{dcf_static.letters[col]}{row1}", yearly_growth
)
dcf_model.set_cell(
self.ws[1],
f"{dcf_static.letters[col+2]}{row1}",
dcf_static.letters[self.letter],
font=dcf_static.red,
)
dcf_model.set_cell(self.ws[3], f"B{self.letter+4}", analy_message)
col = self.info["len_data"] + 1
for i in range(self.info["len_pred"]):
if pred_type == "growth":
row_n = (
y_df.index.get_loc(y_ind) + 1 + self.starts["IS"]
if y_type == "IS"
else self.starts["BS"]
)
base = (
f"({dcf_static.letters[col+i-1]}{row1}* (1+"
f"{dcf_static.letters[col+self.info['len_pred']+2]}{row1}))"
)
elif x_ind == "Date":
base = (
f"(({dcf_static.letters[col+i]}4-B4)*"
f"{dcf_static.letters[col+self.info['len_pred']+2]}"
f"{row1})+{dcf_static.letters[col+self.info['len_pred']+3]}{row1}"
)
else:
row_n = (
x_df.index.get_loc(x_ind) + 1 + self.starts["IS"]
if x_type == "IS"
else self.starts["BS"]
)
base = (
f"({dcf_static.letters[col+i]}{row_n}*"
f"{dcf_static.letters[col+self.info['len_pred']+2]}{row1})"
f"+{dcf_static.letters[col+self.info['len_pred']+3]}{row1}"
)
dcf_model.set_cell(
self.ws[1],
f"{dcf_static.letters[col+i]}{row1}",
f"=max({base},0)" if no_neg else f"={base}",
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
self.letter += 1
@log_start_end(log=logger)
def get_sum(
self,
row: Union[int, str],
first: str,
adds: List[str],
subtracts: List[str],
audit: bool = False,
text: str = None,
):
col = 1 if audit else self.info["len_data"] + 1
for i in range(self.info["len_data"] if audit else self.info["len_pred"]):
sum_formula = f"={dcf_static.letters[col+i]}{self.title_to_row(first)}"
for item in adds:
sum_formula += f"+{dcf_static.letters[col+i]}{self.title_to_row(item)}"
for item in subtracts:
sum_formula += f"-{dcf_static.letters[col+i]}{self.title_to_row(item)}"
rowI = row if isinstance(row, int) else self.title_to_row(row)
dcf_model.set_cell(
self.ws[1],
f"{dcf_static.letters[col+i]}{rowI}",
sum_formula,
num_form="[$$-409]#,##0.00;[RED]-[$$-409]#,##0.00",
)
if text:
self.custom_exp(row, text)
@log_start_end(log=logger)
def title_to_row(self, title: str) -> int:
df = (
self.df["IS"]
if title in self.df["IS"].index
else self.df["BS"]
if title in self.df["BS"].index
else self.df["CF"]
)
ind = (
df.index.get_loc(title)
+ 1
+ (
self.starts["IS"]
if title in self.df["IS"].index
else self.starts["BS"]
if title in self.df["BS"].index
else self.starts["CF"]
)
)
return ind
    @log_start_end(log=logger)
    def custom_exp(
        self, row: Union[int, str], text: str, ws: int = 1, column: Union[str, None] = None
    ):
        """Attach a lettered footnote marker to a cell and record its text.

        Parameters
        ----------
        row : Union[int, str]
            Row number, or a line-item title resolved via title_to_row (ws 1 only)
        text : str
            Explanation written next to the letter on the Explanations sheet
        ws : int
            Target sheet: 1 (Financials) or 2 (Free Cash Flows)
        column : Union[str, None]
            Column letter for the marker when ws is 2
        """
        if ws == 1:
            rowT = row if isinstance(row, int) else self.title_to_row(row)
            col = self.info["len_pred"] + self.info["len_data"] + 3
            # Red footnote letter two columns right of the coefficient box
            dcf_model.set_cell(
                self.ws[1],
                f"{dcf_static.letters[col+2]}{rowT}",
                dcf_static.letters[self.letter],
                font=dcf_static.red,
            )
        if ws == 2:
            dcf_model.set_cell(
                self.ws[2],
                f"{column}{row}",
                dcf_static.letters[self.letter],
                font=dcf_static.red,
            )
        # Letter plus its explanation on the Explanations sheet
        dcf_model.set_cell(
            self.ws[3],
            f"A{self.letter+4}",
            dcf_static.letters[self.letter],
            font=dcf_static.red,
        )
        dcf_model.set_cell(self.ws[3], f"B{self.letter+4}", text)
        # Advance to the next footnote letter
        self.letter += 1
    @log_start_end(log=logger)
    def add_ratios(self):
        """Add a "Ratios" sheet comparing this company with similar companies.

        For each company (this ticker first, then up to max_similars peers)
        a block of liquidity, activity, profitability, coverage and investor
        ratios is written, one column per fiscal year.
        """
        self.ws[4] = self.wb.create_sheet("Ratios")
        self.create_header(self.ws[4])
        self.ws[4].column_dimensions["A"].width = 27
        dcf_model.set_cell(self.ws[4], "B4", "Sector:")
        dcf_model.set_cell(self.ws[4], "C4", self.data["info"]["sector"])
        similar_data = dcf_model.get_similar_dfs(
            self.info["symbol"],
            self.data["info"],
            self.info["max_similars"],
            self.info["no_filter"],
        )
        # This company leads the comparison list
        similar_data.insert(
            0, [self.info["symbol"], [self.df["BS"], self.df["IS"], self.df["CF"]]]
        )
        row = 6
        for val in similar_data:
            self.ws[4].merge_cells(f"A{row}:J{row}")
            # Header columns formatted as row index, text, format type
            titles = [
                [0, val[0], 1],
                [1, "Liquidity Ratios", 2],
                [2, "Current Ratio", 0],
                [3, "Quick Ratio", 0],
                [5, "Activity Ratios", 2],
                [6, "AR Turnover", 0],
                [7, "Days Sales in AR", 0],
                [8, "Inventory Turnover", 0],
                [9, "Days in Inventory", 0],
                [10, "Average Payable Turnover", 0],
                [11, "Days of Payables Outstanding", 0],
                [12, "Cash Conversion Cycle", 0],
                [13, "Asset Turnover", 0],
                [15, "Profitability Ratios", 2],
                [16, "Profit Margin", 0],
                [17, "Return on Assets", 0],
                [18, "Return on Equity", 0],
                [19, "Return on Sales", 0],
                [20, "Gross Margin", 0],
                [21, "Operating Cash Flow Ratio", 0],
                [23, "Coverage Ratios", 2],
                [24, "Debt-to-Equity", 0],
                [25, "Total Debt Ratio", 0],
                [26, "Equity Multiplier", 0],
                [27, "Times Interest Earned", 0],
                [29, "Investor Ratios", 2],
                [30, "Earnings Per Share", 0],
                [31, "Price Earnings Ratio", 0],
            ]
            for j in titles:
                if j[2] == 1:
                    # Company name: centered
                    align = dcf_static.center
                    dcf_model.set_cell(
                        self.ws[4], f"A{row+j[0]}", j[1], alignment=align
                    )
                elif j[2] == 2:
                    # Section heading: bold with border
                    border = dcf_static.thin_border
                    font = dcf_static.bold_font
                    dcf_model.set_cell(
                        self.ws[4], f"A{row+j[0]}", j[1], border=border, font=font
                    )
                else:
                    dcf_model.set_cell(self.ws[4], f"A{row+j[0]}", j[1])
            # One column per year: pull the inputs (suffix 0 = prior year,
            # 1 = current year) then write each ratio
            for j in range(val[1][0].shape[1] - 1):
                lt = dcf_static.letters[j + 1]
                cace1 = dcf_model.get_value(val[1][0], "Cash & Cash Equivalents", j)[1]
                ar0, ar1 = dcf_model.get_value(val[1][0], "Receivables", j)
                inv0, inv1 = dcf_model.get_value(val[1][0], "Inventory", j)
                ca1 = dcf_model.get_value(val[1][0], "Total Current Assets", j)[1]
                ta0, ta1 = dcf_model.get_value(val[1][0], "Total Assets", j)
                ap0, ap1 = dcf_model.get_value(val[1][0], "Accounts Payable", j)
                cl1 = dcf_model.get_value(val[1][0], "Total Current Liabilities", j)[1]
                tl1 = dcf_model.get_value(val[1][0], "Total Liabilities", j)[1]
                te0, te1 = dcf_model.get_value(val[1][0], "Shareholders' Equity", j)
                sls1 = dcf_model.get_value(val[1][1], "Revenue", j)[1]
                cogs1 = dcf_model.get_value(val[1][1], "Cost of Revenue", j)[1]
                inte1 = dcf_model.get_value(val[1][1], "Interest Expense / Income", j)[
                    1
                ]
                tax1 = dcf_model.get_value(val[1][1], "Income Tax", j)[1]
                ni1 = dcf_model.get_value(val[1][1], "Net Income", j)[1]
                pdiv1 = dcf_model.get_value(val[1][1], "Preferred Dividends", j)[1]
                opcf1 = dcf_model.get_value(val[1][2], "Operating Cash Flow", j)[1]
                info, outstand = self.data["info"], float(
                    self.data["info"]["sharesOutstanding"]
                )
                # Enter row offset, number to display, and format number
                rows = [
                    [1, int(val[1][0].columns[j + 1]), 1],
                    [2, dcf_model.frac(ca1, cl1), 0],
                    [3, dcf_model.frac(cace1 + ar1, cl1), 0],
                    [6, dcf_model.frac(sls1, (ar0 + ar1) / 2), 0],
                    [7, dcf_model.frac(ar1, sls1 / 365), 0],
                    [8, dcf_model.frac(cogs1, (inv0 + inv1) / 2), 0],
                    [9, dcf_model.frac(inv1, cogs1 / 365), 0],
                    [10, dcf_model.frac(cogs1, (ap0 + ap1) / 2), 0],
                    [11, dcf_model.frac(ap1, cogs1 / 365), 0],
                    [
                        12,
                        "N/A"
                        if sls1 == 0 or cogs1 == 0
                        else dcf_model.frac(ar1, sls1 / 365)
                        + dcf_model.frac(inv1, cogs1 / 365)
                        - dcf_model.frac(ap1, cogs1 / 365),
                        0,
                    ],
                    [13, dcf_model.frac(sls1, (ta0 + ta1) / 2), 0],
                    [16, dcf_model.frac(ni1, sls1), 0],
                    [17, dcf_model.frac(ni1, (ar0 + ar1) / 2), 0],
                    [18, dcf_model.frac(ni1, (te0 + te1) / 2), 0],
                    [19, dcf_model.frac(ni1 + inte1 + tax1, sls1), 0],
                    [20, dcf_model.frac(sls1 - cogs1, sls1), 0],
                    [21, dcf_model.frac(opcf1, cl1), 0],
                    [24, dcf_model.frac(tl1, te1), 0],
                    [25, dcf_model.frac(tl1, ta1), 0],
                    [26, dcf_model.frac(ta1, te1), 0],
                    [27, dcf_model.frac(ni1 + inte1 + tax1, inte1), 0],
                    [
                        30,
                        dcf_model.frac((ni1 - pdiv1) * self.info["rounding"], outstand),
                        0,
                    ],
                    [
                        31,
                        dcf_model.frac(
                            float(info["previousClose"]) * outstand,
                            (ni1 - pdiv1) * self.info["rounding"],
                        ),
                        0,
                    ],
                ]
                for k in rows:
                    if k[2] == 1:
                        # Year header: bold
                        font = dcf_static.bold_font
                        dcf_model.set_cell(
                            self.ws[4], f"{lt}{row+k[0]}", k[1], font=font
                        )
                    else:
                        dcf_model.set_cell(self.ws[4], f"{lt}{row+k[0]}", k[1])
            # Next company's block starts 35 rows down
            row += 35
__docformat__ = "numpy"
import logging
import pandas as pd
import requests
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
def _polygon_statement_frame(results: list, key: str) -> pd.DataFrame:
    """Build one statement frame (rows = line items, columns = filing dates)."""
    frames = []
    for result in results:
        values = pd.DataFrame.from_dict(result["financials"][key], orient="index")[
            ["value"]
        ]
        values.columns = [result["filing_date"]]
        frames.append(values)
    return pd.concat(frames, axis=1)


@log_start_end(log=logger)
@check_api_key(["API_POLYGON_KEY"])
def get_financials(
    symbol: str, statement: str, quarterly: bool = False, ratios: bool = False
) -> pd.DataFrame:
    """Get ticker financial statements from polygon

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    statement: str
        Financial statement data to retrieve, can be balance, income or cash
    quarterly:bool
        Flag to get quarterly reports, by default False
    ratios: bool
        Shows percentage change, by default False

    Returns
    -------
    pd.DataFrame
        Balance Sheets or Income Statements (empty frame on any error)
    """
    # Validate the statement choice before spending an API call.
    # (Previous message only mentioned 'balance' or 'income' although
    # 'cash' is also accepted.)
    if statement not in ["balance", "income", "cash"]:
        console.print("statement must be 'balance', 'income' or 'cash'.\n")
        return pd.DataFrame()
    # Note the filing date is over 30 years so will always get as many as allowed
    json_request = requests.get(
        "https://api.polygon.io/vX/reference/financials?"
        f"ticker={symbol}"
        f"&timeframe={['annual','quarterly'][quarterly]}"
        "&limit=100"
        "&filing_date.gte=1980-01-01"
        f"&apiKey={cfg.API_POLYGON_KEY}"
    ).json()
    if statement == "cash" and quarterly:
        console.print(
            "[red]Quarterly information not available for statement of cash flows[/red]\n"
        )
    if json_request["status"] == "ERROR":
        console.print(json_request["status"])
        return pd.DataFrame()
    all_results = json_request["results"]
    if len(all_results) == 0:
        console.print("No financials found from Polygon.\n")
        return pd.DataFrame()
    # Build only the requested statement (previously all three were built).
    if statement == "balance":
        df_fa = _polygon_statement_frame(all_results, "balance_sheet")
    elif statement == "income":
        df_fa = _polygon_statement_frame(all_results, "income_statement")
    else:  # cash — the API provides no quarterly cash-flow data
        if quarterly:
            return pd.DataFrame()
        df_fa = _polygon_statement_frame(all_results, "cash_flow_statement")
    if ratios:
        # Compute percent change only for rows with complete numeric data;
        # other rows are left untouched.
        numeric_rows = df_fa.applymap(lambda x: isinstance(x, (float, int))).all(
            axis=1
        )
        valid = [i for i, complete in enumerate(numeric_rows) if complete]
        df_fa_pc = df_fa.iloc[valid].pct_change(axis="columns", periods=-1).fillna(0)
        for j, i in enumerate(valid):
            df_fa.iloc[i] = df_fa_pc.iloc[j]
    return df_fa
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import (
export_data,
print_rich_table,
plot_autoscale,
camel_case_split,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis import av_model
from openbb_terminal.helpers_denomination import (
transform as transform_by_denomination,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_overview(symbol: str):
    """Print the Alpha Vantage company overview table for a ticker.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    """
    overview = av_model.get_overview(symbol)
    if overview.empty:
        console.print("No API calls left. Try me later", "\n")
        return
    # The long-form description is printed separately below the table.
    print_rich_table(
        overview.drop(index=["Description"]),
        headers=[""],
        title=f"{symbol} Overview",
        show_index=True,
    )
    console.print(f"Company Description:\n\n{overview.loc['Description'][0]}")
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_key(symbol: str, export: str = ""):
    """Print the Alpha Vantage key metrics table and optionally export it.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    export : str
        Format to export data
    """
    key_metrics = av_model.get_key_metrics(symbol)
    if key_metrics.empty:
        return
    print_rich_table(
        key_metrics, headers=[""], title=f"{symbol} Key Metrics", show_index=True
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "key", key_metrics)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_income_statement(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: Optional[list] = None,
    export: str = "",
):
    """Alpha Vantage income statement: table view, or line plot(s) of selected rows.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number of past statements, by default 5
    quarterly: bool
        Flag to get quarterly instead of annual, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: Optional[list]
        List of row labels to plot (underscore-separated, case-insensitive)
    export: str
        Format to export data
    """
    df_income = av_model.get_income_statements(
        symbol, limit, quarterly, ratios, bool(plot)
    )
    if df_income.empty:
        return
    if plot:
        rows_plot = len(plot)
        income_plot_data = df_income.transpose()
        # Lower-case columns so user-supplied labels can be matched after the
        # underscore stripping below (e.g. "total_revenue" -> "totalrevenue").
        income_plot_data.columns = income_plot_data.columns.str.lower()
        if not ratios:
            # Scale raw values to a readable denomination (Millions etc.);
            # ratio data is a unitless percent change, so no scaling.
            (df_rounded, denomination) = transform_by_denomination(income_plot_data)
            if denomination == "Units":
                denomination = ""
        else:
            df_rounded = income_plot_data
            denomination = ""
        if rows_plot == 1:
            fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            # .plot() draws on the current pyplot axes created just above
            df_rounded[plot[0].replace("_", "")].plot()
            title = (
                f"{plot[0].replace('_', ' ').lower()} {'QoQ' if quarterly else 'YoY'} Growth of {symbol.upper()}"
                if ratios
                else f"{plot[0].replace('_', ' ')} of {symbol.upper()} {denomination}"
            )
            plt.title(title)
            theme.style_primary_axis(ax)
            theme.visualize_output()
        else:
            # One stacked subplot per requested row label
            fig, axes = plt.subplots(rows_plot)
            for i in range(rows_plot):
                axes[i].plot(df_rounded[plot[i].replace("_", "")])
                axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
            # NOTE(review): only the first subplot is themed — confirm whether
            # every axis should be styled.
            theme.style_primary_axis(axes[0])
            fig.autofmt_xdate()
    else:
        # Table view: split camelCase API field names into readable labels
        indexes = df_income.index
        new_indexes = [camel_case_split(ind) for ind in indexes]
        df_income.index = new_indexes
        print_rich_table(
            df_income,
            headers=list(df_income.columns),
            title=f"{symbol} Income Statement"
            if not ratios
            else f"{'QoQ' if quarterly else 'YoY'} Change of {symbol} Income Statement",
            show_index=True,
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "income", df_income)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_balance_sheet(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: Optional[list] = None,
    export: str = "",
):
    """Alpha Vantage balance sheet statement: table view, or line plot(s) of selected rows.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number of past statements, by default 5
    quarterly: bool
        Flag to get quarterly instead of annual, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: Optional[list]
        List of row labels to plot (underscore-separated, case-insensitive)
    export: str
        Format to export data
    """
    df_balance = av_model.get_balance_sheet(
        symbol, limit, quarterly, ratios, bool(plot)
    )
    if df_balance.empty:
        return
    if plot:
        rows_plot = len(plot)
        balance_plot_data = df_balance.transpose()
        # Lower-case columns so user-supplied labels can be matched after the
        # underscore stripping below.
        balance_plot_data.columns = balance_plot_data.columns.str.lower()
        if not ratios:
            # Scale raw values to a readable denomination; ratio data is a
            # unitless percent change, so no scaling.
            (df_rounded, denomination) = transform_by_denomination(balance_plot_data)
            if denomination == "Units":
                denomination = ""
        else:
            df_rounded = balance_plot_data
            denomination = ""
        if rows_plot == 1:
            fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            # .plot() draws on the current pyplot axes created just above
            df_rounded[plot[0].replace("_", "")].plot()
            title = (
                f"{plot[0].replace('_', ' ').lower()} {'QoQ' if quarterly else 'YoY'} Growth of {symbol.upper()}"
                if ratios
                else f"{plot[0].replace('_', ' ')} of {symbol.upper()} {denomination}"
            )
            plt.title(title)
            theme.style_primary_axis(ax)
            theme.visualize_output()
        else:
            # One stacked subplot per requested row label
            fig, axes = plt.subplots(rows_plot)
            for i in range(rows_plot):
                axes[i].plot(df_rounded[plot[i].replace("_", "")])
                axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
            # NOTE(review): only the first subplot is themed — confirm whether
            # every axis should be styled.
            theme.style_primary_axis(axes[0])
            fig.autofmt_xdate()
    else:
        # Table view: split camelCase API field names into readable labels
        indexes = df_balance.index
        new_indexes = [camel_case_split(ind) for ind in indexes]
        df_balance.index = new_indexes
        print_rich_table(
            df_balance,
            headers=list(df_balance.columns),
            title=f"{symbol} Balance Sheet"
            if not ratios
            else f"{'QoQ' if quarterly else 'YoY'} Change of {symbol} Balance Sheet",
            show_index=True,
        )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "balance", df_balance
    )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_cash_flow(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: Optional[list] = None,
    export: str = "",
):
    """Alpha Vantage cash flow statement: table view, or line plot(s) of selected rows.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number of past statements, by default 5
    quarterly: bool
        Flag to get quarterly instead of annual, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: Optional[list]
        List of row labels to plot (underscore-separated, case-insensitive)
    export: str
        Format to export data
    """
    df_cash = av_model.get_cash_flow(symbol, limit, quarterly, ratios, bool(plot))
    if df_cash.empty:
        return
    if plot:
        rows_plot = len(plot)
        cash_plot_data = df_cash.transpose()
        # Lower-case columns so user-supplied labels can be matched after the
        # underscore stripping below.
        cash_plot_data.columns = cash_plot_data.columns.str.lower()
        if not ratios:
            # Scale raw values to a readable denomination; ratio data is a
            # unitless percent change, so no scaling.
            (df_rounded, denomination) = transform_by_denomination(cash_plot_data)
            if denomination == "Units":
                denomination = ""
        else:
            df_rounded = cash_plot_data
            denomination = ""
        if rows_plot == 1:
            fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            # .plot() draws on the current pyplot axes created just above
            df_rounded[plot[0].replace("_", "")].plot()
            title = (
                f"{plot[0].replace('_', ' ').lower()} {'QoQ' if quarterly else 'YoY'} Growth of {symbol.upper()}"
                if ratios
                else f"{plot[0].replace('_', ' ')} of {symbol.upper()} {denomination}"
            )
            plt.title(title)
            theme.style_primary_axis(ax)
            theme.visualize_output()
        else:
            # One stacked subplot per requested row label
            fig, axes = plt.subplots(rows_plot)
            for i in range(rows_plot):
                axes[i].plot(df_rounded[plot[i].replace("_", "")])
                axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
            # NOTE(review): only the first subplot is themed — confirm whether
            # every axis should be styled.
            theme.style_primary_axis(axes[0])
            fig.autofmt_xdate()
    else:
        # Table view: split camelCase API field names into readable labels
        indexes = df_cash.index
        new_indexes = [camel_case_split(ind) for ind in indexes]
        df_cash.index = new_indexes
        print_rich_table(
            df_cash,
            headers=list(df_cash.columns),
            title=f"{symbol} Cash flow"
            if not ratios
            else f"{'QoQ' if quarterly else 'YoY'} Change of {symbol} Cash flow",
            show_index=True,
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "cash", df_cash)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_earnings(
    symbol: str, limit: int = 5, quarterly: bool = False, export: str = ""
):
    """Print the Alpha Vantage earnings table and optionally export it.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number of events to show
    quarterly: bool
        Flag to show quarterly instead of annual
    export: str
        Format to export data
    """
    earnings_df = av_model.get_earnings(symbol, quarterly)
    if earnings_df.empty:
        return
    # Only the most recent `limit` events are tabulated
    print_rich_table(
        earnings_df.head(limit),
        headers=list(earnings_df.columns),
        show_index=False,
        title=f"{symbol} Earnings",
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "earnings", earnings_df
    )
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_fraud(
    symbol: str,
    export: str = "",
    help_text: bool = False,
    color: bool = True,
    detail: bool = False,
):
    """Fraud indicators for given ticker

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    export : str
        Whether to export the dupont breakdown
    help_text : bool
        Whether to show help text
    color : bool
        Whether to show color in the dataframe
    detail : bool
        Whether to show the details for the mscore
    """
    df = av_model.get_fraud_ratios(symbol, detail=detail)
    if df.empty:
        return
    df_color = df.copy()
    if color:
        # Stringify cells so threshold-based color markup can be injected
        for column in df_color:
            df_color[column] = df_color[column].astype(str)
        df_color = df_color.apply(lambda x: av_model.replace_df(x.name, x), axis=1)

    print_rich_table(
        df_color,
        headers=list(df_color.columns),
        show_index=True,
        title="Fraud Risk Statistics",
    )
    help_message = """
MSCORE:
An mscore above -1.78 indicates a high risk of fraud, and one above -2.22 indicates a medium risk of fraud.
ZSCORE:
A zscore less than 0.5 indicates a high risk of fraud.
Mckee:
A mckee less than 0.5 indicates a high risk of fraud.
"""
    if help_text:
        console.print(help_message)
    # NOTE(review): export tag "dupont" looks copy-pasted from display_dupont —
    # confirm whether this should be "fraud".
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "dupont", df)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_dupont(
    symbol: str,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Shows the extended dupont ratio as a table or a line plot.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    raw : bool
        Show raw data instead of a graph
    export : str
        Whether to export the dupont breakdown
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = av_model.get_dupont(symbol)
    if df.empty:
        return
    if raw:
        print_rich_table(
            df, headers=list(df.columns), show_index=True, title="Extended Dupont"
        )
        return
    # Use caller-provided axes when given; otherwise create our own figure.
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    colors = theme.get_colors()
    df.transpose().plot(kind="line", ax=ax, color=colors)
    ax.set_title("Extended Dupont by Year")
    theme.style_primary_axis(ax)
    # Only render the figure ourselves when we own it
    if not external_axes:
        theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "dupont", df)
__docformat__ = "numpy"
import logging
from datetime import datetime, timedelta
from typing import Optional, Tuple
from urllib.request import Request, urlopen
import re
import ssl
import numpy as np
import pandas as pd
import yfinance as yf
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import lambda_long_number_format
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis.fa_helper import clean_df_index
from openbb_terminal.helpers_denomination import (
transform as transform_by_denomination,
)
logger = logging.getLogger(__name__)
# pylint: disable=W0212
# Disable TLS certificate verification for all urllib requests made from this
# module (the scraping below uses urlopen against Yahoo endpoints).
ssl._create_default_https_context = ssl._create_unverified_context
@log_start_end(log=logger)
def get_info(symbol: str) -> pd.DataFrame:
    """Gets ticker symbol info as a one-column ("Value") DataFrame.

    Parameters
    ----------
    symbol: str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        DataFrame of yfinance information, indexed by metric name
    """
    stock = yf.Ticker(symbol)
    df_info = pd.DataFrame(stock.info.items(), columns=["Metric", "Value"])
    df_info = df_info.set_index("Metric")
    # Normalize the index labels (helper from fa_helper; exact transform
    # defined there — the renames below patch its artifacts)
    clean_df_index(df_info)
    # yfinance reports the split date as a unix timestamp; render as a date
    if "Last split date" in df_info.index and df_info.loc["Last split date"].values[0]:
        df_info.loc["Last split date"].values[0] = datetime.fromtimestamp(
            df_info.loc["Last split date"].values[0]
        ).strftime("%Y-%m-%d")
    # Drop rows whose value stringifies to an empty list
    df_info = df_info.mask(df_info["Value"].astype(str).eq("[]")).dropna()
    # Humanize large numbers everywhere except the Zip code row
    df_info[df_info.index != "Zip"] = df_info[df_info.index != "Zip"].applymap(
        lambda x: lambda_long_number_format(x)
    )
    # Repair labels that the index cleanup split awkwardly
    df_info = df_info.rename(
        index={
            "Address1": "Address",
            "Average daily volume10 day": "Average daily volume 10 day",
            "Average volume10days": "Average volume 10 days",
            "Price to sales trailing12 months": "Price to sales trailing 12 months",
        }
    )
    df_info.index = df_info.index.str.replace("eps", "EPS")
    df_info.index = df_info.index.str.replace("p e", "PE")
    df_info.index = df_info.index.str.replace("Peg", "PEG")
    return df_info
def _format_holder_table(holders: pd.DataFrame) -> pd.DataFrame:
    """Rename '% Out' to 'Stake' and humanize the Shares/Value/Stake columns."""
    holders.columns = holders.columns.str.replace("% Out", "Stake")
    holders["Shares"] = holders["Shares"].apply(lambda x: lambda_long_number_format(x))
    holders["Value"] = holders["Value"].apply(lambda x: lambda_long_number_format(x))
    holders["Stake"] = holders["Stake"].apply(lambda x: str(f"{100 * x:.2f}") + " %")
    return holders


@log_start_end(log=logger)
def get_shareholders(symbol: str, holder: str = "institutional") -> pd.DataFrame:
    """Get shareholders from yahoo

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    holder : str
        Which holder to get table for: "major", "institutional" or "mutualfund"

    Returns
    -------
    pd.DataFrame
        Requested holders table (empty frame for an unknown holder type)
    """
    stock = yf.Ticker(symbol)
    # Fetch and format only the requested table. Previously all three tables
    # were built eagerly, so a missing/None table from yfinance could raise
    # even when a different table was requested.
    if holder == "major":
        df_major = stock.major_holders
        df_major[1] = df_major[1].apply(lambda x: x.replace("%", "Percentage"))
        return df_major
    if holder == "institutional":
        return _format_holder_table(stock.institutional_holders)
    if holder == "mutualfund":
        return _format_holder_table(stock.mutualfund_holders)
    return pd.DataFrame()
@log_start_end(log=logger)
def get_sustainability(symbol: str) -> pd.DataFrame:
    """Get sustainability metrics from yahoo

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Dataframe of sustainability metrics (empty when unavailable)
    """
    stock = yf.Ticker(symbol)
    pd.set_option("display.max_colwidth", None)
    sustainability = stock.sustainability
    if sustainability is None or sustainability.empty:
        return pd.DataFrame()
    clean_df_index(sustainability)
    # Re-insert the spaces lost from compound labels during index cleanup
    label_fixes = {
        "Controversialweapons": "Controversial Weapons",
        "Socialpercentile": "Social Percentile",
        "Peercount": "Peer Count",
        "Governancescore": "Governance Score",
        "Environmentpercentile": "Environment Percentile",
        "Animaltesting": "Animal Testing",
        "Highestcontroversy": "Highest Controversy",
        "Environmentscore": "Environment Score",
        "Governancepercentile": "Governance Percentile",
        "Militarycontract": "Military Contract",
    }
    return sustainability.rename(index=label_fixes)
@log_start_end(log=logger)
def get_calendar_earnings(symbol: str) -> pd.DataFrame:
    """Get calendar earnings for ticker symbol

    Parameters
    ----------
    symbol: str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Dataframe of calendar earnings (transposed; empty when unavailable)
    """
    calendar = yf.Ticker(symbol).calendar
    if calendar.empty:
        return pd.DataFrame()
    # First row holds event dates; the remaining rows are numeric estimates
    calendar.iloc[0, :] = calendar.iloc[0, :].apply(
        lambda x: x.date().strftime("%m/%d/%Y")
    )
    calendar.iloc[1:, :] = calendar.iloc[1:, :].applymap(
        lambda x: lambda_long_number_format(x)
    )
    return calendar.T
@log_start_end(log=logger)
def get_website(symbol: str) -> str:
    """Gets website of company from yfinance

    Parameters
    ----------
    symbol: str
        Stock ticker symbol

    Returns
    -------
    str
        Company website, or an empty string when yfinance has none
    """
    # Read the info dict directly. The previous implementation filtered an
    # intermediate DataFrame and raised an IndexError whenever the "website"
    # key was absent.
    return yf.Ticker(symbol).info.get("website", "")
@log_start_end(log=logger)
def get_hq(symbol: str) -> str:
    """Gets google map url for headquarter

    Parameters
    ----------
    symbol: str
        Stock ticker symbol

    Returns
    -------
    str
        Headquarter google maps url
    """
    info = pd.DataFrame(
        yf.Ticker(symbol).info.items(), columns=["Metric", "Value"]
    ).set_index("Metric")
    url = "https://www.google.com/maps/search/"
    # Append each available address component, '+'-encoded, comma-separated
    for field in ("address1", "address2", "city", "state", "zip", "country"):
        if field in info.index:
            url += info[info.index == field]["Value"].values[0].replace(" ", "+") + ","
    # Drop the trailing separator
    return url[:-1]
@log_start_end(log=logger)
def get_dividends(symbol: str) -> pd.DataFrame:
    """Get historical dividend for ticker

    Parameters
    ----------
    symbol: str
        Ticker symbol to get dividend for

    Returns
    -------
    pd.DataFrame
        Dataframe of dividends and dates, newest first

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.fa.divs("AAPL")
    """
    dividends = pd.DataFrame(yf.Ticker(symbol).dividends)
    if dividends.empty:
        console.print("No dividends found.\n")
        return pd.DataFrame()
    # Period-over-period change, then reverse to newest-first order
    dividends["Change"] = dividends.diff()
    return dividends.iloc[::-1]
@log_start_end(log=logger)
def get_mktcap(
    symbol: str,
    start_date: Optional[str] = None,
) -> Tuple[pd.DataFrame, str]:
    """Get market cap over time for ticker. [Source: Yahoo Finance]

    Parameters
    ----------
    symbol: str
        Ticker to get market cap over time
    start_date: Optional[str]
        Initial date (e.g., 2021-10-01). Defaults to 3 years back

    Returns
    -------
    pd.DataFrame
        Dataframe of estimated market cap over time
        (adjusted close * shares outstanding)
    str:
        Currency of ticker (empty string when unavailable)
    """
    if start_date is None:
        start_date = (datetime.now() - timedelta(days=3 * 366)).strftime("%Y-%m-%d")
    currency = ""
    df_data = yf.download(symbol, start=start_date, progress=False, threads=False)
    if not df_data.empty:
        data = yf.Ticker(symbol).info
        if data:
            # Guard missing keys: previously a ticker without
            # "sharesOutstanding" or "currency" raised a KeyError here.
            shares = data.get("sharesOutstanding")
            if shares is not None:
                df_data["Adj Close"] = df_data["Adj Close"] * shares
                df_data = df_data["Adj Close"]
                currency = data.get("currency", "")
    return df_data, currency
@log_start_end(log=logger)
def get_splits(symbol: str) -> pd.DataFrame:
    """Get splits and reverse splits events. [Source: Yahoo Finance]

    Parameters
    ----------
    symbol: str
        Ticker to get forward and reverse splits

    Returns
    -------
    pd.DataFrame
        Dataframe of forward and reverse splits (empty when there are none)
    """
    splits = yf.Ticker(symbol).splits
    if splits.empty:
        return pd.DataFrame()
    return splits.to_frame()
@log_start_end(log=logger)
def get_financials(symbol: str, statement: str, ratios: bool = False) -> pd.DataFrame:
    """Get a financial statement scraped from Yahoo Finance UK.

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    statement: str
        can be:

        - cash-flow
        - financials for Income
        - balance-sheet

    ratios: bool
        Shows percentage change

    Returns
    -------
    pd.DataFrame
        Dataframe of Financial statement (empty frame when no data is found)
    """
    url = (
        "https://uk.finance.yahoo.com/quote/"
        + symbol
        + "/"
        + statement
        + "?p="
        + symbol
    )
    # Making the website believe that you are accessing it using a Mozilla browser
    req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
    webpage = urlopen(req).read()  # pylint: disable= R1732
    soup = BeautifulSoup(webpage, "html.parser")
    features = soup.find_all("div", class_="D(tbr)")
    if len(features) == 0:
        # Return an empty frame (not None, as before) so callers can safely
        # test .empty on the result.
        console.print("No data found in Yahoo Finance\n")
        return pd.DataFrame()
    # First scraped row holds the column headers; the rest are line items
    headers = [item.text for item in features[0].find_all("div", class_="D(ib)")]
    rows = [
        [cell.text for cell in feature.find_all("div", class_="D(tbc)")]
        for feature in features
    ]
    df = pd.DataFrame(rows[1:])
    if df.empty:
        return pd.DataFrame()
    # The income ("financials") and cash-flow tables carry an extra
    # trailing-twelve-month column before the dated ones; balance sheet not.
    leading = ["Breakdown"] if statement == "balance-sheet" else ["Breakdown", "ttm"]
    dates = [
        datetime.strptime(raw, "%d/%m/%Y").strftime("%Y-%m-%d")
        for raw in headers[len(leading) :]
    ]
    df.columns = leading + dates
    df.set_index("Breakdown", inplace=True)
    df.replace("", np.nan, inplace=True)
    df.replace("-", np.nan, inplace=True)
    df = df.dropna(how="all")
    df = df.replace(",", "", regex=True)
    df = df.replace("k", "", regex=True)
    df = df.astype("float")
    # Data except EPS is returned in thousands, convert it
    (df, _) = transform_by_denomination(
        df,
        "Thousands",
        "Units",
        axis=1,
        skipPredicate=lambda row: re.search("eps", row.name, re.IGNORECASE) is not None,
    )
    if ratios:
        # Compute percent change only for rows with complete numeric data;
        # other rows are left untouched.
        numeric_rows = df.applymap(lambda x: isinstance(x, (float, int))).all(axis=1)
        valid = [i for i, complete in enumerate(numeric_rows) if complete]
        df_fa_pc = df.iloc[valid].pct_change(axis="columns", periods=-1).fillna(0)
        for j, i in enumerate(valid):
            df.iloc[i] = df_fa_pc.iloc[j]
    return df
@log_start_end(log=logger)
def get_earnings_history(symbol: str) -> pd.DataFrame:
    """Get earning reports

    Parameters
    ----------
    symbol: str
        Symbol to get earnings for

    Returns
    -------
    pd.DataFrame
        Dataframe of historical earnings if present
    """
    return yf.Ticker(symbol).earnings_history
@log_start_end(log=logger)
def get_currency(symbol) -> str:
    """Quick helper to get currency for financial statements"""
    # Fall back to a sentinel when yfinance does not report a currency
    return yf.Ticker(symbol).info.get("financialCurrency", "Not Specified")
__docformat__ = "numpy"
import pandas as pd
from openbb_terminal.stocks.fundamental_analysis import (
yahoo_finance_model,
polygon_model,
av_model,
fmp_model,
eodhd_model,
)
def get_income_statement(
    symbol: str,
    quarterly: bool = False,
    ratios: bool = False,
    source: str = "YahooFinance",
    limit: int = 10,
) -> pd.DataFrame:
    """Get income statement.

    Parameters
    ----------
    symbol : str
        Symbol to get income statement for
    source : str, optional
        Data source for income statement, by default "YahooFinance"
    quarterly : bool, optional
        Flag to get quarterly data
    ratios : bool, optional
        Flag to return data as a percent change.
    limit : int
        Number of statements to return (free tiers may be limited to 5 years)

    Returns
    -------
    pd.DataFrame
        Dataframe of income statement (empty for an unknown source)

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> income_statement = openbb.stocks.fa.income("AAPL", source="YahooFinance")

    If you have a premium AlphaVantage key, you can use the quarterly flag to get quarterly statements

    >>> quarterly_income_statement = openbb.stocks.fa.income("AAPL", source="AlphaVantage", quarterly=True)
    """
    # Lazy dispatch: the provider call only happens for the selected source.
    fetchers = {
        "YahooFinance": lambda: yahoo_finance_model.get_financials(
            symbol=symbol, statement="financials", ratios=ratios
        ),
        "AlphaVantage": lambda: av_model.get_income_statements(
            symbol=symbol, quarterly=quarterly, ratios=ratios, limit=limit
        ),
        "FinancialModelingPrep": lambda: fmp_model.get_income(
            symbol=symbol, limit=limit, quarterly=quarterly, ratios=ratios
        ),
        "Polygon": lambda: polygon_model.get_financials(
            symbol, "income", quarterly, ratios
        ),
        "EODHD": lambda: eodhd_model.get_financials(
            symbol, "income", quarterly, ratios
        ),
    }
    if source not in fetchers:
        return pd.DataFrame()
    if source == "YahooFinance" and quarterly:
        print(
            "Quarterly income statement not available from Yahoo Finance. Returning annual"
        )
    return fetchers[source]()
def get_balance_sheet(
    symbol: str,
    quarterly: bool = False,
    ratios: bool = False,
    source: str = "YahooFinance",
    limit: int = 10,
) -> pd.DataFrame:
    """Get balance sheet.

    Parameters
    ----------
    symbol : str
        Symbol to get balance sheet for
    source : str, optional
        Data source for balance sheet, by default "YahooFinance"
    quarterly : bool, optional
        Flag to get quarterly data
    ratios : bool, optional
        Flag to return data as a percent change.
    limit : int
        Number of statements to return (free tiers may be limited to 5 years)

    Returns
    -------
    pd.DataFrame
        Dataframe of balance sheet (empty for an unknown source)

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> balance_sheet = openbb.stocks.fa.balance("AAPL", source="YahooFinance")

    If you have a premium AlphaVantage key, you can use the quarterly flag to get quarterly statements

    >>> quarterly_income_statement = openbb.stocks.fa.balance("AAPL", source="AlphaVantage", quarterly=True)
    """
    # Lazy dispatch: the provider call only happens for the selected source.
    fetchers = {
        "YahooFinance": lambda: yahoo_finance_model.get_financials(
            symbol=symbol, statement="balance-sheet", ratios=ratios
        ),
        "AlphaVantage": lambda: av_model.get_balance_sheet(
            symbol=symbol, quarterly=quarterly, ratios=ratios, limit=limit
        ),
        "FinancialModelingPrep": lambda: fmp_model.get_balance(
            symbol=symbol, limit=limit, quarterly=quarterly, ratios=ratios
        ),
        "Polygon": lambda: polygon_model.get_financials(
            symbol, "balance", quarterly, ratios
        ),
        "EODHD": lambda: eodhd_model.get_financials(
            symbol, "balance", quarterly, ratios
        ),
    }
    if source not in fetchers:
        return pd.DataFrame()
    if source == "YahooFinance" and quarterly:
        print(
            "Quarterly statements not available from Yahoo Finance. Returning annual"
        )
    return fetchers[source]()
def get_cash_flow(
    symbol: str,
    quarterly: bool = False,
    ratios: bool = False,
    source: str = "YahooFinance",
    limit: int = 10,
) -> pd.DataFrame:
    """Get Cash Flow.

    Parameters
    ----------
    symbol : str
        Symbol to get cash flow for
    source : str, optional
        Data source for cash flow, by default "YahooFinance"
    quarterly : bool, optional
        Flag to get quarterly data
    ratios : bool, optional
        Flag to return data as a percent change.
    limit : int
        Number of statements to return (free tiers may be limited to 5 years)

    Returns
    -------
    pd.DataFrame
        Dataframe of cash flow (empty for an unknown source)

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> cash_flow = openbb.stocks.fa.cash("AAPL", source="YahooFinance")

    If you have a premium AlphaVantage key, you can use the quarterly flag to get quarterly statements

    >>> quarterly_income_statement = openbb.stocks.fa.cash("AAPL", source="AlphaVantage", quarterly=True)
    """
    # Lazy dispatch: the provider call only happens for the selected source.
    fetchers = {
        "YahooFinance": lambda: yahoo_finance_model.get_financials(
            symbol=symbol, statement="cash-flow", ratios=ratios
        ),
        "AlphaVantage": lambda: av_model.get_cash_flow(
            symbol=symbol, quarterly=quarterly, ratios=ratios, limit=limit
        ),
        "FinancialModelingPrep": lambda: fmp_model.get_cash(
            symbol=symbol, limit=limit, quarterly=quarterly, ratios=ratios
        ),
        "Polygon": lambda: polygon_model.get_financials(
            symbol, "cash", quarterly, ratios
        ),
        "EODHD": lambda: eodhd_model.get_financials(
            symbol, "cash", quarterly, ratios
        ),
    }
    if source not in fetchers:
        return pd.DataFrame()
    if source == "YahooFinance" and quarterly:
        print(
            "Quarterly statements not available from Yahoo Finance. Returning annual"
        )
    return fetchers[source]()
def earnings(
    symbol: str, source: str = "YahooFinance", quarterly: bool = False
) -> pd.DataFrame:
    """Get earnings data.

    Parameters
    ----------
    symbol : str
        Stock ticker
    source : str, optional
        Source to use, by default "YahooFinance"
    quarterly : bool, optional
        Flag to get quarterly data (AlphaVantage only), by default False.

    Returns
    -------
    pd.DataFrame
        Dataframe of earnings (empty for an unknown source)

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> aapl_earnings = openbb.stocks.fa.earnings("AAPL", source="YahooFinance")

    To obtain quarterly earnings, use the quarterly flag with AlphaVantage

    >>> aapl_earnings = openbb.stocks.fa.earnings("AAPL", source="AlphaVantage", quarterly=True)
    """
    if source == "YahooFinance":
        return yahoo_finance_model.get_earnings_history(symbol)
    if source == "AlphaVantage":
        return av_model.get_earnings(symbol, quarterly)
    return pd.DataFrame()
__docformat__ = "numpy"
import logging
import os
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
from urllib.request import urlopen
from zipfile import ZipFile
import financedatabase as fd
import pandas as pd
import requests
import yfinance as yf
from bs4 import BeautifulSoup
from openpyxl import worksheet
from sklearn.linear_model import LinearRegression
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks.fundamental_analysis import dcf_static
from openbb_terminal.helper_funcs import compose_export_path
logger = logging.getLogger(__name__)
# ISO 4217 currency codes recognized when parsing a statement's units caption
# (see create_dataframe). Deduplicated: the original list carried "KRW" and
# "MNT" twice, which only wasted membership checks.
CURRENCIES = [
    "ALL",
    "AFN",
    "ARS",
    "AWG",
    "AUD",
    "AZN",
    "BSD",
    "BBD",
    "BYN",
    "BZD",
    "BMD",
    "BOB",
    "BAM",
    "BWP",
    "BGN",
    "BRL",
    "BND",
    "KHR",
    "CAD",
    "KYD",
    "CLP",
    "CNY",
    "COP",
    "CRC",
    "HRK",
    "CUP",
    "CZK",
    "DKK",
    "DOP",
    "XCD",
    "EGP",
    "SVC",
    "EUR",
    "FKP",
    "FJD",
    "GHS",
    "GIP",
    "GTQ",
    "GGP",
    "GYD",
    "HNL",
    "HKD",
    "HUF",
    "ISK",
    "INR",
    "IDR",
    "IRR",
    "IMP",
    "ILS",
    "JMD",
    "JPY",
    "JEP",
    "KZT",
    "KPW",
    "KRW",
    "KGS",
    "LAK",
    "LBP",
    "LRD",
    "MKD",
    "MYR",
    "MUR",
    "MXN",
    "MNT",
    "MZN",
    "NAD",
    "NPR",
    "ANG",
    "NZD",
    "NIO",
    "NGN",
    "NOK",
    "OMR",
    "PKR",
    "PAB",
    "PYG",
    "PEN",
    "PHP",
    "PLN",
    "QAR",
    "RON",
    "RUB",
    "SHP",
    "SAR",
    "RSD",
    "SCR",
    "SGD",
    "SBD",
    "SOS",
    "ZAR",
    "LKR",
    "SEK",
    "CHF",
    "SRD",
    "SYP",
    "TWD",
    "THB",
    "TTD",
    "TRY",
    "TVD",
    "UAH",
    "AED",
    "GBP",
    "USD",
    "UYU",
    "UZS",
    "VEF",
    "VND",
    "YER",
    "ZWD",
]
@log_start_end(log=logger)
def string_float(string: str) -> float:
    """Convert a scraped statement cell to a float.

    Parameters
    ----------
    string : str
        String to be converted; commas are stripped, and "-" characters
        are removed entirely.

    Returns
    -------
    float
        The numeric value, or 0 for an empty/placeholder cell.
    """
    # Build the cleaned string once instead of repeating the chain twice.
    # NOTE(review): removing "-" also strips the sign from negative numbers;
    # this matches the original behavior (statement tables use "-" to mean
    # an empty cell) — confirm against the scrape source before changing.
    cleaned = string.strip().replace(",", "").replace("-", "")
    return float(cleaned) if cleaned else 0
def insert_row(
    name: str, index: str, df: pd.DataFrame, row_v: List[str]
) -> pd.DataFrame:
    """Insert a new labelled row into a dataframe directly after a given index.

    Parameters
    ----------
    name : str
        Label for the row being inserted
    index : str
        Existing row label that the new row will follow
    df : pd.DataFrame
        Dataframe to modify
    row_v : List[str]
        Values for the new row

    Returns
    -------
    pd.DataFrame
        Dataframe containing the inserted row; the original dataframe is
        returned untouched when ``name`` is already present
    """
    pd.options.mode.chained_assignment = None
    if name in df.index:
        return df
    # Split just below the anchor row, append the new row to the upper
    # half, then stitch the halves back together.
    split_at = df.index.get_loc(index) + 1
    upper = df[0:split_at]
    lower = df[split_at:]
    upper.loc[name] = row_v
    return pd.concat([upper, lower])
@log_start_end(log=logger)
def set_cell(
    ws: worksheet,
    cell: str,
    text: Union[int, str, float] = None,
    font: str = None,
    border: str = None,
    fill: str = None,
    alignment: str = None,
    num_form: str = None,
):
    """Write a value and/or styling attributes to one worksheet cell.

    Parameters
    ----------
    ws : worksheet
        The worksheet to be modified
    cell : str
        The cell reference to modify (e.g. "A1")
    text : Union[int, str, float]
        The new value of the cell
    font : str
        The font to apply
    border : str
        The border to apply
    fill : str
        The fill to apply
    alignment : str
        The alignment to apply
    num_form : str
        The number format to apply
    """
    if text:
        ws[cell] = text
    # Apply each styling attribute only when a truthy value was supplied.
    styling = (
        ("font", font),
        ("border", border),
        ("fill", fill),
        ("alignment", alignment),
        ("number_format", num_form),
    )
    for attr, value in styling:
        if value:
            setattr(ws[cell], attr, value)
@log_start_end(log=logger)
def get_fama_raw() -> pd.DataFrame:
    """Download the monthly Fama-French research factors.

    Returns
    -------
    pd.DataFrame
        Factor returns (MKT-RF, SMB, HML, RF) as decimal floats, indexed
        by month-start dates.
    """
    factor_url = (
        "https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/"
        "F-F_Research_Data_Factors_CSV.zip"
    )
    with urlopen(factor_url) as url:  # nosec
        # Unpack the zipped CSV straight into a dataframe.
        with ZipFile(BytesIO(url.read())) as zipfile:
            with zipfile.open("F-F_Research_Data_Factors.CSV") as zip_open:
                df = pd.read_csv(
                    zip_open,
                    header=0,
                    names=["Date", "MKT-RF", "SMB", "HML", "RF"],
                    skiprows=3,
                )

    # Keep only the monthly rows (six-character YYYYMM dates); this drops
    # the annual summary block appended at the bottom of the file.
    df = df[df["Date"].apply(lambda x: len(str(x).strip()) == 6)]
    df["Date"] = df["Date"].astype(str) + "01"
    df["Date"] = pd.to_datetime(df["Date"], format="%Y%m%d")
    # Factors are published in percent; convert each column to a decimal.
    for factor in ["MKT-RF", "SMB", "HML", "RF"]:
        df[factor] = pd.to_numeric(df[factor], downcast="float") / 100
    return df.set_index("Date")
@log_start_end(log=logger)
def get_historical_5(symbol: str) -> pd.DataFrame:
    """Fetch five years of monthly prices for a ticker, dividends filtered.

    Parameters
    ----------
    symbol: str
        The ticker symbol to be analyzed

    Returns
    -------
    pd.DataFrame
        Monthly history without the dividend/split columns or NaN rows
    """
    history = yf.Ticker(symbol).history(period="5y", interval="1mo")
    # Keep only rows stamped on the first of the month, then drop the
    # corporate-action columns and any incomplete rows.
    history = history[history.index.to_series().apply(lambda ts: ts.day == 1)]
    history = history.drop(["Dividends", "Stock Splits"], axis=1)
    history = history.dropna()
    # Strip timezone info so the index can be joined with naive dates.
    history.index = [stamp.replace(tzinfo=None) for stamp in history.index]
    return history
@log_start_end(log=logger)
def get_fama_coe(symbol: str) -> float:
    """Estimate a company's cost of equity via the Fama-French 3-factor model.

    Parameters
    ----------
    symbol : str
        The ticker symbol to be analyzed

    Returns
    -------
    float
        The annualized Fama-French cost of equity
    """
    factors = get_fama_raw()
    prices = get_historical_5(symbol)
    joined = prices.join(factors).dropna()
    joined["Monthly Return"] = joined["Close"].pct_change()
    joined["Excess Monthly Return"] = joined["Monthly Return"] - joined["RF"]
    joined = joined.dropna()

    # Regress excess returns on the three factors to get the betas.
    exog = joined[["MKT-RF", "SMB", "HML"]]
    endog = joined["Excess Monthly Return"]
    betas = LinearRegression().fit(exog, endog).coef_

    # Expected monthly return from average factor premia, annualized by 12.
    monthly_coe = (
        joined["RF"].mean()
        + betas[0] * joined["MKT-RF"].mean()
        + betas[1] * joined["SMB"].mean()
        + betas[2] * joined["HML"].mean()
    )
    return monthly_coe * 12
@log_start_end(log=logger)
def others_in_sector(
    symbol: str, sector: str, industry: str, no_filter: bool = False
) -> List[str]:
    """List other tickers sharing a stock's sector and industry.

    Parameters
    ----------
    symbol: str
        The ticker symbol to be excluded from the result
    sector: str
        The sector to pull from
    industry: str
        The industry to pull from
    no_filter: bool
        True means that we do not filter based on market cap

    Returns
    -------
    List[str]
        List of symbols in the same sector/industry
    """
    # Normalize the industry string to the finance-database convention.
    industry = industry.replace("—", " - ")
    industry = industry.replace("/", " ")

    similars = fd.select_equities(sector=sector, industry=industry)

    # Remove the analyzed ticker and, unless disabled, keep only companies
    # in the same market-cap bucket.
    if symbol in similars:
        market_cap = similars[symbol]["market_cap"]
        similars.pop(symbol, None)
        if not no_filter:
            similars = {
                ticker: data
                for ticker, data in similars.items()
                if data["market_cap"] == market_cap
            }
    return list(similars)
def create_dataframe(symbol: str, statement: str, period: str = "annual"):
    """
    Scrape a financial statement for a ticker from stockanalysis.com.

    Parameters
    ----------
    symbol : str
        The ticker symbol to create a dataframe for
    statement : str
        The financial statement dataframe to create: "BS", "CF" or "IS"
    period : str
        Whether to look at annual, quarterly, or trailing

    Returns
    -------
    statement : pd.DataFrame
        The financial statement requested (empty on any scrape failure)
    rounding : int
        The scale the figures are reported in (1e3/1e6/1e9), or None
    statement_currency: str
        The currency the financial statements are reported in, or None
    """
    if statement not in ["BS", "CF", "IS"]:
        raise ValueError("statement variable must be 'BS', 'CF' or 'IS'")
    if period not in ["annual", "quarterly", "trailing"]:
        raise ValueError(
            "statement variable must be 'annual','quarterly', or 'trailing'"
        )
    per_url = f"{period}/" if period != "annual" else ""

    URL = f"https://stockanalysis.com/stocks/{symbol}/financials/"
    URL += dcf_static.statement_url[statement] + per_url
    ignores = dcf_static.statement_ignore[statement]

    r = requests.get(URL, headers=dcf_static.headers)

    if "404 - Page Not Found" in r.text:
        return pd.DataFrame(), None, None
    try:
        # The first HTML table on the page is the statement itself.
        df = pd.read_html(r.text)[0]
    except ValueError:
        return pd.DataFrame(), None, None

    # The units/currency note lives in a small caption div below the table.
    soup = BeautifulSoup(r.content, "html.parser")
    phrase = soup.find(
        "div", attrs={"class": "hidden pb-1 text-sm text-gray-600 lg:block"}
    )
    phrase = phrase.get_text().lower() if phrase else ""

    if "thousand" in phrase:
        rounding = 1_000
    elif "millions" in phrase:
        rounding = 1_000_000
    elif "billions" in phrase:
        rounding = 1_000_000_000
    else:
        # Unknown scale: bail out rather than misinterpret magnitudes.
        return pd.DataFrame(), None, None

    # Pick out the first reporting currency mentioned in the caption.
    statement_currency = ""
    for currency in CURRENCIES:
        if currency.lower() in phrase:
            statement_currency = currency
            break

    if "Quarter Ended" in df.columns:
        df = df.set_index("Quarter Ended")
    elif "Quarter Ending" in df.columns:
        df = df.set_index("Quarter Ending")
    else:
        df = df.set_index("Year")
    # Columns gated behind the site's paywall show "Upgrade"; drop them.
    df = df.loc[:, ~(df == "Upgrade").any()]

    for ignore in ignores:
        if ignore in df.index:
            df = df.drop([ignore])
    # Reverse so the oldest period comes first.
    df = df[df.columns[::-1]]

    if statement == "IS":
        vals = ["Revenue", dcf_static.gaap_is]
    elif statement == "BS":
        vals = ["Cash & Equivalents", dcf_static.gaap_bs]
    elif statement == "CF":
        vals = ["Net Income", dcf_static.gaap_cf]
    if vals[0] in df.index:
        blank_list = ["0" for _ in df.loc[vals[0]].to_list()]
    else:
        return pd.DataFrame(), None, None
    # Insert any GAAP rows the site omitted, filled with zeros, so every
    # statement exposes a uniform set of rows in the expected order.
    for i, _ in enumerate(vals[1][1:]):
        df = insert_row(vals[1][i + 1], vals[1][i], df, blank_list)

    return df, rounding, statement_currency
@log_start_end(log=logger)
def get_similar_dfs(symbol: str, info: Dict[str, Any], n: int, no_filter: bool = False):
    """
    Get financial-statement dataframes for companies similar to a ticker

    Parameters
    ----------
    symbol : str
        The ticker symbol to create a dataframe for
    info : Dict[str, Any]
        The dictionary produced from the yfinance.info function
    n : int
        The number of similar companies to produce
    no_filter : bool
        True means that we do not filter based on market cap

    Returns
    -------
    new_list : list
        Pairs of [ticker, [BS, IS, CF dataframes]] for each similar company
        whose three statements could all be scraped
    """
    similars = others_in_sector(symbol, info["sector"], info["industry"], no_filter)
    i = 0
    new_list = []
    # Walk the candidate list until n companies with complete statements
    # (none of BS/IS/CF empty) are collected or candidates run out.
    while i < n and similars:
        similar_ret = [create_dataframe(similars[0], x)[0] for x in ["BS", "IS", "CF"]]
        blank = [x.empty for x in similar_ret]
        if True not in blank:
            vals = [similars[0], similar_ret]
            new_list.append(vals)
            i += 1
        similars.pop(0)
    return new_list
@log_start_end(log=logger)
def clean_dataframes(*args) -> List[pd.DataFrame]:
    """
    Trim every dataframe to the column count of the narrowest one.

    Parameters
    ----------
    *args : List[pd.DataFrame]
        List of dataframes to clean

    Returns
    -------
    List[pd.DataFrame]
        Dataframes truncated to their shared number of trailing columns
    """
    # Keep the most recent (rightmost) columns so all frames align.
    shared_width = min(frame.shape[1] for frame in args)
    return [frame.iloc[:, -shared_width:] for frame in args]
@log_start_end(log=logger)
def get_value(df: pd.DataFrame, row: str, column: int) -> Tuple[float, float]:
    """
    Return two adjacent values from a statement dataframe as floats.

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe to get the information from
    row : str
        The row to get the information from
    column : int
        The position of the first column; the next column is read as well

    Returns
    -------
    Tuple[float, float]
        The values at (row, column) and (row, column + 1)
    """

    def _as_float(raw) -> float:
        # Scraped cells may be strings with thousands separators; a bare
        # "-" placeholder becomes "-0", i.e. zero.
        if isinstance(raw, str):
            return float(raw.replace(",", "").replace("-", "-0"))
        return float(raw)

    return (
        _as_float(df.at[row, df.columns[column]]),
        _as_float(df.at[row, df.columns[column + 1]]),
    )
@log_start_end(log=logger)
def frac(num: float, denom: float) -> Union[str, float]:
    """
    Divide two numbers, returning "N/A" for a zero denominator.

    Parameters
    ----------
    num : float
        The numerator
    denom : float
        The denominator

    Returns
    -------
    Union[str, float]
        The fraction, or "N/A" when division is undefined
    """
    if denom == 0:
        return "N/A"
    return num / denom
@log_start_end(log=logger)
def generate_path(n: int, symbol: str, date: str) -> Path:
    """
    Build the path an exported DCF workbook should be saved to.

    Parameters
    ----------
    n: int
        The try number; appended as "(n)" when non-zero
    symbol: str
        The ticker symbol to be saved
    date: str
        The date the dcf was generated

    Returns
    -------
    Path
        The .xlsx destination path
    """
    attempt_suffix = f"({n})" if n else ""
    export_folder = compose_export_path(
        func_name="dcf", dir_path=os.path.abspath(os.path.dirname(__file__))
    ).parent
    candidate = export_folder / symbol / date / attempt_suffix
    return Path(str(candidate) + ".xlsx")
__docformat__ = "numpy"
import logging
import pandas as pd
import requests
from bs4 import BeautifulSoup
from rapidfuzz import fuzz
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_management(symbol: str) -> pd.DataFrame:
    """Get company managers from Business Insider

    Scrapes the Business Insider markets page for a ticker, returning the
    management table plus a Google-search link per manager and, where the
    name fuzzy-matches a tracked insider, a link to their activity page.

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Dataframe of managers indexed by name, with Title, Info and
        Insider Activity columns (empty when no management data is found)
    """
    url_market_business_insider = (
        f"https://markets.businessinsider.com/stocks/{symbol.lower()}-stock"
    )
    text_soup_market_business_insider = BeautifulSoup(
        requests.get(
            url_market_business_insider, headers={"User-Agent": get_user_agent()}
        ).text,
        "lxml",
    )

    # Map each underlined section header to the table that follows it.
    found_h2s = {}

    for next_h2 in text_soup_market_business_insider.findAll(
        "h2", {"class": "header-underline"}
    ):
        next_table = next_h2.find_next_sibling("table", {"class": "table"})
        if next_table:
            found_h2s[next_h2.text] = next_table

    # Business Insider changed management display convention from 'Management' to
    # 'Ticker Management'. These next few lines simply find 'Ticker Management'
    # header key and copy it to a 'Management' key as to not alter the rest of
    # the function
    ticker_management_to_be_deleted = ""
    management_data_available = False
    for key in found_h2s:
        if "Management" in key:
            ticker_management_to_be_deleted = key
            management_data_available = True
    if management_data_available:
        found_h2s["Management"] = found_h2s[ticker_management_to_be_deleted]
        del found_h2s[ticker_management_to_be_deleted]

    if found_h2s.get("Management") is None:
        console.print(
            f"[red]No management information in Business Insider for {symbol}[/red]"
        )
        return pd.DataFrame()

    # Titles are the right-aligned cells; purely numeric cells and
    # compensation cells (those containing "USD") are filtered out.
    l_titles = [
        s_title.text.strip()
        for s_title in found_h2s["Management"].findAll(
            "td", {"class": "table__td text-right"}
        )
        if any(c.isalpha() for c in s_title.text.strip())
        and ("USD" not in s_title.text.strip())
    ]

    l_names = [
        s_name.text.strip()
        for s_name in found_h2s["Management"].findAll(
            "td", {"class": "table__td table--allow-wrap"}
        )
    ]

    # Keep only the trailing names so the name and title lists align.
    df_management = pd.DataFrame(
        {"Name": l_names[-len(l_titles) :], "Title": l_titles},  # noqa: E203
        columns=["Name", "Title"],
    )

    df_management["Info"] = "-"
    df_management["Insider Activity"] = "-"
    df_management = df_management.set_index("Name")

    for s_name in df_management.index:
        # NOTE(review): chained indexing (.loc[...][...]) relies on getting
        # a view back; it works here but is fragile under pandas copy rules.
        df_management.loc[s_name][
            "Info"
        ] = f"http://www.google.com/search?q={s_name} {symbol.upper()}".replace(
            " ", "%20"
        )

    s_url_base = "https://markets.businessinsider.com"

    for insider in text_soup_market_business_insider.findAll(
        "a", {"onclick": "silentTrackPI()"}
    ):
        for s_name in df_management.index:
            # Fuzzy-match link text against manager names to attach links.
            if fuzz.token_set_ratio(s_name, insider.text.strip()) > 70:  # type: ignore
                df_management.loc[s_name]["Insider Activity"] = (
                    s_url_base + insider.attrs["href"]
                )

    return df_management
__docformat__ = "numpy"
import logging
from typing import Dict, List
import numpy as np
import pandas as pd
import requests
from alpha_vantage.fundamentaldata import FundamentalData
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import lambda_long_number_format
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis import yahoo_finance_model
from openbb_terminal.stocks.stocks_helper import clean_fraction
from openbb_terminal.stocks.fundamental_analysis.fa_helper import clean_df_index
logger = logging.getLogger(__name__)
def check_premium_key(json_response: Dict) -> bool:
    """Detect AlphaVantage's premium-endpoint rejection payload.

    Parameters
    ----------
    json_response : Dict
        Parsed JSON body returned by the AlphaVantage API

    Returns
    -------
    bool
        True when the response is the premium-endpoint notice (a message
        is printed in that case), False otherwise
    """
    premium_notice = {
        "Information": "Thank you for using Alpha Vantage! This is a premium endpoint. You may subscribe to "
        "any of the premium plans at https://www.alphavantage.co/premium/ to instantly unlock all premium endpoints"
    }
    if json_response != premium_notice:
        return False
    console.print(
        "This is a premium endpoint for AlphaVantage. Please use a premium key.\n"
    )
    return True
@log_start_end(log=logger)
def get_overview(symbol: str) -> pd.DataFrame:
    """Get alpha vantage company overview

    Requests the OVERVIEW endpoint and returns the fundamentals as a
    single-column dataframe with human-readable row labels.

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Dataframe of fundamentals (empty on API error or missing data)
    """
    # Request OVERVIEW data from Alpha Vantage API
    s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={symbol}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    result = requests.get(s_req, stream=True)
    result_json = result.json()

    df_fa = pd.DataFrame()

    # If the returned data was unsuccessful
    if "Error Message" in result_json:
        console.print(result_json["Error Message"])
    else:
        # check if json is empty
        if not result_json:
            console.print("No data found from Alpha Vantage\n")
        # Parse json data to dataframe
        elif "Note" in result_json:
            # A "Note" key is AlphaVantage's rate-limit/throttling notice.
            console.print(result_json["Note"], "\n")
        else:
            df_fa = pd.json_normalize(result_json)

            # Keep json data sorting in dataframe
            df_fa = df_fa[list(result_json.keys())].T

            # Rows 5 onward hold numeric values; render them in short
            # human-readable form (e.g. 1.2 B).
            df_fa.iloc[5:] = df_fa.iloc[5:].applymap(
                lambda x: lambda_long_number_format(x)
            )
            clean_df_index(df_fa)
            # clean_df_index space-separates the camelCase keys; repair the
            # acronyms it breaks apart.
            df_fa = df_fa.rename(
                index={
                    "E b i t d a": "EBITDA",
                    "P e ratio": "PE ratio",
                    "P e g ratio": "PEG ratio",
                    "E p s": "EPS",
                    "Revenue per share t t m": "Revenue per share TTM",
                    "Operating margin t t m": "Operating margin TTM",
                    "Return on assets t t m": "Return on assets TTM",
                    "Return on equity t t m": "Return on equity TTM",
                    "Revenue t t m": "Revenue TTM",
                    "Gross profit t t m": "Gross profit TTM",
                    "Diluted e p s t t m": "Diluted EPS TTM",
                    "Quarterly earnings growth y o y": "Quarterly earnings growth YOY",
                    "Quarterly revenue growth y o y": "Quarterly revenue growth YOY",
                    "Trailing p e": "Trailing PE",
                    "Forward p e": "Forward PE",
                    "Price to sales ratio t t m": "Price to sales ratio TTM",
                    "E v to revenue": "EV to revenue",
                    "E v to e b i t d a": "EV to EBITDA",
                }
            )
    return df_fa
@log_start_end(log=logger)
def get_key_metrics(symbol: str) -> pd.DataFrame:
    """Get key metrics from overview

    Requests the OVERVIEW endpoint and returns only a curated subset of
    valuation/performance metrics.

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Dataframe of key metrics (empty on API error or missing data)
    """
    # Request OVERVIEW data
    s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={symbol}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    result = requests.get(s_req, stream=True)
    result_json = result.json()

    # If the returned data was unsuccessful
    if "Error Message" in result_json:
        console.print(result_json["Error Message"])
    else:
        # check if json is empty
        if not result_json or len(result_json) < 2:
            console.print("No data found from Alpha Vantage\n")
            return pd.DataFrame()

        df_fa = pd.json_normalize(result_json)
        df_fa = df_fa[list(result_json.keys())].T

        df_fa = df_fa.applymap(lambda x: lambda_long_number_format(x))
        clean_df_index(df_fa)
        # Repair the acronyms that clean_df_index breaks apart.
        df_fa = df_fa.rename(
            index={
                "E b i t d a": "EBITDA",
                "P e ratio": "PE ratio",
                "P e g ratio": "PEG ratio",
                "E p s": "EPS",
                "Return on equity t t m": "Return on equity TTM",
                "Price to sales ratio t t m": "Price to sales ratio TTM",
            }
        )
        # Curated subset of rows to surface as "key" metrics.
        as_key_metrics = [
            "Market capitalization",
            "EBITDA",
            "EPS",
            "PE ratio",
            "PEG ratio",
            "Price to book ratio",
            "Return on equity TTM",
            "Price to sales ratio TTM",
            "Dividend yield",
            "50 day moving average",
            "Analyst target price",
            "Beta",
        ]
        return df_fa.loc[as_key_metrics]

    return pd.DataFrame()
@log_start_end(log=logger)
def get_income_statements(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: bool = False,
) -> pd.DataFrame:
    """Get income statements for company

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    limit : int
        Number of past statements to get
    quarterly : bool, optional
        Flag to get quarterly instead of annual, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: bool
        If the data shall be formatted ready to plot

    Returns
    -------
    pd.DataFrame
        DataFrame of income statements
    """
    url = (
        f"https://www.alphavantage.co/query?function=INCOME_STATEMENT&symbol={symbol}"
        f"&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    r = requests.get(url)
    response_json = r.json()

    if check_premium_key(response_json):
        return pd.DataFrame()

    # If the returned data was unsuccessful
    if "Error Message" in response_json:
        console.print(response_json["Error Message"])
    else:
        # check if json is empty
        if not response_json:
            console.print(
                "No data found from Alpha Vantage, looking in Yahoo Finance\n"
            )
            if (
                yahoo_finance_model.get_financials(symbol, statement="financials")
                is not None
            ):
                return yahoo_finance_model.get_financials(
                    symbol, statement="financials"
                )
        else:
            statements = response_json
            df_fa = pd.DataFrame()

            if quarterly:
                if "quarterlyReports" in statements:
                    df_fa = pd.DataFrame(statements["quarterlyReports"])
            else:
                if "annualReports" in statements:
                    df_fa = pd.DataFrame(statements["annualReports"])

            if df_fa.empty:
                console.print("No data found from Alpha Vantage\n")
                return pd.DataFrame()

            # Oldest period first, line items as rows.
            df_fa = df_fa.set_index("fiscalDateEnding")
            df_fa = df_fa[::-1].T
            df_fa = df_fa.replace("None", "0")
            # Row 0 is the reported currency; everything below is numeric.
            df_fa.iloc[1:] = df_fa.iloc[1:].astype("float")
            df_fa_c = df_fa.iloc[:, -limit:].applymap(
                lambda x: lambda_long_number_format(x)
            )
            if ratios:
                df_fa_pc = df_fa.iloc[1:].pct_change(axis="columns").fillna(0)
                # Overwrite every numeric row with its period-over-period
                # change. Using len(df_fa) instead of a hard-coded row count
                # avoids an IndexError if AlphaVantage adds/removes fields.
                for i in range(1, len(df_fa)):
                    df_fa.iloc[i] = df_fa_pc.iloc[i - 1]
                # Rebuild the formatted copy from the ratio values so the
                # non-plot return shows ratios, matching get_balance_sheet
                # and get_cash_flow.
                df_fa_c = df_fa.iloc[:, 0:limit].applymap(
                    lambda x: lambda_long_number_format(x)
                )
                df_fa = df_fa.iloc[:, 0:limit]
            return df_fa_c if not plot else df_fa
    return pd.DataFrame()
@log_start_end(log=logger)
def get_balance_sheet(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: bool = False,
) -> pd.DataFrame:
    """Get balance sheets for company

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    limit : int
        Number of past statements to get
    quarterly : bool, optional
        Flag to get quarterly instead of annual, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: bool
        If the data shall be formatted ready to plot

    Returns
    -------
    pd.DataFrame
        DataFrame of the balance sheet
    """
    url = f"https://www.alphavantage.co/query?function=BALANCE_SHEET&symbol={symbol}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    r = requests.get(url)
    response_json = r.json()

    if check_premium_key(response_json):
        return pd.DataFrame()

    # If the returned data was unsuccessful
    if "Error Message" in response_json:
        console.print(response_json["Error Message"])
    # check if json is empty
    if not response_json:
        console.print("No data found from Alpha Vantage, looking in Yahoo Finance\n")
        if (
            yahoo_finance_model.get_financials(symbol, statement="balance-sheet")
            is not None
        ):
            return yahoo_finance_model.get_financials(symbol, statement="balance-sheet")
    else:
        statements = response_json
        df_fa = pd.DataFrame()

        if quarterly:
            if "quarterlyReports" in statements:
                df_fa = pd.DataFrame(statements["quarterlyReports"])
        else:
            if "annualReports" in statements:
                df_fa = pd.DataFrame(statements["annualReports"])

        if df_fa.empty:
            console.print("No data found from Alpha Vantage\n")
            return pd.DataFrame()

        # Oldest period first, line items as rows; row 0 is the currency.
        df_fa = df_fa.set_index("fiscalDateEnding")
        df_fa = df_fa[::-1].T
        df_fa = df_fa.replace("None", "0")
        df_fa.iloc[1:] = df_fa.iloc[1:].astype("float")
        df_fa_c = df_fa.iloc[:, -limit:].applymap(
            lambda x: lambda_long_number_format(x)
        )
        if ratios:
            # Replace numeric rows with period-over-period changes.
            # NOTE(review): the 1..36 range assumes AlphaVantage's fixed
            # balance-sheet field count — confirm it still matches.
            df_fa_pc = df_fa.iloc[1:].pct_change(axis="columns").fillna(0)
            j = 0
            for i in list(range(1, 37)):
                df_fa.iloc[i] = df_fa_pc.iloc[j]
                j += 1

            df_fa_c = df_fa.iloc[:, 0:limit].applymap(
                lambda x: lambda_long_number_format(x)
            )
            df_fa = df_fa.iloc[:, 0:limit]
        return df_fa_c if not plot else df_fa
    return pd.DataFrame()
@log_start_end(log=logger)
def get_cash_flow(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: bool = False,
) -> pd.DataFrame:
    """Get cash flows for company

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    limit : int
        Number of past statements to get
    quarterly : bool, optional
        Flag to get quarterly instead of annual, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: bool
        If the data shall be formatted ready to plot

    Returns
    -------
    pd.DataFrame
        Dataframe of cash flow statements
    """
    url = f"https://www.alphavantage.co/query?function=CASH_FLOW&symbol={symbol}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    r = requests.get(url)
    response_json = r.json()

    if check_premium_key(response_json):
        return pd.DataFrame()

    # If the returned data was unsuccessful
    if "Error Message" in response_json:
        console.print(response_json["Error Message"])
    else:
        # check if json is empty
        if not response_json:
            console.print(
                "No data found from Alpha Vantage, looking in Yahoo Finance\n"
            )
            if (
                yahoo_finance_model.get_financials(symbol, statement="cash-flow")
                is not None
            ):
                return yahoo_finance_model.get_financials(symbol, statement="cash-flow")
        else:
            statements = response_json
            df_fa = pd.DataFrame()

            if quarterly:
                if "quarterlyReports" in statements:
                    df_fa = pd.DataFrame(statements["quarterlyReports"])
            else:
                if "annualReports" in statements:
                    df_fa = pd.DataFrame(statements["annualReports"])

            if df_fa.empty:
                console.print("No data found from Alpha Vantage\n")
                return pd.DataFrame()

            # Oldest period first, line items as rows; row 0 is the currency.
            df_fa = df_fa.set_index("fiscalDateEnding")
            df_fa = df_fa[::-1].T
            df_fa = df_fa.replace("None", "0")
            df_fa.iloc[1:] = df_fa.iloc[1:].astype("float")
            df_fa_c = df_fa.iloc[:, -limit:].applymap(
                lambda x: lambda_long_number_format(x)
            )
            if ratios:
                # Replace numeric rows with period-over-period changes.
                # NOTE(review): the 1..36 range assumes AlphaVantage's fixed
                # cash-flow field count — confirm it still matches.
                df_fa_pc = df_fa.iloc[1:].pct_change(axis="columns").fillna(0)
                j = 0
                for i in list(range(1, 37)):
                    df_fa.iloc[i] = df_fa_pc.iloc[j]
                    j += 1

                df_fa_c = df_fa.iloc[:, 0:limit].applymap(
                    lambda x: lambda_long_number_format(x)
                )
                df_fa = df_fa.iloc[:, 0:limit]
            return df_fa_c if not plot else df_fa
    return pd.DataFrame()
@log_start_end(log=logger)
def get_earnings(symbol: str, quarterly: bool = False) -> pd.DataFrame:
    """Get earnings calendar for ticker

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    quarterly : bool, optional
        Flag to get quarterly and not annual, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of earnings (empty on API error or missing data)
    """
    # Request EARNINGS data from Alpha Vantage API
    s_req = (
        "https://www.alphavantage.co/query?function=EARNINGS&"
        f"symbol={symbol}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    result = requests.get(s_req, stream=True)
    result_json = result.json()
    df_fa = pd.DataFrame()

    # If the returned data was unsuccessful
    if "Error Message" in result_json:
        console.print(result_json["Error Message"])
    else:
        # check if json is empty
        if not result_json or len(result_json) < 2:
            console.print("No data found from Alpha Vantage\n")
        else:
            df_fa = pd.json_normalize(result_json)

            if quarterly:
                # The quarterly payload nests the records list in one cell.
                df_fa = pd.DataFrame(df_fa["quarterlyEarnings"][0])
                df_fa = df_fa[
                    [
                        "fiscalDateEnding",
                        "reportedDate",
                        "reportedEPS",
                        "estimatedEPS",
                        "surprise",
                        "surprisePercentage",
                    ]
                ]
                df_fa = df_fa.rename(
                    columns={
                        "fiscalDateEnding": "Fiscal Date Ending",
                        "reportedEPS": "Reported EPS",
                        "estimatedEPS": "Estimated EPS",
                        "reportedDate": "Reported Date",
                        "surprise": "Surprise",
                        "surprisePercentage": "Surprise Percentage",
                    }
                )
            else:
                df_fa = pd.DataFrame(df_fa["annualEarnings"][0])
                df_fa = df_fa.rename(
                    columns={
                        "fiscalDateEnding": "Fiscal Date Ending",
                        "reportedEPS": "Reported EPS",
                    }
                )

    return df_fa
@log_start_end(log=logger)
def df_values(
    df: pd.DataFrame, item: str, index: int = 0, length: int = 2
) -> List[int]:
    """Extract a numeric column from a statement dataframe as integers.

    Parameters
    ----------
    df : pd.DataFrame
        The Dataframe to use
    item : str
        The column to select
    index : int
        Starting row; when zero (falsy), the whole column is returned
        rather than a slice — callers only read the first elements
    length : int
        The number of rows to keep when slicing

    Returns
    -------
    List[int]
        Cleaned integer values; "None"/empty cells become 0
    """
    window = df.iloc[index : index + length] if index else df
    cleaned = window[item].apply(
        lambda raw: 0 if (not raw or raw == "None") else int(raw)
    )
    return cleaned.values.tolist()
def replace_df(name: str, row: pd.Series) -> pd.Series:
    """Format a fraud-metric row in place, adding rich color markup.

    Parameters
    ----------
    name : str
        The name of the row (selects which color rule applies)
    row : pd.Series
        The original row; its values are overwritten in place

    Returns
    -------
    pd.Series
        The same row with every value formatted as a string
    """
    for i, item in enumerate(row):
        if name == "Mscore":
            row[i] = color_mscore(item)
        elif name in ["Zscore", "McKee"]:
            row[i] = color_zscore_mckee(item)
        else:
            # Plain component indices are just rounded to two decimals.
            row[i] = str(round(float(item), 2))
    return row
def color_mscore(value: str) -> str:
    """Wrap an M-score value in rich color tags by manipulation risk.

    Parameters
    ----------
    value : str
        The score as a string

    Returns
    -------
    str
        The score formatted to two decimals with a rich color tag
    """
    score = float(value)
    # Beneish thresholds: at or below -2.22 manipulation is unlikely
    # (green), up to -1.78 is borderline (yellow), higher is a red flag.
    if score <= -2.22:
        color = "green"
    elif score <= -1.78:
        color = "yellow"
    else:
        color = "red"
    return f"[{color}]{score:.2f}[/{color}]"
def color_zscore_mckee(value: str) -> str:
    """Wrap a Z-score or McKee value in rich color tags.

    Parameters
    ----------
    value : str
        The score as a string

    Returns
    -------
    str
        The score formatted to two decimals with a rich color tag
    """
    score = float(value)
    # Values below 0.5 are flagged red, everything else green.
    color = "red" if score < 0.5 else "green"
    return f"[{color}]{score:.2f}[/{color}]"
@log_start_end(log=logger)
def get_fraud_ratios(symbol: str, detail: bool = False) -> pd.DataFrame:
    """Get fraud ratios based on fundamentals

    Computes, per fiscal year, the Beneish M-score (with its eight
    component indices), a Z-score, and the McKee bankruptcy measure from
    AlphaVantage annual statements.

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    detail : bool
        Whether to provide extra m-score details (the component indices)

    Returns
    -------
    pd.DataFrame
        The fraud ratios, one column per fiscal year (empty on premium-key
        error)
    """
    try:
        fd = FundamentalData(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
        # pylint: disable=unbalanced-tuple-unpacking
        df_cf, _ = fd.get_cash_flow_annual(symbol=symbol)
        df_bs, _ = fd.get_balance_sheet_annual(symbol=symbol)
        df_is, _ = fd.get_income_statement_annual(symbol=symbol)

    except ValueError as e:
        if "premium endpoint" in str(e):
            console.print(
                "This is a premium endpoint for AlphaVantage. Please use a premium key.\n"
            )
        return pd.DataFrame()

    # pylint: disable=no-member
    df_cf = df_cf.set_index("fiscalDateEnding")
    df_bs = df_bs.set_index("fiscalDateEnding")
    df_is = df_is.set_index("fiscalDateEnding")
    fraud_years = pd.DataFrame()
    for i in range(len(df_cf) - 1):
        # Current and prior year values for each required line item.
        ar = df_values(df_bs, "currentNetReceivables", i)
        sales = df_values(df_is, "totalRevenue", i)
        cogs = df_values(df_is, "costofGoodsAndServicesSold", i)
        ni = df_values(df_is, "netIncome", i)
        ca = df_values(df_bs, "totalCurrentAssets", i)
        cl = df_values(df_bs, "totalCurrentLiabilities", i)
        ppe = df_values(df_bs, "propertyPlantEquipment", i)
        cash = df_values(df_bs, "cashAndCashEquivalentsAtCarryingValue", i)
        cash_and_sec = df_values(df_bs, "cashAndShortTermInvestments", i)
        # Securities = short-term investments excluding plain cash.
        sec = [y - x for (x, y) in zip(cash, cash_and_sec)]
        ta = df_values(df_bs, "totalAssets", i)
        dep = df_values(df_bs, "accumulatedDepreciationAmortizationPPE", i)
        sga = df_values(df_is, "sellingGeneralAndAdministrative", i)
        tl = df_values(df_bs, "totalLiabilities", i)
        icfo = df_values(df_is, "netIncomeFromContinuingOperations", i)
        cfo = df_values(df_cf, "operatingCashflow", i)

        ratios: Dict = {}
        try:
            # Beneish component indices (current year over prior year).
            ratios["DSRI"] = (ar[0] / sales[0]) / (ar[1] / sales[1])
            ratios["GMI"] = ((sales[1] - cogs[1]) / sales[1]) / (
                (sales[0] - cogs[0]) / sales[0]
            )
            ratios["AQI"] = (1 - ((ca[0] + ppe[0] + sec[0]) / ta[0])) / (
                1 - ((ca[1] + ppe[1] + sec[1]) / ta[1])
            )
            ratios["SGI"] = sales[0] / sales[1]
            ratios["DEPI"] = (dep[1] / (ppe[1] + dep[1])) / (dep[0] / (ppe[0] + dep[0]))
            ratios["SGAI"] = (sga[0] / sales[0]) / (sga[1] / sales[1])
            ratios["LVGI"] = (tl[0] / ta[0]) / (tl[1] / ta[1])
            ratios["TATA"] = (icfo[0] - cfo[0]) / ta[0]
            # Weighted Beneish M-score.
            ratios["Mscore"] = (
                -4.84
                + (0.92 * ratios["DSRI"])
                + (0.58 * ratios["GMI"])
                + (0.404 * ratios["AQI"])
                + (0.892 * ratios["SGI"])
                + (0.115 * ratios["DEPI"] - (0.172 * ratios["SGAI"]))
                + (4.679 * ratios["TATA"])
                - (0.327 * ratios["LVGI"])
            )

            zscore = (
                -4.336
                - (4.513 * (ni[0] / ta[0]))
                + (5.679 * (tl[0] / ta[0]))
                + (0.004 * (ca[0] / cl[0]))
            )
            v1 = np.log(ta[0] / 1000)
            v2 = ni[0] / ta[0]
            v3 = cash[0] / cl[0]

            x = ((v1 + 0.85) * v2) - 0.85
            y = 1 + v3

            mckee = x**2 / (x**2 + y**2)
            ratios["Zscore"] = zscore
            ratios["McKee"] = mckee
        except ZeroDivisionError:
            # Fill every metric with "N/A" when any divisor is zero.
            # Key casing fixed to "McKee" to match the success path above,
            # so the row index is consistent regardless of which year
            # populates it first.
            for item in [
                "DSRI",
                "GMI",
                "AQI",
                "SGI",
                "DEPI",
                "SGAI",
                "LVGI",
                "TATA",
                "Mscore",
                "Zscore",
                "McKee",
            ]:
                ratios[item] = "N/A"

        if fraud_years.empty:
            fraud_years.index = ratios.keys()
        fraud_years[df_cf.index[i]] = ratios.values()
    fraud_years = fraud_years[sorted(fraud_years)]
    if not detail:
        details = ["DSRI", "GMI", "AQI", "SGI", "DEPI", "SGAI", "LVGI", "TATA"]
        fraud_years = fraud_years.drop(details)

    return fraud_years
@log_start_end(log=logger)
def get_dupont(symbol: str) -> pd.DataFrame:
    """Build a DuPont ratio breakdown from AlphaVantage annual statements.

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        DuPont components (rows) per fiscal year (columns); empty on API error.
    """
    try:
        client = FundamentalData(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
        # pylint: disable=unbalanced-tuple-unpacking
        df_bs, _ = client.get_balance_sheet_annual(symbol=symbol)
        df_is, _ = client.get_income_statement_annual(symbol=symbol)
    except ValueError as e:
        if "premium endpoint" in str(e):
            console.print(
                "This is a premium endpoint for AlphaVantage. Please use a premium key.\n"
            )
        return pd.DataFrame()
    # pylint: disable=no-member
    df_bs = df_bs.set_index("fiscalDateEnding")
    df_is = df_is.set_index("fiscalDateEnding")
    dupont_years = pd.DataFrame()
    for year_idx in range(len(df_bs)):
        net_income = df_values(df_is, "netIncome", year_idx, 1)
        pretax_income = df_values(df_is, "incomeBeforeTax", year_idx, 1)
        ebit_vals = df_values(df_is, "ebit", year_idx, 1)
        revenue = df_values(df_is, "totalRevenue", year_idx, 1)
        total_assets = df_values(df_bs, "totalAssets", year_idx, 1)
        equity = df_values(df_bs, "totalShareholderEquity", year_idx, 1)
        ratios: Dict = {}
        # Each DuPont component is numerator/denominator; clean_fraction guards
        # malformed statement values. An IndexError aborts the remaining
        # components for this year, matching the original sequential fill.
        components = [
            ("Tax Burden", net_income, pretax_income),
            ("Interest Burden", pretax_income, ebit_vals),
            ("EBIT Margin", ebit_vals, revenue),
            ("Asset Turnover", revenue, total_assets),
            ("Finance Leverage", total_assets, equity),
            ("ROI", net_income, equity),
        ]
        try:
            for label, numerator, denominator in components:
                ratios[label] = clean_fraction(numerator[0], denominator[0])
        except IndexError:
            pass
        if dupont_years.empty:
            dupont_years.index = ratios.keys()
        dupont_years[df_bs.index[year_idx]] = ratios.values()
    return dupont_years[sorted(dupont_years)]
__docformat__ = "numpy"

from typing import Dict, List

from openpyxl.styles import Border, Side, Font, PatternFill, Alignment

from openbb_terminal.helper_funcs import excel_columns, get_user_agent

# Pre-computed Excel column letters ("A", "B", ..., "AA", ...) used to address
# cells when laying out the DCF workbook.
letters = excel_columns()

# Derived / non-GAAP income-statement row labels that are skipped when copying
# statement data into the workbook.
non_gaap_is = [
    "Revenue Growth",
    "Net Income Common",
    "Net Income Growth",
    "Shares Outstanding (Basic)",
    "Shares Outstanding (Diluted)",
    "Shares Change",
    "EPS (Basic)",
    "EPS (Diluted)",
    "EPS Growth",
    "Free Cash Flow Per Share",
    "Dividend Per Share",
    "Dividend Growth",
    "Gross Margin",
    "Operating Margin",
    "Profit Margin",
    "Free Cash Flow Margin",
    "Effective Tax Rate",
    "EBITDA",
    "EBITDA Margin",
    "EBIT",
    "EBIT Margin",
    "Operating Expenses",
    "Pretax Income",
    "Revenue Growth (YoY)",
    "Depreciation & Amortization",
]
# GAAP income-statement rows, in workbook display order.
gaap_is = [
    "Revenue",
    "Cost of Revenue",
    "Gross Profit",
    "Selling, General & Admin",
    "Research & Development",
    "Other Operating Expenses",
    "Operating Income",
    "Interest Expense / Income",
    "Other Expense / Income",
    "Income Tax",
    "Net Income",
    "Preferred Dividends",
]
# Derived / non-GAAP balance-sheet row labels (skipped).
non_gaap_bs = [
    "Cash Growth",
    "Debt Growth",
    "Net Cash / Debt",
    "Net Cash / Debt Growth",
    "Net Cash Per Share",
    "Working Capital",
    "Book Value Per Share",
    "Total Debt",
]
# GAAP balance-sheet rows, in workbook display order.
gaap_bs = [
    "Cash & Equivalents",
    "Short-Term Investments",
    "Cash & Cash Equivalents",
    "Receivables",
    "Inventory",
    "Other Current Assets",
    "Total Current Assets",
    "Property, Plant & Equipment",
    "Long-Term Investments",
    "Goodwill and Intangibles",
    "Other Long-Term Assets",
    "Total Long-Term Assets",
    "Total Assets",
    "Accounts Payable",
    "Deferred Revenue",
    "Current Debt",
    "Other Current Liabilities",
    "Total Current Liabilities",
    "Long-Term Debt",
    "Other Long-Term Liabilities",
    "Total Long-Term Liabilities",
    "Total Liabilities",
    "Common Stock",
    "Retained Earnings",
    "Comprehensive Income",
    "Shareholders' Equity",
    "Total Liabilities and Equity",
]
# Derived / non-GAAP cash-flow row labels (skipped).
non_gaap_cf = [
    "Operating Cash Flow Growth",
    "Free Cash Flow Growth",
    "Free Cash Flow Margin",
    "Free Cash Flow Per Share",
    "Free Cash Flow",
]
# GAAP cash-flow rows, in workbook display order.
gaap_cf = [
    "Net Income",
    "Depreciation & Amortization",
    "Share-Based Compensation",
    "Other Operating Activities",
    "Operating Cash Flow",
    "Capital Expenditures",
    "Acquisitions",
    "Change in Investments",
    "Other Investing Activities",
    "Investing Cash Flow",
    "Dividends Paid",
    "Share Issuance / Repurchase",
    "Debt Issued / Paid",
    "Other Financing Activities",
    "Financing Cash Flow",
    "Net Cash Flow",
]
# Subtotal rows that get emphasized formatting in the workbook.
sum_rows = [
    "Gross Profit",
    "Operating Income",
    "Net Income",
    "Cash & Cash Equivalents",
    "Total Current Assets",
    "Total Long-Term Assets",
    "Total Assets",
    "Total Current Liabilities",
    "Total Long-Term Liabilities",
    "Total Liabilities",
    "Shareholders' Equity",
    "Total Liabilities and Equity",
    "Operating Cash Flow",
    "Investing Cash Flow",
    "Financing Cash Flow",
    "Net Cash Flow",
]

# Reusable openpyxl cell styles: bold header font and the border variants
# (nl = no left edge, nr = no right edge) used around statement blocks.
bold_font = Font(bold=True)
thin_border_top = Border(top=Side(style="thin"))
thin_border_bottom = Border(bottom=Side(style="thin"))
thin_border_nl = Border(
    right=Side(style="thin"),
    top=Side(style="thin"),
    bottom=Side(style="thin"),
)
thin_border_nr = Border(
    left=Side(style="thin"),
    top=Side(style="thin"),
    bottom=Side(style="thin"),
)
thin_border = Border(
    left=Side(style="thin"),
    right=Side(style="thin"),
    top=Side(style="thin"),
    bottom=Side(style="thin"),
)
green_bg = PatternFill(fgColor="7fe5cd", fill_type="solid")
center = Alignment(horizontal="center")
red = Font(color="FF0000")
# Excel accounting number format (negative values shown red, in parentheses).
fmt_acct = "_($* #,##0.00_);[Red]_($* (#,##0.00);_($* -_0_0_);_(@"

# Statement code -> human-readable title.
statement_titles = {"BS": "Balance Sheet", "CF": "Cash Flows", "IS": "Income Statement"}
# Statement code -> stockanalysis.com URL path fragment ("" = income statement).
statement_url: Dict[str, str] = {
    "BS": "balance-sheet/",
    "CF": "cash-flow-statement/",
    "IS": "",
}
# Statement code -> rows to ignore when scraping that statement.
statement_ignore: Dict[str, List[str]] = {
    "BS": non_gaap_bs,
    "CF": non_gaap_cf,
    "IS": non_gaap_is,
}
# Default HTTP headers for the scraping requests.
headers = {"User-Agent": get_user_agent()}
__docformat__ = "numpy"
import logging
import os
import webbrowser
from typing import List, Optional
from fractions import Fraction
import yfinance as yf
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
lambda_long_number_format,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis import yahoo_finance_model
from openbb_terminal.helpers_denomination import (
transform as transform_by_denomination,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def open_headquarters_map(symbol: str):
    """Open a browser tab on the company's headquarters location.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    """
    hq_url = yahoo_finance_model.get_hq(symbol)
    webbrowser.open(hq_url)
@log_start_end(log=logger)
def open_web(symbol: str):
    """Open a browser tab on the company's website.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    """
    site_url = yahoo_finance_model.get_website(symbol)
    webbrowser.open(site_url)
@log_start_end(log=logger)
def display_info(symbol: str, export: str = ""):
    """Display the Yahoo Finance ticker info table and business summary.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    export: str
        Format to export data
    """
    df_info = yahoo_finance_model.get_info(symbol)

    # Pull the long-form summary out of the table so it can be printed
    # separately below it.
    summary = ""
    if "Long business summary" in df_info.index:
        summary = df_info.loc["Long business summary"].values[0]
        df_info = df_info.drop(index=["Long business summary"])

    if df_info.empty:
        logger.error("Invalid data")
        console.print("[red]Invalid data[/red]\n")
        return

    print_rich_table(
        df_info,
        headers=list(df_info.columns),
        show_index=True,
        title=f"{symbol.upper()} Info",
    )
    if summary:
        console.print("Business Summary:")
        console.print(summary)

    export_data(export, os.path.dirname(os.path.abspath(__file__)), "info", df_info)
@log_start_end(log=logger)
def display_shareholders(symbol: str, holder: str = "institutional", export: str = ""):
    """Display one of the Yahoo Finance shareholder tables.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    holder: str
        Shareholder table to get. Can be major/institutional/mutualfund
    export: str
        Format to export data
    """
    df = yahoo_finance_model.get_shareholders(symbol, holder)
    # The "major" table comes back with unnamed columns; blank them for display.
    if holder == "major":
        df.columns = ["", ""]
    if "Date Reported" in df.columns:
        df["Date Reported"] = df["Date Reported"].apply(
            lambda reported: reported.strftime("%Y-%m-%d")
        )
    print_rich_table(
        df,
        headers=list(df.columns),
        show_index=False,
        title=f"{symbol.upper()} {holder.title()} Holders",
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), f"{holder}_holders", df
    )
@log_start_end(log=logger)
def display_sustainability(symbol: str, export: str = ""):
    """Display Yahoo Finance ticker sustainability (ESG) data.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    export: str
        Format to export data
    """
    df_sustainability = yahoo_finance_model.get_sustainability(symbol)

    # Fix: emptiness was previously checked a second time after this early
    # return, leaving an unreachable "Invalid data" else-branch; one guard
    # suffices.
    if df_sustainability.empty:
        console.print("No sustainability data found.", "\n")
        return

    print_rich_table(
        df_sustainability,
        headers=list(df_sustainability),
        title=f"{symbol.upper()} Sustainability",
        show_index=True,
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "sust", df_sustainability
    )
@log_start_end(log=logger)
def display_calendar_earnings(symbol: str, export: str = ""):
    """Display the Yahoo Finance earnings calendar for a ticker.

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    export: str
        Format to export data
    """
    events = yahoo_finance_model.get_calendar_earnings(symbol)
    if events.empty:
        console.print("No calendar events found.\n")
        return
    print_rich_table(
        events,
        headers=list(events.columns),
        show_index=False,
        title=f"{symbol.upper()} Calendar Earnings",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "cal", events)
@log_start_end(log=logger)
def display_dividends(
    symbol: str,
    limit: int = 12,
    plot: bool = True,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display historical dividends

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    limit: int
        Number to show
    plot: bool
        Plots historical data
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.fa.divs_chart("AAPL")
    """
    div_history = yahoo_finance_model.get_dividends(symbol)
    # Nothing to display; the model handles the user-facing error message.
    if div_history.empty:
        return
    if plot:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        ax.plot(
            div_history.index,
            div_history["Dividends"],
            ls="-",
            linewidth=0.75,
            marker=".",
            markersize=4,
            mfc=theme.down_color,
            mec=theme.down_color,
            alpha=1,
            label="Dividends Payout",
        )
        ax.set_ylabel("Amount Paid ($)")
        ax.set_title(f"Dividend History for {symbol}")
        # NOTE(review): limits are (last, first) — presumably the index is in
        # reverse-chronological order; confirm against the model's output.
        ax.set_xlim(div_history.index[-1], div_history.index[0])
        ax.legend()
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    else:
        # Table view: normalize the datetime index to YYYY-MM-DD strings.
        div_history.index = pd.to_datetime(div_history.index, format="%Y%m%d").strftime(
            "%Y-%m-%d"
        )
        print_rich_table(
            div_history.head(limit),
            headers=["Amount Paid ($)", "Change"],
            title=f"{symbol.upper()} Historical Dividends",
            show_index=True,
        )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "divs", div_history)
@log_start_end(log=logger)
def display_splits(
    symbol: str,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display splits and reverse splits events. [Source: Yahoo Finance]

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_splits = yahoo_finance_model.get_splits(symbol)
    if df_splits.empty:
        console.print("No splits or reverse splits events found.\n")
        return

    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return

    # Get all stock data since IPO
    df_data = yf.download(symbol, progress=False, threads=False)
    if df_data.empty:
        console.print("No stock price data available.\n")
        return

    # Fix: the close series was previously plotted twice (once with a hard-coded
    # color, once again with the default color); plot it a single time.
    ax.plot(df_data.index, df_data["Adj Close"], color="#FCED00")
    ax.set_ylabel("Price")
    ax.set_title(f"{symbol} splits and reverse splits events")

    for index, row in df_splits.iterrows():
        val = row.values[0]
        frac = Fraction(val).limit_denominator(1000000)
        # Splits (ratio > 1) use the up color, reverse splits the down color;
        # everything else about the marker/label is identical, so the previously
        # duplicated branches are merged.
        color = theme.up_color if val > 1 else theme.down_color
        ax.axvline(index, color=color)
        ax.annotate(
            f"{frac.numerator}:{frac.denominator}",
            (mdates.date2num(index), df_data["Adj Close"].max()),
            xytext=(10, 0),
            textcoords="offset points",
            color=color,
        )

    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()

    print_rich_table(
        df_splits,
        title=f"{symbol.upper()} splits and reverse splits",
        show_index=True,
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "splits", df_splits)
@log_start_end(log=logger)
def display_mktcap(
    symbol: str,
    start_date: Optional[str] = None,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Display market cap over time. [Source: Yahoo Finance]

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    start_date: Optional[str]
        Initial date (e.g., 2021-10-01). Defaults to 3 years back
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_mktcap, currency = yahoo_finance_model.get_mktcap(symbol, start_date)
    if df_mktcap.empty:
        console.print("No Market Cap data available.\n")
        return

    # One axis: use the single external axis if provided, otherwise make one.
    if external_axes:
        if not is_valid_axes_count(external_axes, 1):
            return
        (ax,) = external_axes
    else:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)

    ax.stackplot(df_mktcap.index, df_mktcap.values / 1e9, colors=[theme.up_color])
    ax.set_ylabel(f"Market Cap in Billion ({currency})")
    ax.set_title(f"{symbol} Market Cap")
    ax.set_xlim(df_mktcap.index[0], df_mktcap.index[-1])
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()

    export_data(export, os.path.dirname(os.path.abspath(__file__)), "mktcap", df_mktcap)
@log_start_end(log=logger)
def display_fundamentals(
    symbol: str,
    statement: str,
    limit: int = 12,
    ratios: bool = False,
    plot: list = None,
    export: str = "",
):
    """Display tickers balance sheet, income statement or cash-flow

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    statement: str
        Possible values are:

        - cash-flow
        - financials for Income
        - balance-sheet
    limit: int
        Number of periods to show
    ratios: bool
        Shows percentage change
    plot: list
        List of row labels to plot
    export: str
        Format to export data
    """
    fundamentals = yahoo_finance_model.get_financials(symbol, statement, ratios)

    # Fix: the previous if/elif chain had no else, so title_str stayed unbound
    # for an unexpected statement value and could raise NameError at print time.
    # A dict lookup with a safe default mirrors the polygon view's approach.
    title_str = {
        "balance-sheet": "Balance Sheet",
        "financials": "Income Statement",
        "cash-flow": "Cash Flow Statement",
    }.get(statement, statement)

    if fundamentals is None:
        return
    if fundamentals.empty:
        # The empty data frame error handling done in model
        return

    symbol_currency = yahoo_finance_model.get_currency(symbol)

    if plot:
        plot = [x.lower() for x in plot]
        rows_plot = len(plot)
        fundamentals_plot_data = fundamentals.transpose().fillna(-1)
        fundamentals_plot_data.columns = fundamentals_plot_data.columns.str.lower()
        if "ttm" in list(fundamentals_plot_data.index):
            fundamentals_plot_data = fundamentals_plot_data.drop(["ttm"])
        fundamentals_plot_data = fundamentals_plot_data.sort_index()

        if not ratios:
            # Scale to thousands/millions/... for readable axis labels.
            maximum_value = fundamentals_plot_data[plot[0].replace("_", " ")].max()
            (df_rounded, denomination) = transform_by_denomination(
                fundamentals_plot_data, maxValue=maximum_value
            )
            if denomination == "Units":
                denomination = ""
        else:
            df_rounded = fundamentals_plot_data
            denomination = ""

        if rows_plot == 1:
            # The figure handle is not needed for a single-axis bar chart.
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            ax.bar(df_rounded.index, df_rounded[plot[0].replace("_", " ")])
            title = (
                f"{plot[0].replace('_', ' ').capitalize()} QoQ Growth of {symbol.upper()}"
                if ratios
                else f"{plot[0].replace('_', ' ').capitalize()} of {symbol.upper()} {denomination}"
            )
            plt.title(title)
            theme.style_primary_axis(ax)
            theme.visualize_output()
        else:
            fig, axes = plt.subplots(rows_plot)
            for i in range(rows_plot):
                axes[i].bar(
                    df_rounded.index, df_rounded[plot[i].replace("_", " ")], width=0.5
                )
                axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
            theme.style_primary_axis(axes[0])
            fig.autofmt_xdate()
    else:
        # Snake case to english
        fundamentals.index = fundamentals.index.to_series().apply(
            lambda x: x.replace("_", " ").title()
        )
        # Readable numbers
        formatted_df = fundamentals.applymap(lambda_long_number_format).fillna("-")
        print_rich_table(
            formatted_df.iloc[:, :limit].applymap(lambda x: "-" if x == "nan" else x),
            show_index=True,
            title=f"{symbol} {title_str} Currency: {symbol_currency}",
        )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), statement, fundamentals
    )
@log_start_end(log=logger)
def display_earnings(symbol: str, limit: int = 12, export: str = ""):
    """Display historical earnings for a ticker. [Source: Yahoo Finance]

    Parameters
    ----------
    symbol : str
        Stock ticker symbol
    limit : int
        Number of earnings rows to display, by default 12
    export : str
        Format to export data
    """
    earnings = yahoo_finance_model.get_earnings_history(symbol)
    if earnings.empty:
        return
    # Symbol/Company repeat the ticker on every row; drop them for display.
    # (Previously passed as a set literal; a list keeps the intent explicit.)
    earnings = earnings.drop(columns=["Symbol", "Company"]).fillna("-")
    print_rich_table(
        earnings.head(limit),
        headers=list(earnings.columns),
        title=f"Historical Earnings for {symbol}",
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "earnings_yf", earnings
    )
__docformat__ = "numpy"
import logging
import os
import matplotlib.pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
plot_autoscale,
)
from openbb_terminal.stocks.fundamental_analysis import polygon_model
from openbb_terminal.helpers_denomination import (
transform as transform_by_denomination,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_POLYGON_KEY"])
def display_fundamentals(
    symbol: str,
    statement: str,
    limit: int = 10,
    quarterly: bool = False,
    ratios: bool = False,
    plot: list = None,
    export: str = "",
):
    """Display tickers balance sheet or income statement

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    statement:str
        Either balance or income
    limit: int
        Number of results to show, by default 10
    quarterly: bool
        Flag to get quarterly reports, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: list
        List of row labels to plot
    export: str
        Format to export data
    """
    fundamentals = polygon_model.get_financials(symbol, statement, quarterly, ratios)
    # NOTE(review): direct dict indexing raises KeyError for any statement other
    # than balance/income/cash — presumably the controller validates this first.
    title_str = {
        "balance": "Balance Sheet",
        "income": "Income Statement",
        "cash": "Cash Flows",
    }[statement]
    if fundamentals.empty:
        return
    # Keep only the most recent `limit` periods and reverse to oldest-first.
    fundamentals = fundamentals.iloc[:, :limit]
    fundamentals = fundamentals[fundamentals.columns[::-1]]
    if plot:
        # -1 stands in for missing values so the plotted series stays numeric.
        fundamentals_plot_data = fundamentals.copy().fillna(-1)
        rows_plot = len(plot)
        fundamentals_plot_data = fundamentals_plot_data.transpose()
        fundamentals_plot_data.columns = fundamentals_plot_data.columns.str.lower()
        fundamentals_plot_data.columns = [
            x.replace("_", "") for x in list(fundamentals_plot_data.columns)
        ]
        if not ratios:
            # Scale to thousands/millions/... for readable axis labels.
            (df_rounded, denomination) = transform_by_denomination(
                fundamentals_plot_data
            )
            if denomination == "Units":
                denomination = ""
        else:
            df_rounded = fundamentals_plot_data
            denomination = ""
        if rows_plot == 1:
            fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            # pandas .plot() draws on the current (just created) axes.
            df_rounded[plot[0].replace("_", "")].plot()
            title = (
                f"{plot[0].replace('_', ' ').lower()} {'QoQ' if quarterly else 'YoY'} Growth of {symbol.upper()}"
                if ratios
                else f"{plot[0].replace('_', ' ')} of {symbol.upper()} {denomination}"
            )
            plt.title(title)
            theme.style_primary_axis(ax)
            theme.visualize_output()
        else:
            # One stacked subplot per requested row label.
            fig, axes = plt.subplots(rows_plot)
            for i in range(rows_plot):
                axes[i].plot(df_rounded[plot[i].replace("_", "")])
                axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
            theme.style_primary_axis(axes[0])
            fig.autofmt_xdate()
    else:
        # Snake case to english
        fundamentals.index = fundamentals.index.to_series().apply(
            lambda x: x.replace("_", " ").title()
        )
        # Readable numbers
        fundamentals = fundamentals.applymap(lambda_long_number_format).fillna("-")
        print_rich_table(
            fundamentals.applymap(lambda x: "-" if x == "nan" else x),
            show_index=True,
            title=f"{symbol} {title_str}"
            if not ratios
            else f"{'QoQ' if quarterly else 'YoY'} Change of {symbol} {title_str}",
        )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), statement, fundamentals
    )
__docformat__ = "numpy"
import logging
import os
import matplotlib.pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
plot_autoscale,
)
from openbb_terminal.stocks.fundamental_analysis import eodhd_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_EODHD_KEY"])
def display_fundamentals(
    symbol: str,
    statement: str,
    limit: int = 10,
    quarterly: bool = False,
    ratios: bool = False,
    plot: list = None,
    export: str = "",
):
    """Display tickers balance sheet; income statement; cash flow statement

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    statement:str
        Either balance or income or cashflow
    limit: int
        Number of results to show, by default 10
    quarterly: bool
        Flag to get quarterly reports, by default False
    ratios: bool
        Shows percentage change, by default False
    plot: list
        List of row labels to plot
    export: str
        Format to export data
    """
    fundamentals = eodhd_model.get_financials(symbol, statement, quarterly, ratios)
    # NOTE(review): direct dict indexing raises KeyError for unexpected
    # statement values — presumably validated by the caller.
    title_str = {
        "Balance_Sheet": "Balance Sheet",
        "Income_Statement": "Income Statement",
        "Cash_Flow": "Cash Flows",
    }[statement]
    if fundamentals.empty:
        return
    if ratios or plot:
        fundamentals = fundamentals.iloc[:, :limit]
    if plot:
        rows_plot = len(plot)
        fundamentals_plot_data = fundamentals.transpose().fillna(-1)
        fundamentals_plot_data.columns = fundamentals_plot_data.columns.str.lower()
        # "-" placeholders become "-1" so the frame can be cast to float below.
        fundamentals_plot_data = fundamentals_plot_data.replace("-", "-1")
        fundamentals_plot_data = fundamentals_plot_data.astype(float)
        if "ttm" in list(fundamentals_plot_data.index):
            fundamentals_plot_data = fundamentals_plot_data.drop(["ttm"])
        fundamentals_plot_data = fundamentals_plot_data.sort_index()
        if not ratios:
            # Manual denomination ladder (trillions -> thousands) used for axis
            # labels; sibling views use helpers_denomination.transform instead.
            maximum_value = fundamentals_plot_data.max().max()
            if maximum_value > 1_000_000_000_000:
                df_rounded = fundamentals_plot_data / 1_000_000_000_000
                denomination = "in Trillions"
            elif maximum_value > 1_000_000_000:
                df_rounded = fundamentals_plot_data / 1_000_000_000
                denomination = "in Billions"
            elif maximum_value > 1_000_000:
                df_rounded = fundamentals_plot_data / 1_000_000
                denomination = "in Millions"
            elif maximum_value > 1_000:
                df_rounded = fundamentals_plot_data / 1_000
                denomination = "in Thousands"
            else:
                df_rounded = fundamentals_plot_data
                denomination = ""
        else:
            df_rounded = fundamentals_plot_data
            denomination = ""
        if rows_plot == 1:
            fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            ax.bar(df_rounded.index, df_rounded[plot[0].replace("_", " ")])
            title = (
                f"{plot[0].replace('_', ' ').capitalize()} QoQ Growth of {symbol.upper()}"
                if ratios
                else f"{plot[0].replace('_', ' ').capitalize()} of {symbol.upper()} {denomination}"
            )
            plt.title(title)
            theme.style_primary_axis(ax)
            theme.visualize_output()
        else:
            # One stacked subplot per requested row label.
            fig, axes = plt.subplots(rows_plot)
            for i in range(rows_plot):
                axes[i].bar(
                    df_rounded.index, df_rounded[plot[i].replace("_", " ")], width=0.5
                )
                axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
            theme.style_primary_axis(axes[0])
            fig.autofmt_xdate()
    else:
        # Snake case to english
        fundamentals.index = fundamentals.index.to_series().apply(
            lambda x: x.replace("_", " ").title()
        )
        # Readable numbers
        fundamentals = fundamentals.applymap(lambda_long_number_format).fillna("-")
        print_rich_table(
            fundamentals.iloc[:, :limit].applymap(lambda x: "-" if x == "nan" else x),
            show_index=True,
            title=f"{symbol} {title_str}",
        )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), statement, fundamentals
    )
__docformat__ = "numpy"
import logging
import re
from typing import List, Tuple
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
lambda_clean_data_values_to_float,
get_user_agent,
lambda_int_or_round_float,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def prepare_df_financials(
    symbol: str, statement: str, quarter: bool = False
) -> pd.DataFrame:
    """Builds a DataFrame with financial statements for a given company

    Scrapes the MarketWatch financials page and parses the statement table.

    Parameters
    ----------
    symbol : str
        Company's stock symbol
    statement : str
        Either income, balance or cashflow
    quarter : bool, optional
        Return quarterly financial statements instead of annual, by default False

    Returns
    -------
    pd.DataFrame
        A DataFrame with financial info (empty if the page layout is unexpected)

    Raises
    ------
    ValueError
        If statement is not income, balance or cashflow
    """
    financial_urls = {
        "income": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/income/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/income",
        },
        "balance": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet",
        },
        "cashflow": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow",
        },
    }
    if statement not in financial_urls:
        raise ValueError(f"type {statement} is not in {financial_urls.keys()}")

    period = "quarter" if quarter else "annual"

    text_soup_financials = BeautifulSoup(
        requests.get(
            financial_urls[statement][period].format(symbol),
            headers={"User-Agent": get_user_agent()},
        ).text,
        "lxml",
    )

    # Column headers of the financials table
    a_financials_header = [
        financials_header.text.strip("\n").split("\n")[0]
        for financials_header in text_soup_financials.findAll(
            "th", {"class": "overflow__heading"}
        )
    ]

    # Data columns end at the trend column; its label differs on quarterly pages.
    s_header_end_trend = ("5-year trend", "5- qtr trend")[quarter]
    # Fix: membership was previously checked twice (once negated and once
    # positive, leaving an unreachable else branch); a single guard suffices.
    if s_header_end_trend not in a_financials_header:
        return pd.DataFrame()
    df_financials = pd.DataFrame(
        columns=a_financials_header[0 : a_financials_header.index(s_header_end_trend)]
    )

    find_table = text_soup_financials.findAll(
        "div", {"class": "element element--table table--fixed financials"}
    )
    if not find_table:
        return df_financials

    financials_rows = find_table[0].findAll(
        "tr", {"class": ["table__row is-highlighted", "table__row"]}
    )
    for a_row in financials_rows:
        constructed_row = []
        financial_columns = a_row.findAll(
            "td", {"class": ["overflow__cell", "overflow__cell fixed--column"]}
        )
        if not financial_columns:
            continue
        for a_column in financial_columns:
            column_to_text = a_column.text.strip()
            # Cells can embed secondary lines; keep only the first.
            if "\n" in column_to_text:
                column_to_text = column_to_text.split("\n")[0]
            if column_to_text == "":
                continue
            constructed_row.append(column_to_text)
        df_financials.loc[len(df_financials)] = constructed_row

    return df_financials
@log_start_end(log=logger)
def get_sean_seah_warnings(
    symbol: str, debug: bool = False
) -> Tuple[pd.DataFrame, List[str], List[str]]:
    """Get financial statements and prepare Sean Seah warnings

    Parameters
    ----------
    symbol : str
        Ticker to look at
    debug : bool, optional
        Whether or not to return debug messages.
        Defaults to False.

    Returns
    -------
    pd.DataFrame
        Dataframe of financials
    List[str]
        List of warnings
    List[str]
        List of debug messages
    """
    # From INCOME STATEMENT, get: 'EPS (Basic)', 'Net Income', 'Interest Expense', 'EBITDA'
    url_financials = (
        f"https://www.marketwatch.com/investing/stock/{symbol}/financials/income"
    )
    text_soup_financials = BeautifulSoup(
        requests.get(url_financials, headers={"User-Agent": get_user_agent()}).text,
        "lxml",
    )
    # Define financials columns
    a_financials_header = [
        financials_header.text.strip("\n").split("\n")[0]
        for financials_header in text_soup_financials.findAll(
            "th", {"class": "overflow__heading"}
        )
    ]
    df_financials = pd.DataFrame(columns=a_financials_header[0:-1])
    # Add financials values: collect both regular and highlighted table rows.
    soup_financials = text_soup_financials.findAll(
        lambda tag: tag.name == "tr" and tag.get("class") == ["table__row"]
    )
    soup_financials += text_soup_financials.findAll(
        "tr", {"class": "table__row is-highlighted"}
    )
    for financials_info in soup_financials:
        financials_row = financials_info.text.split("\n")
        # Keep only rows that carry at least one numeric cell.
        if len(financials_row) > 5:
            for item in financials_row:
                if bool(re.search(r"\d", item)):
                    a_financials_info = financials_info.text.split("\n")
                    l_financials = [a_financials_info[2]]
                    l_financials.extend(a_financials_info[5:-2])
                    # Append data values to financials
                    df_financials.loc[len(df_financials.index)] = l_financials
                    break
    l_fin = ["EPS (Basic)", "Net Income", "Interest Expense", "EBITDA"]
    # Bail out if the scrape did not yield every required row.
    if not all(elem in df_financials["Item"].values for elem in l_fin):
        return pd.DataFrame(), [], []
    # Set item name as index
    df_financials = df_financials.set_index("Item")
    df_sean_seah = df_financials.loc[l_fin]
    # From BALANCE SHEET, get: 'Liabilities & Shareholders\' Equity', 'Long-Term Debt'
    url_financials = (
        f"https://www.marketwatch.com/investing/stock/{symbol}/financials/balance-sheet"
    )
    text_soup_financials = BeautifulSoup(
        requests.get(url_financials, headers={"User-Agent": get_user_agent()}).text,
        "lxml",
    )
    # Define financials columns
    a_financials_header = []
    for financials_header in text_soup_financials.findAll(
        "th", {"class": "overflow__heading"}
    ):
        a_financials_header.append(financials_header.text.strip("\n").split("\n")[0])
    s_header_end_trend = "5-year trend"
    df_financials = pd.DataFrame(
        columns=a_financials_header[0 : a_financials_header.index(s_header_end_trend)]
    )
    # Add financials values (same row-harvesting as the income statement above).
    soup_financials = text_soup_financials.findAll(
        lambda tag: tag.name == "tr" and tag.get("class") == ["table__row"]
    )
    soup_financials += text_soup_financials.findAll(
        "tr", {"class": "table__row is-highlighted"}
    )
    for financials_info in soup_financials:
        financials_row = financials_info.text.split("\n")
        if len(financials_row) > 5:
            for item in financials_row:
                if bool(re.search(r"\d", item)):
                    a_financials_info = financials_info.text.split("\n")
                    l_financials = [a_financials_info[2]]
                    l_financials.extend(a_financials_info[5:-2])
                    # Append data values to financials
                    df_financials.loc[len(df_financials.index)] = l_financials
                    break
    # Set item name as index
    df_financials = df_financials.set_index("Item")
    # Create dataframe to compute meaningful metrics from sean seah book
    transfer_cols = [
        "Total Shareholders' Equity",
        "Liabilities & Shareholders' Equity",
        "Long-Term Debt",
    ]
    df_sean_seah = pd.concat([df_sean_seah, df_financials.loc[transfer_cols]])
    # Clean these metrics by parsing their values to float
    df_sean_seah = df_sean_seah.applymap(lambda x: lambda_clean_data_values_to_float(x))
    df_sean_seah = df_sean_seah.T
    # Add additional necessary metrics
    df_sean_seah["ROE"] = (
        df_sean_seah["Net Income"] / df_sean_seah["Total Shareholders' Equity"]
    )
    df_sean_seah["Interest Coverage Ratio"] = (
        df_sean_seah["EBITDA"] / df_sean_seah["Interest Expense"]
    )
    df_sean_seah["ROA"] = (
        df_sean_seah["Net Income"] / df_sean_seah["Liabilities & Shareholders' Equity"]
    )
    df_sean_seah = df_sean_seah.sort_index()
    df_sean_seah = df_sean_seah.T
    # NOTE(review): n_warnings is incremented below but never returned;
    # callers get the warnings list itself.
    n_warnings = 0
    warnings = []
    debugged_warnings = []
    # Warning 1: EPS should be non-decreasing year over year.
    if np.any(df_sean_seah.loc["EPS (Basic)"].diff().dropna().values < 0):
        warnings.append("No consistent historical earnings per share")
        n_warnings += 1
        if debug:
            sa_eps = np.array2string(
                df_sean_seah.loc["EPS (Basic)"].values,
                formatter={"float_kind": lambda x: lambda_int_or_round_float(x)},
            )
            sa_growth = np.array2string(
                df_sean_seah.loc["EPS (Basic)"].diff().dropna().values,
                formatter={"float_kind": lambda x: lambda_int_or_round_float(x)},
            )
            debugged_warnings.append(f"\tEPS: {sa_eps}\n\tGrowth: {sa_growth} < 0")
    # Warning 2: ROE should stay above 15%.
    if np.any(df_sean_seah.loc["ROE"].values < 0.15):
        warnings.append("NOT consistently high return on equity")
        n_warnings += 1
        if debug:
            sa_roe = np.array2string(
                df_sean_seah.loc["ROE"].values,
                formatter={"float_kind": lambda x: lambda_int_or_round_float(x)},
            )
            debugged_warnings.append(f"\tROE: {sa_roe} < 0.15")
    # Warning 3: ROA should stay above 7%.
    if np.any(df_sean_seah.loc["ROA"].values < 0.07):
        warnings.append("NOT consistently high return on assets")
        n_warnings += 1
        if debug:
            sa_roa = np.array2string(
                df_sean_seah.loc["ROA"].values,
                formatter={"float_kind": lambda x: lambda_int_or_round_float(x)},
            )
            debugged_warnings.append(f"\tROA: {sa_roa} < 0.07")
    # Warning 4: long-term debt should not exceed 5x net income.
    if np.any(
        df_sean_seah.loc["Long-Term Debt"].values
        > 5 * df_sean_seah.loc["Net Income"].values
    ):
        warnings.append("5x Net Income < Long-Term Debt")
        n_warnings += 1
        if debug:
            sa_5_net_income = np.array2string(
                5 * df_sean_seah.loc["Net Income"].values,
                formatter={"float_kind": lambda x: lambda_int_or_round_float(x)},
            )
            sa_long_term_debt = np.array2string(
                df_sean_seah.loc["Long-Term Debt"].values,
                formatter={"float_kind": lambda x: lambda_int_or_round_float(x)},
            )
            debugged_warnings.append(
                f"\t5x NET Income: {sa_5_net_income}\n\tlower than Long-Term Debt: {sa_long_term_debt}"
            )
    # Warning 5: EBITDA should cover interest expense at least 3x.
    if np.any(df_sean_seah.loc["Interest Coverage Ratio"].values < 3):
        warnings.append("Interest coverage ratio less than 3")
        n_warnings += 1
        if debug:
            sa_interest_coverage_ratio = np.array2string(
                100 * df_sean_seah.loc["Interest Coverage Ratio"].values,
                formatter={"float_kind": lambda x: lambda_int_or_round_float(x)},
            )
            debugged_warnings.append(
                f"\tInterest Coverage Ratio: {sa_interest_coverage_ratio} < 3"
            )
    return (
        df_sean_seah.applymap(lambda x: lambda_int_or_round_float(x)),
        warnings,
        debugged_warnings,
    )
__docformat__ = "numpy"
import logging
import os
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table, plot_autoscale
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis import fmp_model
from openbb_terminal.helpers_denomination import (
transform as transform_by_denomination,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def valinvest_score(symbol: str):
    """Value investing tool based on Warren Buffett, Joseph Piotroski and Benjamin Graham thoughts [Source: FMP]

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    """
    # A falsy score (0 / None) produces no output, matching prior behavior.
    fmp_score = fmp_model.get_score(symbol)
    if not fmp_score:
        return
    # Trim trailing zeros and a dangling decimal point, e.g. "7.50" -> "7.5".
    trimmed = f"Score: {fmp_score:.2f}".rstrip("0").rstrip(".")
    console.print(trimmed + " %")
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_profile(symbol: str):
    """Financial Modeling Prep ticker profile

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    """
    profile = fmp_model.get_profile(symbol)
    if profile.empty:
        logger.error("Could not get data")
        console.print("[red]Unable to get data[/red]\n")
    else:
        # Long free-text fields are shown below the table, not inside it.
        tabular = profile.drop(index=["description", "image"])
        print_rich_table(
            tabular,
            headers=[""],
            title=f"{symbol.upper()} Profile",
            show_index=True,
        )
        console.print(f"\nImage: {profile.loc['image'][0]}")
        console.print(f"\nDescription: {profile.loc['description'][0]}")
    console.print()
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_quote(symbol: str):
    """Financial Modeling Prep ticker quote

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    """
    quote_df = fmp_model.get_quote(symbol)
    if not quote_df.empty:
        print_rich_table(
            quote_df, headers=[""], title=f"{symbol} Quote", show_index=True
        )
    else:
        console.print("[red]Data not found[/red]\n")
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_enterprise(
    symbol: str, limit: int = 5, quarterly: bool = False, export: str = ""
):
    """Financial Modeling Prep ticker enterprise

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    export: str
        Format to export data
    """
    enterprise_df = fmp_model.get_enterprise(symbol, limit, quarterly)
    # Oldest period first so columns read chronologically.
    enterprise_df = enterprise_df.iloc[:, ::-1]
    # Re-order the returned rows into a more logical presentation order.
    row_order = [
        "Symbol",
        "Stock price",
        "Number of shares",
        "Market capitalization",
        "Add total debt",
        "Minus cash and cash equivalents",
        "Enterprise value",
    ]
    enterprise_df = enterprise_df.reindex(row_order)
    if enterprise_df.empty:
        console.print("[red]No data available[/red]\n")
    else:
        print_rich_table(
            enterprise_df,
            headers=list(enterprise_df.columns),
            title=f"{symbol} Enterprise",
            show_index=True,
        )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "enterprise", enterprise_df
    )
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_discounted_cash_flow(
    symbol: str, limit: int = 5, quarterly: bool = False, export: str = ""
):
    """Financial Modeling Prep ticker discounted cash flow

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    export: str
        Format to export data
    """
    dcf = fmp_model.get_dcf(symbol, limit, quarterly)
    # BUG FIX: the empty check previously ran AFTER `dcf.iloc[0]` and
    # `dcf.drop("Date")`, so an empty fetch raised IndexError/KeyError
    # instead of printing the no-data message. Check first, then reshape.
    if dcf.empty:
        console.print("[red]No data available[/red]\n")
        return
    # Oldest period first, then promote the "Date" row to column labels.
    dcf = dcf[dcf.columns[::-1]]
    dcf.columns = dcf.iloc[0].values
    dcf = dcf.drop("Date")
    print_rich_table(dcf, title="Discounted Cash Flow", show_index=True)
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "dcf", dcf)
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_income_statement(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: list = None,
    export: str = "",
):
    """Financial Modeling Prep ticker income statement
    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    ratios: bool
        Shows percentage change, by default False
    plot: list
        List of row labels to plot
    export: str
        Format to export data
    """
    income = fmp_model.get_income(symbol, limit, quarterly, ratios, bool(plot))
    if not income.empty:
        if plot:
            # Reverse columns so periods run oldest -> newest on the x-axis.
            income_plot_data = income[income.columns[::-1]]
            rows_plot = len(plot)
            income_plot_data = income_plot_data.transpose()
            # Lower-case column names so user-supplied row labels match
            # regardless of capitalization.
            income_plot_data.columns = income_plot_data.columns.str.lower()
            if not ratios:
                # Scale raw dollar values into a common denomination
                # (e.g. Millions) for readable axis values; ratio data is
                # already percentage change so it is left unscaled.
                (df_rounded, denomination) = transform_by_denomination(income_plot_data)
                if denomination == "Units":
                    denomination = ""
            else:
                df_rounded = income_plot_data
                denomination = ""
            if rows_plot == 1:
                # Single requested metric: one full-size chart.
                fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
                df_rounded[plot[0].replace("_", "")].plot()
                title = (
                    f"{plot[0].replace('_', ' ').lower()} {'QoQ' if quarterly else 'YoY'} Growth of {symbol.upper()}"
                    if ratios
                    else f"{plot[0].replace('_', ' ')} of {symbol.upper()} {denomination}"
                )
                plt.title(title)
                theme.style_primary_axis(ax)
                theme.visualize_output()
            else:
                # Several metrics: one stacked subplot per requested row.
                fig, axes = plt.subplots(rows_plot)
                for i in range(rows_plot):
                    axes[i].plot(df_rounded[plot[i].replace("_", "")])
                    axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
                theme.style_primary_axis(axes[0])
                fig.autofmt_xdate()
        else:
            # Table mode: oldest period first, link rows printed separately.
            income = income[income.columns[::-1]]
            print_rich_table(
                income.drop(index=["Final link", "Link"]),
                headers=list(income.columns),
                title=f"{symbol.upper()} Income Statement"
                if not ratios
                else f"{'QoQ' if quarterly else 'YoY'} Change of {symbol.upper()} Income Statement",
                show_index=True,
            )
            # Allow full-width URLs when rendering the filing links below.
            pd.set_option("display.max_colwidth", None)
            console.print(income.loc["Final link"].to_frame().to_string())
            console.print()
            console.print(income.loc["Link"].to_frame().to_string())
            console.print()
        export_data(
            export, os.path.dirname(os.path.abspath(__file__)), "income", income
        )
    else:
        logger.error("Could not get data")
        console.print("[red]Could not get data[/red]\n")
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_balance_sheet(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: list = None,
    export: str = "",
):
    """Financial Modeling Prep ticker balance sheet
    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    ratios: bool
        Shows percentage change, by default False
    plot: list
        List of row labels to plot
    export: str
        Format to export data
    """
    balance = fmp_model.get_balance(symbol, limit, quarterly, ratios, bool(plot))
    if not balance.empty:
        if plot:
            # Reverse columns so periods run oldest -> newest on the x-axis.
            balance_plot_data = balance[balance.columns[::-1]]
            rows_plot = len(plot)
            balance_plot_data = balance_plot_data.transpose()
            # Lower-case column names so user-supplied row labels match
            # regardless of capitalization.
            balance_plot_data.columns = balance_plot_data.columns.str.lower()
            if not ratios:
                # Scale raw dollar values into a common denomination
                # (e.g. Millions); ratio data is already percentage change.
                (df_rounded, denomination) = transform_by_denomination(
                    balance_plot_data
                )
                if denomination == "Units":
                    denomination = ""
            else:
                df_rounded = balance_plot_data
                denomination = ""
            if rows_plot == 1:
                # Single requested metric: one full-size chart.
                fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
                df_rounded[plot[0].replace("_", "")].plot()
                title = (
                    f"{plot[0].replace('_', ' ').lower()} {'QoQ' if quarterly else 'YoY'} Growth of {symbol.upper()}"
                    if ratios
                    else f"{plot[0].replace('_', ' ')} of {symbol.upper()} {denomination}"
                )
                plt.title(title)
                theme.style_primary_axis(ax)
                theme.visualize_output()
            else:
                # Several metrics: one stacked subplot per requested row.
                fig, axes = plt.subplots(rows_plot)
                for i in range(rows_plot):
                    axes[i].plot(df_rounded[plot[i].replace("_", "")])
                    axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
                theme.style_primary_axis(axes[0])
                fig.autofmt_xdate()
        else:
            # Table mode: oldest period first, link rows printed separately.
            balance = balance[balance.columns[::-1]]
            print_rich_table(
                balance.drop(index=["Final link", "Link"]),
                headers=list(balance.columns),
                title=f"{symbol.upper()} Balance Sheet",
                show_index=True,
            )
            # Allow full-width URLs when rendering the filing links below.
            pd.set_option("display.max_colwidth", None)
            console.print(balance.loc["Final link"].to_frame().to_string())
            console.print()
            console.print(balance.loc["Link"].to_frame().to_string())
            console.print()
        export_data(
            export, os.path.dirname(os.path.abspath(__file__)), "balance", balance
        )
    else:
        logger.error("Could not get data")
        console.print("[red]Could not get data[/red]\n")
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_cash_flow(
    symbol: str,
    limit: int = 5,
    quarterly: bool = False,
    ratios: bool = False,
    plot: list = None,
    export: str = "",
):
    """Financial Modeling Prep ticker cash flow
    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    ratios: bool
        Shows percentage change, by default False
    plot: list
        List of row labels to plot
    export: str
        Format to export data
    """
    cash = fmp_model.get_cash(symbol, limit, quarterly, ratios, bool(plot))
    if not cash.empty:
        if plot:
            # Reverse columns so periods run oldest -> newest on the x-axis.
            cash_plot_data = cash[cash.columns[::-1]]
            rows_plot = len(plot)
            cash_plot_data = cash_plot_data.transpose()
            # Lower-case column names so user-supplied row labels match
            # regardless of capitalization.
            cash_plot_data.columns = cash_plot_data.columns.str.lower()
            if not ratios:
                # Scale raw dollar values into a common denomination
                # (e.g. Millions); ratio data is already percentage change.
                (df_rounded, denomination) = transform_by_denomination(cash_plot_data)
                if denomination == "Units":
                    denomination = ""
            else:
                df_rounded = cash_plot_data
                denomination = ""
            if rows_plot == 1:
                # Single requested metric: one full-size chart.
                fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
                df_rounded[plot[0].replace("_", "")].plot()
                title = (
                    f"{plot[0].replace('_', ' ').lower()} {'QoQ' if quarterly else 'YoY'} Growth of {symbol.upper()}"
                    if ratios
                    else f"{plot[0].replace('_', ' ')} of {symbol.upper()} {denomination}"
                )
                plt.title(title)
                theme.style_primary_axis(ax)
                theme.visualize_output()
            else:
                # Several metrics: one stacked subplot per requested row.
                fig, axes = plt.subplots(rows_plot)
                for i in range(rows_plot):
                    axes[i].plot(df_rounded[plot[i].replace("_", "")])
                    axes[i].set_title(f"{plot[i].replace('_', ' ')} {denomination}")
                theme.style_primary_axis(axes[0])
                fig.autofmt_xdate()
        else:
            # Table mode: oldest period first, link rows printed separately.
            cash = cash[cash.columns[::-1]]
            print_rich_table(
                cash.drop(index=["Final link", "Link"]),
                headers=list(cash.columns),
                title=f"{symbol.upper()} Cash Flow",
                show_index=True,
            )
            # Allow full-width URLs when rendering the filing links below.
            pd.set_option("display.max_colwidth", None)
            console.print(cash.loc["Final link"].to_frame().to_string())
            console.print()
            console.print(cash.loc["Link"].to_frame().to_string())
            console.print()
        export_data(export, os.path.dirname(os.path.abspath(__file__)), "cash", cash)
    else:
        logger.error("Could not get data")
        console.print("[red]Could not get data[/red]\n")
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_key_metrics(
    symbol: str, limit: int = 5, quarterly: bool = False, export: str = ""
):
    """Financial Modeling Prep ticker key metrics

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    export: str
        Format to export data
    """
    metrics_df = fmp_model.get_key_metrics(symbol, limit, quarterly)
    if metrics_df.empty:
        logger.error("Could not get data")
        console.print("[red]Could not get data[/red]\n")
        return
    # Oldest period first so columns read chronologically.
    metrics_df = metrics_df.iloc[:, ::-1]
    print_rich_table(
        metrics_df,
        headers=list(metrics_df.columns),
        title=f"{symbol.upper()} Key Metrics",
        show_index=True,
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "metrics", metrics_df
    )
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_financial_ratios(
    symbol: str, limit: int = 5, quarterly: bool = False, export: str = ""
):
    """Financial Modeling Prep ticker ratios

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    export: str
        Format to export data
    """
    ratios = fmp_model.get_key_ratios(symbol, limit, quarterly)
    if not ratios.empty:
        # Oldest period first so columns read chronologically.
        ratios = ratios[ratios.columns[::-1]]
        print_rich_table(
            ratios,
            headers=list(ratios.columns),
            title=f"{symbol.upper()} Ratios",
            show_index=True,
        )
        # BUG FIX: export label was the garbled "grratiosowth" ("ratios"
        # pasted into the middle of "growth"); exports are now named "ratios".
        export_data(
            export, os.path.dirname(os.path.abspath(__file__)), "ratios", ratios
        )
    else:
        logger.error("Could not get data")
        console.print("[red]Could not get data[/red]\n")
@log_start_end(log=logger)
@check_api_key(["API_KEY_FINANCIALMODELINGPREP"])
def display_financial_statement_growth(
    symbol: str, limit: int = 5, quarterly: bool = False, export: str = ""
):
    """Financial Modeling Prep ticker growth

    Parameters
    ----------
    symbol : str
        Fundamental analysis ticker symbol
    limit: int
        Number to get
    quarterly: bool
        Flag to get quarterly data
    export: str
        Format to export data
    """
    growth_df = fmp_model.get_financial_growth(symbol, limit, quarterly)
    if growth_df.empty:
        logger.error("Could not get data")
        console.print("[red]Could not get data[/red]\n")
        return
    print_rich_table(
        growth_df,
        headers=list(growth_df.columns),
        title=f"{symbol.upper()} Growth",
        show_index=True,
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "growth", growth_df
    )
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.government import quiverquant_model
# pylint: disable=C0302
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_last_government(
    gov_type: str = "congress",
    limit: int = 5,
    representative: str = "",
    export: str = "",
):
    """Display last government trading [Source: quiverquant.com]

    Parameters
    ----------
    gov_type: str
        Type of government data between: congress, senate and house
    limit: int
        Number of days to look back
    representative: str
        Specific representative to look at
    export: str
        Format to export data
    """
    df_gov = quiverquant_model.get_last_government(gov_type, limit, representative)
    if df_gov.empty:
        if representative:
            # BUG FIX: the old code always read df_gov["Representative"] here,
            # but at this point df_gov is empty and may have no columns at all
            # (the model returns a bare pd.DataFrame() on no data), which
            # raised KeyError instead of printing a helpful message.
            if "Representative" in df_gov.columns:
                available = ", ".join(
                    df_gov["Representative"].str.split().str[0].unique()
                )
                console.print(
                    f"No representative {representative} found in the past {limit}"
                    f" days. The following are available: {available}"
                )
            else:
                console.print(
                    f"No representative {representative} found in the past {limit} days."
                )
        else:
            console.print(f"No {gov_type} trading data found\n")
        return
    console.print(f"\nLast transactions for {gov_type.upper()}\n")
    print_rich_table(
        df_gov,
        headers=list(df_gov.columns),
        show_index=False,
        title="Representative Trading",
    )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "lasttrades", df_gov
    )
@log_start_end(log=logger)
def display_government_buys(
    gov_type: str = "congress",
    past_transactions_months: int = 6,
    limit: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Top buy government trading [Source: quiverquant.com]
    Parameters
    ----------
    gov_type: str
        Type of government data between: congress, senate and house
    past_transactions_months: int
        Number of months to get trading for
    limit: int
        Number of tickers to show
    raw: bool
        Display raw data
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_gov = quiverquant_model.get_government_buys(gov_type, past_transactions_months)
    if df_gov.empty:
        console.print(f"No {gov_type} trading data found\n")
        return
    if raw:
        # Sum the upper bound of reported purchase amounts per ticker,
        # expressed in thousands of dollars, largest first.
        df = pd.DataFrame(
            df_gov.groupby("Ticker")["upper"]
            .sum()
            .div(1000)
            .sort_values(ascending=False)
            .head(n=limit)
        )
        print_rich_table(
            df, headers=["Amount ($1k)"], show_index=True, title="Top Government Buys"
        )
    # This plot has 1 axis
    # NOTE: the chart is drawn even when raw output was requested.
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    colors = theme.get_colors()
    # Same aggregation as the raw table, rendered as a bar chart.
    df_gov.groupby("Ticker")["upper"].sum().div(1000).sort_values(ascending=False).head(
        n=limit
    ).plot(kind="bar", rot=0, ax=ax, color=colors)
    ax.set_ylabel("Amount [1k $]")
    ax.set_title(
        f"{gov_type.upper()}'s top {limit} purchased stocks (upper) in last {past_transactions_months} months"
    )
    # plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "topbuys", df_gov)
@log_start_end(log=logger)
def display_government_sells(
    gov_type: str = "congress",
    past_transactions_months: int = 6,
    limit: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Top sell government trading [Source: quiverquant.com]
    Parameters
    ----------
    gov_type: str
        Type of government data between: congress, senate and house
    past_transactions_months: int
        Number of months to get trading for
    limit: int
        Number of tickers to show
    raw: bool
        Display raw data
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_gov = quiverquant_model.get_government_sells(gov_type, past_transactions_months)
    if df_gov.empty:
        console.print(f"No {gov_type} trading data found\n")
        return
    if raw:
        # Sells appear as negative amounts: sort ascending (most negative
        # first) then take absolute values, shown in thousands of dollars.
        df = pd.DataFrame(
            df_gov.groupby("Ticker")["upper"]
            .sum()
            .div(1000)
            .sort_values(ascending=True)
            .abs()
            .head(n=limit)
        )
        print_rich_table(
            df,
            headers=["Amount ($1k)"],
            show_index=True,
            title="Top Government Trades",
        )
    # This plot has 1 axis
    # NOTE: the chart is drawn even when raw output was requested.
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    colors = theme.get_colors()
    # Same aggregation as the raw table, rendered as a bar chart.
    df_gov.groupby("Ticker")["upper"].sum().div(1000).sort_values().abs().head(
        n=limit
    ).plot(kind="bar", rot=0, ax=ax, color=colors)
    ax.set_ylabel("Amount ($1k)")
    ax.set_title(
        f"{limit} most sold stocks over last {past_transactions_months} months"
        f" (upper bound) for {gov_type}"
    )
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "topsells", df_gov)
@log_start_end(log=logger)
def display_last_contracts(
    past_transaction_days: int = 2,
    limit: int = 20,
    sum_contracts: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Last government contracts [Source: quiverquant.com]
    Parameters
    ----------
    past_transaction_days: int
        Number of days to look back
    limit: int
        Number of contracts to show
    sum_contracts: bool
        Flag to show total amount of contracts given out.
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df = quiverquant_model.get_last_contracts(past_transaction_days)
    if df.empty:
        return
    print_rich_table(
        df[:limit],
        headers=list(df.columns),
        show_index=False,
        title="Last Government Contracts",
    )
    if sum_contracts:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Bucket contract amounts by calendar day; totals shown in $1k.
        df["Date"] = pd.to_datetime(df["Date"]).dt.date
        df.groupby("Date").sum().div(1000).plot(kind="bar", rot=0, ax=ax)
        ax.set_ylabel("Amount ($1k)")
        ax.set_title("Total amount of government contracts given")
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "lastcontracts", df)
@log_start_end(log=logger)
def plot_government(
    government: pd.DataFrame,
    symbol: str,
    gov_type: str,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Helper for plotting government trading
    Parameters
    ----------
    government: pd.DataFrame
        Data to plot
    symbol: str
        Ticker symbol to plot government trading
    gov_type: str
        Type of government data between: congress, senate and house
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    # This plot has 1 axis
    if not external_axes:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    elif is_valid_axes_count(external_axes, 1):
        (ax,) = external_axes
    else:
        return
    # Shade the band between the per-day sums of the reported lower and
    # upper transaction-amount bounds, expressed in thousands of dollars.
    ax.fill_between(
        government["TransactionDate"].unique(),
        government.groupby("TransactionDate")["lower"].sum().values / 1000,
        government.groupby("TransactionDate")["upper"].sum().values / 1000,
    )
    # Clamp the x-axis to the first/last transaction dates present.
    ax.set_xlim(
        [
            government["TransactionDate"].values[0],
            government["TransactionDate"].values[-1],
        ]
    )
    ax.set_title(f"{gov_type.capitalize()} trading on {symbol}")
    ax.set_ylabel("Amount ($1k)")
    theme.style_primary_axis(ax)
    if not external_axes:
        theme.visualize_output()
@log_start_end(log=logger)
def display_government_trading(
    symbol: str,
    gov_type: str = "congress",
    past_transactions_months: int = 6,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Government trading for specific ticker [Source: quiverquant.com]

    Parameters
    ----------
    symbol: str
        Ticker symbol to get congress trading data from
    gov_type: str
        Type of government data between: congress, senate and house
    past_transactions_months: int
        Number of months to get transactions for
    raw: bool
        Show raw output of trades
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    trades = quiverquant_model.get_cleaned_government_trading(
        symbol=symbol,
        gov_type=gov_type,
        past_transactions_months=past_transactions_months,
    )
    if trades.empty:
        console.print(f"No {gov_type} trading data found\n")
        return
    # Either print the cleaned trades or hand them to the plot helper.
    if raw:
        print_rich_table(
            trades,
            headers=list(trades.columns),
            show_index=False,
            title=f"Government Trading for {symbol.upper()}",
        )
    else:
        plot_government(trades, symbol, gov_type, external_axes)
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "gtrades", trades)
@log_start_end(log=logger)
def display_contracts(
    symbol: str,
    past_transaction_days: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Show government contracts for ticker [Source: quiverquant.com]
    Parameters
    ----------
    symbol: str
        Ticker to get congress trading data from
    past_transaction_days: int
        Number of days to get transactions for
    raw: bool
        Flag to display raw data
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_contracts = quiverquant_model.get_contracts(symbol, past_transaction_days)
    if df_contracts.empty:
        return
    if raw:
        print_rich_table(
            df_contracts,
            headers=list(df_contracts.columns),
            show_index=False,
            title=f"Government Contracts for {symbol.upper()}",
        )
    # Only draw the chart when there is at least one non-zero amount.
    if df_contracts.Amount.abs().sum() != 0:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Sum contract amounts per date, shown in thousands of dollars.
        df_contracts.groupby("Date").sum(numeric_only=True).div(1000).plot(
            kind="bar", rot=0, ax=ax
        )
        ax.set_ylabel("Amount ($1k)")
        ax.set_title(f"Sum of latest government contracts to {symbol}")
        # Thin out x-axis labels: one tick every 4 dates.
        ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(4))
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    if df_contracts.Amount.abs().sum() == 0:
        console.print("Contracts found, but they are all equal to $0.00.\n")
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "contracts", df_contracts
    )
@log_start_end(log=logger)
def display_qtr_contracts(
    analysis: str = "total",
    limit: int = 5,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Quarterly contracts [Source: quiverquant.com]
    Parameters
    ----------
    analysis: str
        Analysis to perform. Either 'total', 'upmom' 'downmom'
    limit: int
        Number to show
    raw: bool
        Flag to display raw data
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    symbols = quiverquant_model.get_qtr_contracts(analysis, limit)
    if symbols.empty:
        return
    if analysis in ("upmom", "downmom"):
        if raw:
            print_rich_table(
                pd.DataFrame(symbols.values),
                headers=["symbols"],
                show_index=True,
                title="Quarterly Contracts",
            )
        else:
            # This plot has 1 axis
            if not external_axes:
                _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
            elif is_valid_axes_count(external_axes, 1):
                (ax,) = external_axes
            else:
                return
            # Track the longest series seen so the x-axis covers every
            # ticker's history; its year/quarter labels become the ticks.
            max_amount = 0
            quarter_ticks = []
            df_contracts = quiverquant_model.get_government_trading("quarter-contracts")
            for symbol in symbols:
                # Pull this ticker's amounts in (Year, Qtr) order.
                amounts = (
                    df_contracts[df_contracts["Ticker"] == symbol]
                    .sort_values(by=["Year", "Qtr"])["Amount"]
                    .values
                )
                qtr = (
                    df_contracts[df_contracts["Ticker"] == symbol]
                    .sort_values(by=["Year", "Qtr"])["Qtr"]
                    .values
                )
                year = (
                    df_contracts[df_contracts["Ticker"] == symbol]
                    .sort_values(by=["Year", "Qtr"])["Year"]
                    .values
                )
                # Plot amounts in millions of dollars against quarter index.
                ax.plot(
                    np.arange(0, len(amounts)), amounts / 1_000_000, "-*", lw=2, ms=15
                )
                if len(amounts) > max_amount:
                    max_amount = len(amounts)
                    quarter_ticks = [
                        f"{quarter[0]} - Q{quarter[1]} " for quarter in zip(year, qtr)
                    ]
            ax.set_xlim([-0.5, max_amount - 0.5])
            ax.set_xticks(np.arange(0, max_amount))
            ax.set_xticklabels(quarter_ticks)
            ax.legend(symbols)
            titles = {
                "upmom": "Highest increasing quarterly Government Contracts",
                "downmom": "Highest decreasing quarterly Government Contracts",
            }
            ax.set_title(titles[analysis])
            ax.set_ylabel("Amount ($1M)")
            if not external_axes:
                theme.visualize_output()
    elif analysis == "total":
        print_rich_table(
            symbols, headers=["Total"], title="Quarterly Contracts", show_index=True
        )
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "qtrcontracts", symbols
    )
@log_start_end(log=logger)
def display_hist_contracts(
    symbol: str,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Show historical quarterly government contracts [Source: quiverquant.com]

    Parameters
    ----------
    symbol: str
        Ticker symbol to get congress trading data from
    raw: bool
        Flag to display raw data
    export: str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_contracts = quiverquant_model.get_hist_contracts(symbol)
    if df_contracts.empty:
        return
    if raw:
        print_rich_table(
            df_contracts,
            headers=list(df_contracts.columns),
            floatfmt=[".0f", ".0f", ".2f"],
            title="Historical Quarterly Government Contracts",
        )
    else:
        # Order chronologically by (Year, Qtr); label only Q1 ticks with
        # the year so the x-axis stays readable.
        amounts = df_contracts.sort_values(by=["Year", "Qtr"])["Amount"].values
        qtr = df_contracts.sort_values(by=["Year", "Qtr"])["Qtr"].values
        year = df_contracts.sort_values(by=["Year", "Qtr"])["Year"].values
        quarter_ticks = [
            f"{quarter[0]}" if quarter[1] == 1 else "" for quarter in zip(year, qtr)
        ]
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        # Amounts plotted in thousands of dollars.
        ax.plot(
            np.arange(0, len(amounts)),
            amounts / 1000,
            marker=".",
            markerfacecolor=theme.down_color,
            lw=2,
            ms=15,
        )
        ax.set_xlim([-0.5, len(amounts) - 0.5])
        ax.set_xticks(np.arange(0, len(amounts)))
        ax.set_xticklabels(quarter_ticks)
        ax.set_title(f"Historical Quarterly Government Contracts for {symbol.upper()}")
        ax.set_ylabel("Amount ($1k)")
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    # BUG FIX: the dataframe argument was missing here, so "histcont"
    # exports were written empty; pass the fetched contracts like every
    # sibling display function does.
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "histcont", df_contracts
    )
@log_start_end(log=logger)
def display_top_lobbying(
    limit: int = 10,
    raw: bool = False,
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Top lobbying tickers based on total spent
    Parameters
    ----------
    limit: int
        Number of tickers to show
    raw: bool
        Show raw data
    export:
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
    df_lobbying = quiverquant_model.get_top_lobbying()
    if df_lobbying.empty:
        return
    # Convert amounts to hundreds of thousands of dollars; missing values
    # are treated as zero spend.
    df_lobbying["Amount"] = df_lobbying.Amount.astype(float).fillna(0) / 100_000
    # Total spend per ticker, largest first.
    lobbying_by_ticker = pd.DataFrame(
        df_lobbying.groupby("Ticker")["Amount"].agg("sum")
    ).sort_values(by="Amount", ascending=False)
    if raw:
        print_rich_table(
            lobbying_by_ticker.head(limit),
            headers=["Amount ($100k)"],
            show_index=True,
            title="Top Lobbying Tickers",
        )
    else:
        # This plot has 1 axis
        if not external_axes:
            _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        elif is_valid_axes_count(external_axes, 1):
            (ax,) = external_axes
        else:
            return
        colors = theme.get_colors()
        lobbying_by_ticker.head(limit).plot(kind="bar", ax=ax, color=colors)
        ax.set_xlabel("Ticker")
        ax.set_ylabel("Total Amount ($100k)")
        ax.set_title(f"Corporate Lobbying Spent since {df_lobbying['Date'].min()}")
        theme.style_primary_axis(ax)
        if not external_axes:
            theme.visualize_output()
    # The full (unaggregated) lobbying records are exported, not the top-N.
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "lobbying", df_lobbying
    )
@log_start_end(log=logger)
def display_lobbying(symbol: str, limit: int = 10):
    """Corporate lobbying details

    Parameters
    ----------
    symbol: str
        Ticker symbol to get corporate lobbying data from
    limit: int
        Number of events to show
    """
    lobbying_df = quiverquant_model.get_lobbying(symbol, limit)
    if lobbying_df.empty:
        return
    for _, event in lobbying_df.iterrows():
        raw_amount = event["Amount"]
        amount = "$" + str(int(float(raw_amount))) if raw_amount is not None else "N/A"
        console.print(f"{event['Date']}: {event['Client']} {amount}")
        # NOTE(review): the issue text is only shown when an amount is also
        # present — preserved from the original; confirm that coupling is
        # intentional.
        if (raw_amount is not None) and (event["Specific_Issue"] is not None):
            issue_text = event["Specific_Issue"].replace("\n", " ").replace("\r", "")
            console.print("\t" + issue_text)
        console.print("")
    console.print("")
__docformat__ = "numpy"
# Provided by Quiverquant guys to GST users
import logging
import textwrap
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import requests
from sklearn.linear_model import LinearRegression
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
API_QUIVERQUANT_KEY = (
"5cd2a65e96d0486efbe926a7cdbc1e8d8ab6c7b3" # pragma: allowlist secret
)
@log_start_end(log=logger)
def get_government_trading(
    gov_type: str = "congress", symbol: str = ""
) -> pd.DataFrame:
    """Returns the most recent transactions by members of government

    Parameters
    ----------
    gov_type: str
        Type of government data between:
        'congress', 'senate', 'house', 'contracts', 'quarter-contracts' and 'corporate-lobbying'
        (matched case-insensitively)
    symbol : str
        Ticker symbol to get congress trading data from

    Returns
    -------
    pd.DataFrame
        Most recent transactions by members of U.S. Congress; empty DataFrame
        on an unknown gov_type or a non-200 API response.
    """
    # Endpoint path per data type: (historical, live). The historical
    # endpoint is used when a specific ticker is requested.
    endpoints = {
        "congress": ("historical/congresstrading", "live/congresstrading"),
        "senate": ("historical/senatetrading", "live/senatetrading"),
        "house": ("historical/housetrading", "live/housetrading"),
        "contracts": ("historical/govcontractsall", "live/govcontractsall"),
        "quarter-contracts": ("historical/govcontracts", "live/govcontracts"),
        "corporate-lobbying": ("historical/lobbying", "live/lobbying"),
    }
    # CONSISTENCY FIX: "congress" was previously matched case-sensitively
    # while every other type used .lower(); normalize once for all types.
    gov_type = gov_type.lower()
    if gov_type not in endpoints:
        return pd.DataFrame()
    historical_path, live_path = endpoints[gov_type]
    if symbol:
        url = f"https://api.quiverquant.com/beta/{historical_path}/{symbol}"
    else:
        url = f"https://api.quiverquant.com/beta/{live_path}"
    headers = {
        "accept": "application/json",
        "X-CSRFToken": "TyTJwjuEC7VV7mOqZ622haRaaUr0x0Ng4nrwSRFKQs7vdoBcJlK9qjAS69ghzhFu",  # pragma: allowlist secret
        "Authorization": f"Token {API_QUIVERQUANT_KEY}",
    }
    # ROBUSTNESS: requests without a timeout can block the terminal forever
    # on a stalled connection.
    response = requests.get(url, headers=headers, timeout=30)
    if response.status_code != 200:
        return pd.DataFrame()
    if gov_type in ("congress", "senate", "house"):
        # Harmonize column names across the three trading endpoints.
        return pd.DataFrame(response.json()).rename(
            columns={"Date": "TransactionDate", "Senator": "Representative"}
        )
    return pd.DataFrame(response.json())
@log_start_end(log=logger)
def get_contracts(
    symbol: str,
    past_transaction_days: int = 10,
) -> pd.DataFrame:
    """Get government contracts for ticker [Source: quiverquant.com]

    Parameters
    ----------
    symbol: str
        Ticker to get congress trading data from
    past_transaction_days: int
        Number of days to get transactions for

    Returns
    -------
    pd.DataFrame
        Most recent transactions by members of U.S. Congress
    """
    # Pull the raw contract rows for this ticker from the shared endpoint helper
    contracts = get_government_trading("contracts", symbol)
    if contracts.empty:
        console.print("No government contracts found\n")
        return pd.DataFrame()

    # Normalize to calendar dates, then keep rows belonging to the first
    # `past_transaction_days` distinct dates (in the order the API returned them)
    contracts["Date"] = pd.to_datetime(contracts["Date"]).dt.date
    kept_dates = contracts["Date"].unique()[:past_transaction_days]
    contracts = contracts[contracts["Date"].isin(kept_dates)]
    contracts.drop_duplicates(inplace=True)
    return contracts
@log_start_end(log=logger)
def get_hist_contracts(
    symbol: str,
) -> pd.DataFrame:
    """Get historical quarterly government contracts [Source: quiverquant.com]

    Parameters
    ----------
    symbol: str
        Ticker symbol to get congress trading data from

    Returns
    -------
    pd.DataFrame
        Historical quarterly government contracts
    """
    # The quarterly endpoint already returns the data in usable form;
    # only an empty result needs special handling.
    quarterly = get_government_trading("quarter-contracts", symbol=symbol)
    if not quarterly.empty:
        return quarterly
    console.print("No quarterly government contracts found\n")
    return pd.DataFrame()
@log_start_end(log=logger)
def get_last_government(
    gov_type: str = "congress", limit: int = -1, representative: str = ""
) -> pd.DataFrame:
    """Get last government trading [Source: quiverquant.com]

    Parameters
    ----------
    gov_type: str
        Type of government data between: congress, senate and house
    limit: int
        Number of days to look back
    representative: str
        Specific representative to look at

    Returns
    -------
    pd.DataFrame
        Last government trading
    """
    df_gov = get_government_trading(gov_type)
    if df_gov.empty:
        return pd.DataFrame()

    # Keep only rows from the first `limit` distinct transaction dates
    recent_dates = df_gov["TransactionDate"].unique()[:limit]
    df_gov = df_gov[df_gov["TransactionDate"].isin(recent_dates)]

    # Congress data carries two extra columns (chamber and report date)
    columns = ["TransactionDate", "Ticker", "Representative", "Transaction", "Range"]
    renames = {"TransactionDate": "Transaction Date"}
    if gov_type == "congress":
        columns += ["House", "ReportDate"]
        renames["ReportDate"] = "Report Date"
    df_gov = df_gov[columns].rename(columns=renames)

    if representative:
        # Match on the representative's first name only
        first_names = df_gov["Representative"].str.split().str[0]
        df_gov = df_gov[first_names == representative]
    return df_gov
@log_start_end(log=logger)
def get_government_buys(
    gov_type: str = "congress",
    past_transactions_months: int = 6,
) -> pd.DataFrame:
    """Get top buy government trading [Source: quiverquant.com]

    Parameters
    ----------
    gov_type: str
        Type of government data between: congress, senate and house
    past_transactions_months: int
        Number of months to get trading for

    Returns
    -------
    pd.DataFrame
        DataFrame of top government buy trading
    """

    def _range_min(rng: str) -> str:
        # Lower bound of the reported dollar range, digits only
        return rng.split("-")[0].strip("$").replace(",", "").strip()

    def _range_max(rng: str) -> str:
        # Upper bound; single-valued ranges reuse the whole string
        if "-" in rng:
            return rng.split("-")[1].replace(",", "").strip().strip("$")
        return rng.strip("$").replace(",", "")

    def _signed_lower(row) -> float:
        # Purchases count positive from the lower bound; sales negative from the upper
        if row["Transaction"] == "Purchase":
            return float(row["min"])
        return -float(row["max"])

    def _signed_upper(row) -> float:
        if row["Transaction"] == "Purchase":
            return float(row["max"])
        return -float(row["min"])

    df_gov = get_government_trading(gov_type)
    if df_gov.empty:
        return pd.DataFrame()

    df_gov = df_gov.sort_values("TransactionDate", ascending=False)
    cutoff = datetime.now() - timedelta(days=past_transactions_months * 30)
    df_gov["TransactionDate"] = pd.to_datetime(df_gov["TransactionDate"])
    df_gov = df_gov[df_gov["TransactionDate"] > cutoff].dropna(axis=1)

    # Catch bug where error shown for purchase of >5,000,000
    df_gov["Range"] = df_gov["Range"].apply(
        lambda x: "$5,000,001-$5,000,001" if x == ">$5,000,000" else x
    )
    df_gov["min"] = df_gov["Range"].apply(_range_min)
    df_gov["max"] = df_gov["Range"].apply(_range_max)
    df_gov["lower"] = df_gov[["min", "max", "Transaction"]].apply(_signed_lower, axis=1)
    df_gov["upper"] = df_gov[["min", "max", "Transaction"]].apply(_signed_upper, axis=1)

    return df_gov.sort_values("TransactionDate", ascending=True)
@log_start_end(log=logger)
def get_government_sells(
    gov_type: str = "congress",
    past_transactions_months: int = 6,
) -> pd.DataFrame:
    """Get top sell government trading [Source: quiverquant.com]

    Parameters
    ----------
    gov_type: str
        Type of government data between: congress, senate and house
    past_transactions_months: int
        Number of months to get trading for

    Returns
    -------
    pd.DataFrame
        DataFrame of top government sell trading
    """

    def _range_min(rng: str) -> str:
        # Lower bound of the dollar range; also strips any ">$" prefix
        return (
            rng.split("-")[0]
            .strip("$")
            .replace(",", "")
            .strip()
            .replace(">$", "")
            .strip()
        )

    def _range_max(rng: str) -> str:
        # Upper bound; single-valued ranges fall back to the whole string
        if "-" in rng:
            return (
                rng.split("-")[1]
                .replace(",", "")
                .strip()
                .strip("$")
                .replace(">$", "")
                .strip()
            )
        return rng.strip("$").replace(",", "").replace(">$", "").strip()

    def _signed_lower(row) -> float:
        # Purchases count positive from the lower bound; sales negative from the upper
        if row["Transaction"] == "Purchase":
            return float(row["min"])
        return -float(row["max"])

    def _signed_upper(row) -> float:
        if row["Transaction"] == "Purchase":
            return float(row["max"])
        return -float(row["min"])

    df_gov = get_government_trading(gov_type)
    if df_gov.empty:
        return pd.DataFrame()

    df_gov = df_gov.sort_values("TransactionDate", ascending=False)
    cutoff = datetime.now() - timedelta(days=past_transactions_months * 30)
    df_gov["TransactionDate"] = pd.to_datetime(df_gov["TransactionDate"])
    df_gov = df_gov[df_gov["TransactionDate"] > cutoff].dropna()

    # Normalize the open-ended ">$5,000,000" bucket to a parseable range
    df_gov["Range"] = df_gov["Range"].apply(
        lambda x: "$5,000,001-$5,000,001" if x == ">$5,000,000" else x
    )
    df_gov["min"] = df_gov["Range"].apply(_range_min)
    df_gov["max"] = df_gov["Range"].apply(_range_max)
    df_gov["lower"] = df_gov[["min", "max", "Transaction"]].apply(_signed_lower, axis=1)
    df_gov["upper"] = df_gov[["min", "max", "Transaction"]].apply(_signed_upper, axis=1)

    return df_gov.sort_values("TransactionDate", ascending=True)
@log_start_end(log=logger)
def get_top_lobbying() -> pd.DataFrame:
    """Corporate lobbying details

    Returns
    -------
    pd.DataFrame
        DataFrame of top corporate lobbying
    """
    # Endpoint returns all lobbying rows; only an empty result needs handling
    lobbying = get_government_trading("corporate-lobbying")
    if not lobbying.empty:
        return lobbying
    console.print("No corporate lobbying found\n")
    return pd.DataFrame()
@log_start_end(log=logger)
def get_last_contracts(
    past_transaction_days: int = 2,
) -> pd.DataFrame:
    """Get last government contracts [Source: quiverquant.com]

    Parameters
    ----------
    past_transaction_days: int
        Number of days to look back

    Returns
    -------
    pd.DataFrame
        DataFrame of government contracts
    """
    df_contracts = get_government_trading("contracts")
    if df_contracts.empty:
        console.print("No government contracts found\n")
        return pd.DataFrame()
    # Convert to datetime BEFORE sorting so ordering is chronological, not lexical.
    df_contracts["Date"] = pd.to_datetime(df_contracts["Date"])
    # Bug fix: sort_values is not in-place and its result was previously
    # discarded, so the "last N dates" filter ran on unsorted API order.
    df_contracts = df_contracts.sort_values("Date", ascending=False)
    df_contracts.drop_duplicates(inplace=True)
    # Keep rows from the most recent `past_transaction_days` distinct dates
    df_contracts = df_contracts[
        df_contracts["Date"].isin(df_contracts["Date"].unique()[:past_transaction_days])
    ]
    df_contracts = df_contracts[["Date", "Ticker", "Amount", "Description", "Agency"]]
    # Wrap long descriptions for display purposes
    df_contracts["Description"] = df_contracts["Description"].apply(
        lambda x: "\n".join(textwrap.wrap(x, 50))
    )
    return df_contracts
def get_cleaned_government_trading(
    symbol: str,
    gov_type: str = "congress",
    past_transactions_months: int = 6,
) -> pd.DataFrame:
    """Government trading for specific ticker [Source: quiverquant.com]

    Parameters
    ----------
    symbol: str
        Ticker symbol to get congress trading data from
    gov_type: str
        Type of government data between: congress, senate and house
    past_transactions_months: int
        Number of months to get transactions for

    Returns
    -------
    pd.DataFrame
        DataFrame of tickers government trading
    """

    def _range_min(rng: str) -> str:
        # Lower bound of the reported dollar range, digits only
        return rng.split("-")[0].strip("$").replace(",", "").strip()

    def _range_max(rng: str) -> str:
        # Upper bound; single-valued ranges keep the first line of the string
        if "-" in rng:
            return rng.split("-")[1].replace(",", "").strip().strip("$")
        return rng.strip("$").replace(",", "").split("\n")[0]

    def _signed_lower(row) -> int:
        # Purchases are positive from the lower bound, sales negative from the upper
        if row["Transaction"] == "Purchase":
            return int(float(row["min"]))
        return -int(float(row["max"]))

    def _signed_upper(row) -> int:
        if row["Transaction"] == "Purchase":
            return int(float(row["max"]))
        return -int(float(row["min"]))

    df_gov = get_government_trading(gov_type, symbol)
    if df_gov.empty:
        return pd.DataFrame()

    df_gov = df_gov.sort_values("TransactionDate", ascending=False)
    cutoff = datetime.now() - timedelta(days=past_transactions_months * 30)
    df_gov["TransactionDate"] = pd.to_datetime(df_gov["TransactionDate"])
    df_gov = df_gov[df_gov["TransactionDate"] > cutoff]
    if df_gov.empty:
        console.print(f"No recent {gov_type} trading data found\n")
        return pd.DataFrame()

    df_gov["min"] = df_gov["Range"].apply(_range_min)
    df_gov["max"] = df_gov["Range"].apply(_range_max)
    df_gov["lower"] = df_gov[["min", "max", "Transaction"]].apply(_signed_lower, axis=1)
    df_gov["upper"] = df_gov[["min", "max", "Transaction"]].apply(_signed_upper, axis=1)

    return df_gov.sort_values("TransactionDate", ascending=True)
@log_start_end(log=logger)
def get_qtr_contracts(analysis: str = "total", limit: int = 5) -> pd.DataFrame:
    """Analyzes quarterly contracts by ticker

    Parameters
    ----------
    analysis : str
        How to analyze. Either gives total amount or sorts by high/low momentum.
    limit : int, optional
        Number to return, by default 5

    Returns
    -------
    pd.DataFrame
        Dataframe with tickers and total amount if total selected.
    """
    df_contracts = get_government_trading("quarter-contracts")
    if df_contracts.empty:
        console.print("No quarterly government contracts found\n")
        return pd.DataFrame()

    if analysis == "total":
        totals = (
            df_contracts.groupby("Ticker")["Amount"].sum().sort_values(ascending=False)
        )
        return pd.DataFrame(totals[:limit])

    if analysis in {"upmom", "downmom"}:
        # Fit a linear trend per ticker; the slope is the momentum measure.
        # Rows are accumulated in a list and converted once: DataFrame.append
        # was deprecated in pandas 1.4 and removed in pandas 2.0, and calling
        # it in a loop was quadratic.
        rows = []
        for tick, data in df_contracts.groupby("Ticker"):
            amounts = data.sort_values(by=["Year", "Qtr"])["Amount"].values
            regr = LinearRegression()
            # Train the model using the training sets
            regr.fit(np.arange(0, len(amounts)).reshape(-1, 1), amounts)
            rows.append({"Ticker": tick, "Coef": regr.coef_[0]})
        df_coef = pd.DataFrame(rows, columns=["Ticker", "Coef"])
        return df_coef.sort_values(by=["Coef"], ascending=analysis == "downmom")[
            "Ticker"
        ][:limit]

    return pd.DataFrame()
@log_start_end(log=logger)
def get_lobbying(symbol: str, limit: int = 10) -> pd.DataFrame:
    """Corporate lobbying details

    Parameters
    ----------
    symbol: str
        Ticker symbol to get corporate lobbying data from
    limit: int
        Number of events to show

    Returns
    -------
    pd.DataFrame
        Dataframe with corporate lobbying data
    """
    df_lobbying = get_government_trading("corporate-lobbying", symbol=symbol)
    if df_lobbying.empty:
        console.print("No corporate lobbying found\n")
        return pd.DataFrame()
    # Bug fix: sort_values is not in-place and its result was previously
    # discarded, so head(limit) sliced unsorted data instead of the most
    # recent events.
    df_lobbying = df_lobbying.sort_values(by=["Date"], ascending=False)
    return df_lobbying.head(limit)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.