id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
19,576 | import fire
import pandas as pd
import pathlib
import qlib
import logging
from ...data import D
from ...log import get_module_logger
from ...utils import get_pre_trading_date, is_tradable_date
from ..evaluate import risk_analysis
from ..backtest.backtest import update_account
from .manager import UserManager
from .utils import prepare
from .utils import create_user_folder
from .executor import load_order_list, save_order_list
from .executor import SimulatorExecutor
from .executor import save_score_series, load_score_series
class Operator:
    def __init__(self, client: str):
        """
        Parameters
        ----------
        client: str
            The qlib client config file(.yaml)
        """
        self.logger = get_module_logger("online operator", level=logging.INFO)
        self.client = client

    @staticmethod
    def init(client, path, date=None):
        """Initialize UserManager(), get predict date and trade date.

        NOTE: declared as a ``staticmethod``: every caller below invokes it as
        ``self.init(self.client, path, date)``, which without the decorator
        would pass ``self`` into ``client`` and raise a TypeError.

        Parameters
        ----------
        client: str
            The qlib client config file(.yaml)
        path : str
            Path to save user account.
        date : str (YYYY-MM-DD)
            Trade date, when the generated order list will be traded.

        Returns
        -------
        um: UserManager()
        pred_date: pd.Timestamp
        trade_date: pd.Timestamp
        """
        qlib.init_from_yaml_conf(client)
        um = UserManager(user_data_path=pathlib.Path(path))
        um.load_users()
        if not date:
            trade_date, pred_date = None, None
        else:
            trade_date = pd.Timestamp(date)
            if not is_tradable_date(trade_date):
                # Fixed: the original message had no "{}" placeholder, so the
                # offending date was silently dropped from the error text.
                raise ValueError("trade date {} is not tradable date".format(trade_date.date()))
            pred_date = get_pre_trading_date(trade_date, future=True)
        return um, pred_date, trade_date

    def add_user(self, id, config, path, date):
        """Add a new user into a folder to run 'online' module.

        Parameters
        ----------
        id : str
            User id, should be unique.
        config : str
            The file path (yaml) of user config
        path : str
            Path to save user account.
        date : str (YYYY-MM-DD)
            The date that user account was added.
        """
        create_user_folder(path)
        qlib.init_from_yaml_conf(self.client)
        um = UserManager(user_data_path=path)
        # Snap to the latest calendar day on or before `date`.
        add_date = D.calendar(end_time=date)[-1]
        if not is_tradable_date(add_date):
            # Fixed: placeholder was missing from the original message.
            raise ValueError("add date {} is not tradable date".format(add_date.date()))
        um.add_user(user_id=id, config_file=config, add_date=add_date)

    def remove_user(self, id, path):
        """Remove user from folder used in 'online' module.

        Parameters
        ----------
        id : str
            User id, should be unique.
        path : str
            Path to save user account.
        """
        um = UserManager(user_data_path=path)
        um.remove_user(user_id=id)

    def generate(self, date, path):
        """Generate order list that will be traded at 'date'.

        Parameters
        ----------
        date : str (YYYY-MM-DD)
            Trade date, when the generated order list will be traded.
        path : str
            Path to save user account.
        """
        um, pred_date, trade_date = self.init(self.client, path, date)
        for user_id, user in um.users.items():
            dates, trade_exchange = prepare(um, pred_date, user_id)
            # get and save the score at predict date
            input_data = user.model.get_data_with_date(pred_date)
            score_series = user.model.predict(input_data)
            save_score_series(score_series, (pathlib.Path(path) / user_id), trade_date)
            # update strategy (and model)
            user.strategy.update(score_series, pred_date, trade_date)
            # generate and save order list
            order_list = user.strategy.generate_trade_decision(
                score_series=score_series,
                current=user.account.current_position,
                trade_exchange=trade_exchange,
                trade_date=trade_date,
            )
            save_order_list(
                order_list=order_list,
                user_path=(pathlib.Path(path) / user_id),
                trade_date=trade_date,
            )
            self.logger.info("Generate order list at {} for {}".format(trade_date, user_id))
            um.save_user_data(user_id)

    def execute(self, date, exchange_config, path):
        """Execute the order list at 'date'.

        Parameters
        ----------
        date : str (YYYY-MM-DD)
            Trade date, that the generated order list will be traded.
        exchange_config: str
            The file path (yaml) of exchange config
        path : str
            Path to save user account.
        """
        um, pred_date, trade_date = self.init(self.client, path, date)
        for user_id, user in um.users.items():
            dates, trade_exchange = prepare(um, trade_date, user_id, exchange_config)
            executor = SimulatorExecutor(trade_exchange=trade_exchange)
            # dates[0] is the last trading date recorded for this account; it must
            # be exactly the predict date or the stored state is stale.
            if str(dates[0].date()) != str(pred_date.date()):
                raise ValueError(
                    "The account data is not newest! last trading date {}, today {}".format(
                        dates[0].date(), trade_date.date()
                    )
                )
            # load and execute the order list
            # will not modify the trade_account after executing
            order_list = load_order_list(user_path=(pathlib.Path(path) / user_id), trade_date=trade_date)
            trade_info = executor.execute(order_list=order_list, trade_account=user.account, trade_date=trade_date)
            executor.save_executed_file_from_trade_info(
                trade_info=trade_info,
                user_path=(pathlib.Path(path) / user_id),
                trade_date=trade_date,
            )
            self.logger.info("execute order list at {} for {}".format(trade_date.date(), user_id))

    def update(self, date, path, type="SIM"):
        """Update account at 'date'.

        Parameters
        ----------
        date : str (YYYY-MM-DD)
            Trade date, that the generated order list will be traded.
        path : str
            Path to save user account.
        type : str
            which executor was been used to execute the order list
            'SIM': SimulatorExecutor()
        """
        if type not in ["SIM", "YC"]:
            raise ValueError("type is invalid, {}".format(type))
        um, pred_date, trade_date = self.init(self.client, path, date)
        for user_id, user in um.users.items():
            dates, trade_exchange = prepare(um, trade_date, user_id)
            if type == "SIM":
                executor = SimulatorExecutor(trade_exchange=trade_exchange)
            else:
                raise ValueError("not found executor")
            # dates[0] is the last_trading_date
            if str(dates[0].date()) > str(pred_date.date()):
                raise ValueError(
                    "The account data is not newest! last trading date {}, today {}".format(
                        dates[0].date(), trade_date.date()
                    )
                )
            # load trade info and update account
            trade_info = executor.load_trade_info_from_executed_file(
                user_path=(pathlib.Path(path) / user_id), trade_date=trade_date
            )
            score_series = load_score_series((pathlib.Path(path) / user_id), trade_date)
            update_account(user.account, trade_info, trade_exchange, trade_date)
            portfolio_metrics = user.account.portfolio_metrics.generate_portfolio_metrics_dataframe()
            self.logger.info(portfolio_metrics)
            um.save_user_data(user_id)
            self.logger.info("Update account state {} for {}".format(trade_date, user_id))

    def simulate(self, id, config, exchange_config, start, end, path, bench="SH000905"):
        """Run the (generate_trade_decision -> execute_order_list -> update_account) process every day
        from start date to end date.

        Parameters
        ----------
        id : str
            user id, need to be unique
        config : str
            The file path (yaml) of user config
        exchange_config: str
            The file path (yaml) of exchange config
        start : str "YYYY-MM-DD"
            The start date to run the online simulate
        end : str "YYYY-MM-DD"
            The end date to run the online simulate
        path : str
            Path to save user account.
        bench : str
            The benchmark that our result compared with.
            'SH000905' for csi500, 'SH000300' for csi300
        """
        # Clear the current user if exists, then add a new user.
        create_user_folder(path)
        um = self.init(self.client, path, None)[0]
        start_date, end_date = pd.Timestamp(start), pd.Timestamp(end)
        try:
            um.remove_user(user_id=id)
        except BaseException:
            # Best effort: the user may simply not exist yet.
            pass
        um.add_user(user_id=id, config_file=config, add_date=pd.Timestamp(start_date))
        # Do the online simulate
        um.load_users()
        user = um.users[id]
        dates, trade_exchange = prepare(um, end_date, id, exchange_config)
        executor = SimulatorExecutor(trade_exchange=trade_exchange)
        # Walk day pairs: predict on day t, trade on day t+1.
        for pred_date, trade_date in zip(dates[:-2], dates[1:-1]):
            user_path = pathlib.Path(path) / id
            # 1. load and save score_series
            input_data = user.model.get_data_with_date(pred_date)
            score_series = user.model.predict(input_data)
            save_score_series(score_series, (pathlib.Path(path) / id), trade_date)
            # 2. update strategy (and model)
            user.strategy.update(score_series, pred_date, trade_date)
            # 3. generate and save order list
            order_list = user.strategy.generate_trade_decision(
                score_series=score_series,
                current=user.account.current_position,
                trade_exchange=trade_exchange,
                trade_date=trade_date,
            )
            save_order_list(order_list=order_list, user_path=user_path, trade_date=trade_date)
            # 4. auto execute order list
            order_list = load_order_list(user_path=user_path, trade_date=trade_date)
            trade_info = executor.execute(trade_account=user.account, order_list=order_list, trade_date=trade_date)
            executor.save_executed_file_from_trade_info(
                trade_info=trade_info, user_path=user_path, trade_date=trade_date
            )
            # 5. update account state
            trade_info = executor.load_trade_info_from_executed_file(user_path=user_path, trade_date=trade_date)
            update_account(user.account, trade_info, trade_exchange, trade_date)
        portfolio_metrics = user.account.portfolio_metrics.generate_portfolio_metrics_dataframe()
        self.logger.info(portfolio_metrics)
        um.save_user_data(id)
        self.show(id, path, bench)

    def show(self, id, path, bench="SH000905"):
        """Show the latest report (mean, std, information_ratio, annualized_return).

        Parameters
        ----------
        id : str
            user id, need to be unique
        path : str
            Path to save user account.
        bench : str
            The benchmark that our result compared with.
            'SH000905' for csi500, 'SH000300' for csi300
        """
        um = self.init(self.client, path, None)[0]
        if id not in um.users:
            # Fixed: placeholder was missing from the original message.
            raise ValueError("Cannot find user {}".format(id))
        bench = D.features([bench], ["$change"]).loc[bench, "$change"]
        portfolio_metrics = um.users[id].account.portfolio_metrics.generate_portfolio_metrics_dataframe()
        portfolio_metrics["bench"] = bench
        analysis_result = {}
        r = (portfolio_metrics["return"] - portfolio_metrics["bench"]).dropna()
        analysis_result["excess_return_without_cost"] = risk_analysis(r)
        r = (portfolio_metrics["return"] - portfolio_metrics["bench"] - portfolio_metrics["cost"]).dropna()
        analysis_result["excess_return_with_cost"] = risk_analysis(r)
        print("Result:")
        print("excess_return_without_cost:")
        print(analysis_result["excess_return_without_cost"])
        print("excess_return_with_cost:")
        print(analysis_result["excess_return_with_cost"])
def run():
    """Entry point: expose :class:`Operator`'s methods as a command-line interface via python-fire."""
    fire.Fire(Operator)
19,577 | import numpy as np
import pandas as pd
from datetime import datetime
from qlib.data.cache import H
from qlib.data.data import Cal
from qlib.data.ops import ElemOperator, PairOperator
from qlib.utils.time import time_to_day_index
H = MemCache()
Cal: CalendarProviderWrapper = Wrapper()
The provided code snippet includes necessary dependencies for implementing the `get_calendar_day` function. Write a Python function `def get_calendar_day(freq="1min", future=False)` to solve the following problem:
Load High-Freq Calendar Date Using Memcache. !!!NOTE: Loading the calendar is quite slow. So loading calendar before start multiprocessing will make it faster. Parameters ---------- freq : str frequency of read calendar file. future : bool whether including future trading day. Returns ------- _calendar: array of date.
Here is the function:
def get_calendar_day(freq="1min", future=False):
    """
    Load the high-frequency calendar dates through the memory cache.
    !!!NOTE: Loading the calendar is quite slow. So loading calendar before start multiprocessing will make it faster.
    Parameters
    ----------
    freq : str
        frequency of read calendar file.
    future : bool
        whether including future trading day.
    Returns
    -------
    _calendar:
        array of date.
    """
    cache_key = f"{freq}_future_{future}_day"
    if cache_key in H["c"]:
        return H["c"][cache_key]
    # Cache miss: load the raw calendar once and keep only the date part.
    calendar = np.array([stamp.date() for stamp in Cal.load_calendar(freq, future)])
    H["c"][cache_key] = calendar
    return calendar
19,578 | import numpy as np
import pandas as pd
from datetime import datetime
from qlib.data.cache import H
from qlib.data.data import Cal
from qlib.data.ops import ElemOperator, PairOperator
from qlib.utils.time import time_to_day_index
H = MemCache()
Cal: CalendarProviderWrapper = Wrapper()
The provided code snippet includes necessary dependencies for implementing the `get_calendar_minute` function. Write a Python function `def get_calendar_minute(freq="day", future=False)` to solve the following problem:
Load High-Freq Calendar Minute Using Memcache
Here is the function:
def get_calendar_minute(freq="day", future=False):
    """Load High-Freq Calendar Minute Using Memcache.

    Parameters
    ----------
    freq : str
        frequency of read calendar file.
    future : bool
        whether including future trading day.

    Returns
    -------
    np.ndarray
        for each calendar timestamp, its minute divided by 30 (its half-hour bucket).
    """
    # The cache key must differ from the one used by ``get_calendar_day``: the
    # original "_day" suffix made both functions share a key, so whichever ran
    # first poisoned the cache for the other with the wrong array.
    flag = f"{freq}_future_{future}_min"
    if flag in H["c"]:
        _calendar = H["c"][flag]
    else:
        _calendar = np.array(list(map(lambda x: x.minute // 30, Cal.load_calendar(freq, future))))
        H["c"][flag] = _calendar
    return _calendar
19,579 | import matplotlib.pyplot as plt
import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `sub_fig_generator` function. Write a Python function `def sub_fig_generator(sub_fs=(3, 3), col_n=10, row_n=1, wspace=None, hspace=None, sharex=False, sharey=False)` to solve the following problem:
sub_fig_generator. it will return a generator, each row contains <col_n> sub graph FIXME: Known limitation: - The last row will not be plotted automatically, please plot it outside the function Parameters ---------- sub_fs : the figure size of each subgraph in <col_n> * <row_n> subgraphs col_n : the number of subgraph in each row; It will generating a new graph after generating <col_n> of subgraphs. row_n : the number of subgraph in each column wspace : the width of the space for subgraphs in each row hspace : the height of blank space for subgraphs in each column You can try 0.3 if you feel it is too crowded Returns ------- It will return graphs with the shape of <col_n> each iter (it is squeezed).
Here is the function:
def sub_fig_generator(sub_fs=(3, 3), col_n=10, row_n=1, wspace=None, hspace=None, sharex=False, sharey=False):
    """Yield matplotlib axes one column at a time, opening a fresh figure every <col_n> yields.
    FIXME: Known limitation:
    - The last row will not be plotted automatically, please plot it outside the function
    Parameters
    ----------
    sub_fs :
        the figure size of each subgraph in <col_n> * <row_n> subgraphs
    col_n :
        the number of subgraphs in each row; a new figure is created after <col_n> subgraphs.
    row_n :
        the number of subgraphs in each column
    wspace :
        the width of the space for subgraphs in each row
    hspace :
        the height of blank space for subgraphs in each column
        You can try 0.3 if you feel it is too crowded
    sharex, sharey :
        forwarded to ``plt.subplots``
    Returns
    -------
    A generator yielding, on each iteration, the (squeezed) column of axes.
    """
    assert col_n > 1
    while True:
        # One figure per batch of <col_n> columns; plt.show() flushes it below.
        figure, grid = plt.subplots(
            row_n, col_n, figsize=(sub_fs[0] * col_n, sub_fs[1] * row_n), sharex=sharex, sharey=sharey
        )
        plt.subplots_adjust(wspace=wspace, hspace=hspace)
        grid = grid.reshape(row_n, col_n)
        for idx in range(col_n):
            column = grid[:, idx].squeeze()
            # A single-axes column is yielded as a bare Axes object.
            yield column.item() if column.size == 1 else column
        plt.show()
19,580 | from functools import partial
import pandas as pd
import plotly.graph_objs as go
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy import stats
from typing import Sequence
from qlib.typehint import Literal
from ..graph import ScatterGraph, SubplotsGraph, BarGraph, HeatmapGraph
from ..utils import guess_plotly_rangebreaks
class ScatterGraph(BaseGraph):
    # Graph-kind identifier consumed by BaseGraph (presumably mapped to go.Scatter
    # when building traces) — confirm against BaseGraph's implementation.
    _name = "scatter"
class SubplotsGraph:
    """Create subplots same as df.plot(subplots=True)
    Simple package for `plotly.tools.subplots`
    """
    def __init__(
        self,
        df: pd.DataFrame = None,
        kind_map: dict = None,
        layout: dict = None,
        sub_graph_layout: dict = None,
        sub_graph_data: list = None,
        subplots_kwargs: dict = None,
        **kwargs
    ):
        """
        :param df: pd.DataFrame
        :param kind_map: dict, subplots graph kind and kwargs
            eg: dict(kind='ScatterGraph', kwargs=dict())
        :param layout: `go.Layout` parameters
        :param sub_graph_layout: Layout of each graphic, similar to 'layout'
        :param sub_graph_data: Instantiation parameters for each sub-graphic
            eg: [(column_name, instance_parameters), ]
            column_name: str or go.Figure
            Instance_parameters:
            - row: int, the row where the graph is located
            - col: int, the col where the graph is located
            - name: str, show name, default column_name in 'df'
            - kind: str, graph kind, default `kind` param, eg: bar, scatter, ...
            - graph_kwargs: dict, graph kwargs, default {}, used in `go.Bar(**graph_kwargs)`
        :param subplots_kwargs: `plotly.tools.make_subplots` original parameters
            - shared_xaxes: bool, default False
            - shared_yaxes: bool, default False
            - vertical_spacing: float, default 0.3 / rows
            - subplot_titles: list, default []
                If `sub_graph_data` is None, will generate 'subplot_titles' according to `df.columns`,
                this field will be discarded
            - specs: list, see `make_subplots` docs
            - rows: int, Number of rows in the subplot grid, default 1
                If `sub_graph_data` is None, will generate 'rows' according to `df`, this field will be discarded
            - cols: int, Number of cols in the subplot grid, default 1
                If `sub_graph_data` is None, will generate 'cols' according to `df`, this field will be discarded
        :param kwargs:
        """
        self._df = df
        self._layout = layout
        self._sub_graph_layout = sub_graph_layout
        self._kind_map = kind_map
        if self._kind_map is None:
            self._kind_map = dict(kind="ScatterGraph", kwargs=dict())
        self._subplots_kwargs = subplots_kwargs
        if self._subplots_kwargs is None:
            self._init_subplots_kwargs()
        # NOTE: double-underscore attributes are name-mangled to _SubplotsGraph__cols/__rows.
        self.__cols = self._subplots_kwargs.get("cols", 2)  # pylint: disable=W0238
        self.__rows = self._subplots_kwargs.get(  # pylint: disable=W0238
            "rows", math.ceil(len(self._df.columns) / self.__cols)
        )
        self._sub_graph_data = sub_graph_data
        if self._sub_graph_data is None:
            self._init_sub_graph_data()
        self._init_figure()

    def _init_sub_graph_data(self):
        """Derive per-column subplot placement (row/col/name/kind) from ``self._df.columns``."""
        self._sub_graph_data = []
        self._subplot_titles = []
        for i, column_name in enumerate(self._df.columns):
            # Row-major placement over a __cols-wide grid (1-based indices for plotly).
            row = math.ceil((i + 1) / self.__cols)
            _temp = (i + 1) % self.__cols
            col = _temp if _temp else self.__cols
            res_name = column_name.replace("_", " ")
            _temp_row_data = (
                column_name,
                dict(
                    row=row,
                    col=col,
                    name=res_name,
                    kind=self._kind_map["kind"],
                    graph_kwargs=self._kind_map["kwargs"],
                ),
            )
            self._sub_graph_data.append(_temp_row_data)
            self._subplot_titles.append(res_name)

    def _init_subplots_kwargs(self):
        """Build default ``make_subplots`` kwargs: a 2-column grid sized for ``self._df``."""
        # Default cols, rows
        _cols = 2
        _rows = math.ceil(len(self._df.columns) / 2)
        self._subplots_kwargs = dict()
        self._subplots_kwargs["rows"] = _rows
        self._subplots_kwargs["cols"] = _cols
        self._subplots_kwargs["shared_xaxes"] = False
        self._subplots_kwargs["shared_yaxes"] = False
        self._subplots_kwargs["vertical_spacing"] = 0.3 / _rows
        self._subplots_kwargs["print_grid"] = False
        self._subplots_kwargs["subplot_titles"] = self._df.columns.tolist()

    def _init_figure(self):
        """Instantiate the subplot figure and add every sub-graph's traces into its grid cell."""
        self._figure = make_subplots(**self._subplots_kwargs)
        for column_name, column_map in self._sub_graph_data:
            if isinstance(column_name, go.Figure):
                # A ready-made figure is used as-is.
                _graph_obj = column_name
            elif isinstance(column_name, str):
                temp_name = column_map.get("name", column_name.replace("_", " "))
                kind = column_map.get("kind", self._kind_map.get("kind", "ScatterGraph"))
                _graph_kwargs = column_map.get("graph_kwargs", self._kind_map.get("kwargs", {}))
                _graph_obj = BaseGraph.get_instance_with_graph_parameters(
                    kind,
                    **dict(
                        df=self._df.loc[:, [column_name]],
                        name_dict={column_name: temp_name},
                        graph_kwargs=_graph_kwargs,
                    )
                )
            else:
                raise TypeError()
            row = column_map["row"]
            col = column_map["col"]
            _graph_data = getattr(_graph_obj, "data")
            # for _item in _graph_data:
            #     _item.pop('xaxis', None)
            #     _item.pop('yaxis', None)
            for _g_obj in _graph_data:
                self._figure.add_trace(_g_obj, row=row, col=col)
        if self._sub_graph_layout is not None:
            for k, v in self._sub_graph_layout.items():
                self._figure["layout"][k].update(v)
        # NOTE: Use the default theme from plotly version 3.x: template=None
        self._figure["layout"].update(template=None)
        self._figure["layout"].update(self._layout)

    @property
    def figure(self):
        """The assembled plotly figure.

        Declared as a property: call sites in this module access it as
        ``SubplotsGraph(...).figure`` without calling it, so a plain method
        would hand them a bound method instead of the figure object.
        """
        return self._figure
def guess_plotly_rangebreaks(dt_index: pd.DatetimeIndex):
    """
    This function `guesses` the rangebreaks required to remove gaps in datetime index.
    It basically calculates the difference between a `continuous` datetime index and index given:
    the smallest delta between consecutive sorted timestamps is treated as the regular step,
    and every larger delta becomes a break starting one step after its left endpoint.
    For more details on `rangebreaks` params in plotly, see
    https://plotly.com/python/reference/layout/xaxis/#layout-xaxis-rangebreaks

    Parameters
    ----------
    dt_index: pd.DatetimeIndex
        The datetimes of the data.

    Returns
    -------
    list of dict
        the `rangebreaks` to be passed into plotly axis, e.g.
        ``[{"values": [...], "dvalue": <break length in ms>}, ...]``.
        Empty when the index holds fewer than two timestamps (no gap can exist).
    """
    dt_idx = dt_index.sort_values()
    # Guard: with fewer than two timestamps there are no deltas, and the
    # original implementation crashed on ``gaps.min()`` over an empty index.
    if len(dt_idx) < 2:
        return []
    gaps = dt_idx[1:] - dt_idx[:-1]
    min_gap = gaps.min()
    gaps_to_break = {}
    for gap, d in zip(gaps, dt_idx[:-1]):
        if gap > min_gap:
            gaps_to_break.setdefault(gap - min_gap, []).append(d + min_gap)
    return [dict(values=v, dvalue=int(k.total_seconds() * 1000)) for k, v in gaps_to_break.items()]
The provided code snippet includes necessary dependencies for implementing the `_group_return` function. Write a Python function `def _group_return(pred_label: pd.DataFrame = None, reverse: bool = False, N: int = 5, **kwargs) -> tuple` to solve the following problem:
:param pred_label: :param reverse: :param N: :return:
Here is the function:
def _group_return(pred_label: pd.DataFrame = None, reverse: bool = False, N: int = 5, **kwargs) -> tuple:
    """
    Build group-return figures: cumulative return per score-quantile group, plus
    long-short / long-average return distributions.

    :param pred_label: pd.DataFrame indexed by (datetime, instrument) with a
        predicted ``score`` column and a realized ``label`` column.
    :param reverse: if True, negate the scores before ranking.
    :param N: number of quantile groups.
    :return: (group cumulative-return scatter figure, long-short/long-average histogram figure)
    """
    if reverse:
        # Work on a copy: the original implementation negated the caller's
        # DataFrame in place, silently corrupting any reuse of `pred_label`.
        pred_label = pred_label.copy()
        pred_label["score"] *= -1
    pred_label = pred_label.sort_values("score", ascending=False)
    # Group1 ~ Group5 only consider the dropna values
    pred_label_drop = pred_label.dropna(subset=["score"])
    # Group: per day, mean label of each contiguous score-quantile slice
    t_df = pd.DataFrame(
        {
            "Group%d"
            % (i + 1): pred_label_drop.groupby(level="datetime")["label"].apply(
                lambda x: x[len(x) // N * i : len(x) // N * (i + 1)].mean()  # pylint: disable=W0640
            )
            for i in range(N)
        }
    )
    t_df.index = pd.to_datetime(t_df.index)
    # Long-Short
    t_df["long-short"] = t_df["Group1"] - t_df["Group%d" % N]
    # Long-Average
    t_df["long-average"] = t_df["Group1"] - pred_label.groupby(level="datetime")["label"].mean()
    t_df = t_df.dropna(how="all")  # for days which does not contain label
    # Cumulative Return By Group
    group_scatter_figure = ScatterGraph(
        t_df.cumsum(),
        layout=dict(
            title="Cumulative Return",
            xaxis=dict(tickangle=45, rangebreaks=kwargs.get("rangebreaks", guess_plotly_rangebreaks(t_df.index))),
        ),
    ).figure
    t_df = t_df.loc[:, ["long-short", "long-average"]]
    _bin_size = float(((t_df.max() - t_df.min()) / 20).min())
    group_hist_figure = SubplotsGraph(
        t_df,
        kind_map=dict(kind="DistplotGraph", kwargs=dict(bin_size=_bin_size)),
        subplots_kwargs=dict(
            rows=1,
            cols=2,
            print_grid=False,
            subplot_titles=["long-short", "long-average"],
        ),
    ).figure
    return group_scatter_figure, group_hist_figure
19,581 | from functools import partial
import pandas as pd
import plotly.graph_objs as go
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy import stats
from typing import Sequence
from qlib.typehint import Literal
from ..graph import ScatterGraph, SubplotsGraph, BarGraph, HeatmapGraph
from ..utils import guess_plotly_rangebreaks
def _plot_qq(data: pd.Series = None, dist=stats.norm) -> go.Figure:
    """
    Build a plotly Q-Q figure for ``data`` against ``dist``.

    :param data: series of observations (NaNs are dropped before plotting).
    :param dist: scipy distribution to compare the sample against.
    :return: plotly figure holding the sample points plus the fitted 45-degree line.
    """
    # NOTE: plotly.tools.mpl_to_plotly not actively maintained, resulting in errors in the new version of matplotlib,
    # ref: https://github.com/plotly/plotly.py/issues/2913#issuecomment-730071567
    # removing plotly.tools.mpl_to_plotly for greater compatibility with matplotlib versions
    mpl_fig = sm.qqplot(data.dropna(), dist=dist, fit=True, line="45")
    plt.close(mpl_fig)
    qq_lines = mpl_fig.gca().lines
    sample_line = qq_lines[0]
    reference_line = qq_lines[1]
    figure = go.Figure()
    # First matplotlib line: the sample quantile points (rendered as markers).
    figure.add_trace(
        {
            "type": "scatter",
            "x": sample_line.get_xdata(),
            "y": sample_line.get_ydata(),
            "mode": "markers",
            "marker": {"color": "#19d3f3"},
        }
    )
    # Second matplotlib line: the fitted 45-degree reference line.
    figure.add_trace(
        {
            "type": "scatter",
            "x": reference_line.get_xdata(),
            "y": reference_line.get_ydata(),
            "mode": "lines",
            "line": {"color": "#636efa"},
        }
    )
    del qq_lines
    return figure
def ic_figure(ic_df: pd.DataFrame, show_nature_day=True, **kwargs) -> go.Figure:
    r"""Build the Information Coefficient (IC) bar figure.

    :param ic_df: ic DataFrame
    :param show_nature_day: whether to display the abscissa of non-trading day
    :param \*\*kwargs: contains some parameters to control plot style in plotly. Currently, supports
       - `rangebreaks`: https://plotly.com/python/time-series/#Hiding-Weekends-and-Holidays
    :return: plotly.graph_objs.Figure
    """
    if show_nature_day:
        # Reindex onto the full calendar so non-trading days appear (as NaN bars).
        full_range = pd.date_range(ic_df.index.min(), ic_df.index.max())
        ic_df = ic_df.reindex(full_range)
    layout_cfg = dict(
        title="Information Coefficient (IC)",
        xaxis=dict(tickangle=45, rangebreaks=kwargs.get("rangebreaks", guess_plotly_rangebreaks(ic_df.index))),
    )
    return BarGraph(ic_df, layout=layout_cfg).figure
class HeatmapGraph(BaseGraph):
    # Graph-kind identifier consumed by BaseGraph (presumably mapped to go.Heatmap
    # when building traces) — confirm against BaseGraph's implementation.
    _name = "heatmap"
    def _get_data(self):
        """
        Build the heatmap trace list: the DataFrame's columns/index become the
        x/y axes and its values the z matrix.

        :return: list containing a single heatmap trace object.
        """
        _data = [
            self.get_instance_with_graph_parameters(
                graph_type=self._graph_type,
                x=self._df.columns,
                y=self._df.index,
                z=self._df.values.tolist(),
                **self._graph_kwargs
            )
        ]
        return _data
class SubplotsGraph:
    """Create subplots same as df.plot(subplots=True)
    Simple package for `plotly.tools.subplots`
    """
    def __init__(
        self,
        df: pd.DataFrame = None,
        kind_map: dict = None,
        layout: dict = None,
        sub_graph_layout: dict = None,
        sub_graph_data: list = None,
        subplots_kwargs: dict = None,
        **kwargs
    ):
        """
        :param df: pd.DataFrame
        :param kind_map: dict, subplots graph kind and kwargs
            eg: dict(kind='ScatterGraph', kwargs=dict())
        :param layout: `go.Layout` parameters
        :param sub_graph_layout: Layout of each graphic, similar to 'layout'
        :param sub_graph_data: Instantiation parameters for each sub-graphic
            eg: [(column_name, instance_parameters), ]
            column_name: str or go.Figure
            Instance_parameters:
            - row: int, the row where the graph is located
            - col: int, the col where the graph is located
            - name: str, show name, default column_name in 'df'
            - kind: str, graph kind, default `kind` param, eg: bar, scatter, ...
            - graph_kwargs: dict, graph kwargs, default {}, used in `go.Bar(**graph_kwargs)`
        :param subplots_kwargs: `plotly.tools.make_subplots` original parameters
            - shared_xaxes: bool, default False
            - shared_yaxes: bool, default False
            - vertical_spacing: float, default 0.3 / rows
            - subplot_titles: list, default []
                If `sub_graph_data` is None, will generate 'subplot_titles' according to `df.columns`,
                this field will be discarded
            - specs: list, see `make_subplots` docs
            - rows: int, Number of rows in the subplot grid, default 1
                If `sub_graph_data` is None, will generate 'rows' according to `df`, this field will be discarded
            - cols: int, Number of cols in the subplot grid, default 1
                If `sub_graph_data` is None, will generate 'cols' according to `df`, this field will be discarded
        :param kwargs:
        """
        self._df = df
        self._layout = layout
        self._sub_graph_layout = sub_graph_layout
        self._kind_map = kind_map
        if self._kind_map is None:
            self._kind_map = dict(kind="ScatterGraph", kwargs=dict())
        self._subplots_kwargs = subplots_kwargs
        if self._subplots_kwargs is None:
            self._init_subplots_kwargs()
        # NOTE: double-underscore attributes are name-mangled to _SubplotsGraph__cols/__rows.
        self.__cols = self._subplots_kwargs.get("cols", 2)  # pylint: disable=W0238
        self.__rows = self._subplots_kwargs.get(  # pylint: disable=W0238
            "rows", math.ceil(len(self._df.columns) / self.__cols)
        )
        self._sub_graph_data = sub_graph_data
        if self._sub_graph_data is None:
            self._init_sub_graph_data()
        self._init_figure()

    def _init_sub_graph_data(self):
        """Derive per-column subplot placement (row/col/name/kind) from ``self._df.columns``."""
        self._sub_graph_data = []
        self._subplot_titles = []
        for i, column_name in enumerate(self._df.columns):
            # Row-major placement over a __cols-wide grid (1-based indices for plotly).
            row = math.ceil((i + 1) / self.__cols)
            _temp = (i + 1) % self.__cols
            col = _temp if _temp else self.__cols
            res_name = column_name.replace("_", " ")
            _temp_row_data = (
                column_name,
                dict(
                    row=row,
                    col=col,
                    name=res_name,
                    kind=self._kind_map["kind"],
                    graph_kwargs=self._kind_map["kwargs"],
                ),
            )
            self._sub_graph_data.append(_temp_row_data)
            self._subplot_titles.append(res_name)

    def _init_subplots_kwargs(self):
        """Build default ``make_subplots`` kwargs: a 2-column grid sized for ``self._df``."""
        # Default cols, rows
        _cols = 2
        _rows = math.ceil(len(self._df.columns) / 2)
        self._subplots_kwargs = dict()
        self._subplots_kwargs["rows"] = _rows
        self._subplots_kwargs["cols"] = _cols
        self._subplots_kwargs["shared_xaxes"] = False
        self._subplots_kwargs["shared_yaxes"] = False
        self._subplots_kwargs["vertical_spacing"] = 0.3 / _rows
        self._subplots_kwargs["print_grid"] = False
        self._subplots_kwargs["subplot_titles"] = self._df.columns.tolist()

    def _init_figure(self):
        """Instantiate the subplot figure and add every sub-graph's traces into its grid cell."""
        self._figure = make_subplots(**self._subplots_kwargs)
        for column_name, column_map in self._sub_graph_data:
            if isinstance(column_name, go.Figure):
                # A ready-made figure is used as-is.
                _graph_obj = column_name
            elif isinstance(column_name, str):
                temp_name = column_map.get("name", column_name.replace("_", " "))
                kind = column_map.get("kind", self._kind_map.get("kind", "ScatterGraph"))
                _graph_kwargs = column_map.get("graph_kwargs", self._kind_map.get("kwargs", {}))
                _graph_obj = BaseGraph.get_instance_with_graph_parameters(
                    kind,
                    **dict(
                        df=self._df.loc[:, [column_name]],
                        name_dict={column_name: temp_name},
                        graph_kwargs=_graph_kwargs,
                    )
                )
            else:
                raise TypeError()
            row = column_map["row"]
            col = column_map["col"]
            _graph_data = getattr(_graph_obj, "data")
            # for _item in _graph_data:
            #     _item.pop('xaxis', None)
            #     _item.pop('yaxis', None)
            for _g_obj in _graph_data:
                self._figure.add_trace(_g_obj, row=row, col=col)
        if self._sub_graph_layout is not None:
            for k, v in self._sub_graph_layout.items():
                self._figure["layout"][k].update(v)
        # NOTE: Use the default theme from plotly version 3.x: template=None
        self._figure["layout"].update(template=None)
        self._figure["layout"].update(self._layout)

    @property
    def figure(self):
        """The assembled plotly figure.

        Declared as a property: call sites in this module access it as
        ``SubplotsGraph(...).figure`` without calling it, so a plain method
        would hand them a bound method instead of the figure object.
        """
        return self._figure
The provided code snippet includes necessary dependencies for implementing the `_pred_ic` function. Write a Python function `def _pred_ic( pred_label: pd.DataFrame = None, methods: Sequence[Literal["IC", "Rank IC"]] = ("IC", "Rank IC"), **kwargs ) -> tuple` to solve the following problem:
:param pred_label: pd.DataFrame must contain one column of realized return with name `label` and one column of predicted score names `score`. :param methods: Sequence[Literal["IC", "Rank IC"]] IC series to plot. IC is sectional pearson correlation between label and score Rank IC is the spearman correlation between label and score For the Monthly IC, IC histogram, IC Q-Q plot. Only the first type of IC will be plotted. :return:
Here is the function:
def _pred_ic(
    pred_label: pd.DataFrame = None, methods: Sequence[Literal["IC", "Rank IC"]] = ("IC", "Rank IC"), **kwargs
) -> tuple:
    """
    Build the IC analysis figures: IC bar chart, monthly IC heatmap, and IC histogram + Q-Q plot.

    :param pred_label: pd.DataFrame
        must contain one column of realized return with name `label` and one column of predicted scores named `score`.
    :param methods: Sequence[Literal["IC", "Rank IC"]]
        IC series to plot.
        IC is the cross-sectional Pearson correlation between label and score.
        Rank IC is the Spearman correlation between label and score.
        For the Monthly IC, IC histogram and IC Q-Q plot, only the first type of IC will be plotted.
    :return: (ic_bar_figure, ic_heatmap_figure, ic_hist_figure)
    """
    # Map public method names onto the identifiers pandas' Series.corr() understands.
    _methods_mapping = {"IC": "pearson", "Rank IC": "spearman"}

    def _corr_series(x, method):
        # Cross-sectional correlation of label vs. score for one datetime slice.
        return x["label"].corr(x["score"], method=method)

    # One column per requested IC method, indexed by datetime.
    ic_df = pd.concat(
        [
            pred_label.groupby(level="datetime").apply(partial(_corr_series, method=_methods_mapping[m])).rename(m)
            for m in methods
        ],
        axis=1,
    )
    # Only the first IC column feeds the monthly heatmap / histogram / Q-Q plot.
    _ic = ic_df.iloc(axis=1)[0]
    # Collapse each datetime to its "YYYYMM" key, then average the IC within each month.
    _index = _ic.index.get_level_values(0).astype("str").str.replace("-", "").str.slice(0, 6)
    _monthly_ic = _ic.groupby(_index).mean()
    _monthly_ic.index = pd.MultiIndex.from_arrays(
        [_monthly_ic.index.str.slice(0, 4), _monthly_ic.index.str.slice(4, 6)],
        names=["year", "month"],
    )
    # fill month: reindex onto every month of the covered years so the heatmap grid is complete.
    _month_list = pd.date_range(
        start=pd.Timestamp(f"{_index.min()[:4]}0101"),
        end=pd.Timestamp(f"{_index.max()[:4]}1231"),
        freq="1M",
    )
    _years = []
    _month = []
    for _date in _month_list:
        _date = _date.strftime("%Y%m%d")
        _years.append(_date[:4])
        _month.append(_date[4:6])
    fill_index = pd.MultiIndex.from_arrays([_years, _month], names=["year", "month"])
    _monthly_ic = _monthly_ic.reindex(fill_index)
    ic_bar_figure = ic_figure(ic_df, kwargs.get("show_nature_day", False))
    ic_heatmap_figure = HeatmapGraph(
        _monthly_ic.unstack(),
        layout=dict(title="Monthly IC", xaxis=dict(dtick=1), yaxis=dict(tickformat="04d", dtick=1)),
        graph_kwargs=dict(xtype="array", ytype="array"),
    ).figure
    # Q-Q plot of the observed IC distribution against a normal distribution.
    dist = stats.norm
    _qqplot_fig = _plot_qq(_ic, dist)
    if isinstance(dist, stats.norm.__class__):
        dist_name = "Normal"
    else:
        dist_name = "Unknown"
    _ic_df = _ic.to_frame("IC")
    # 20 histogram bins spanning the observed IC range.
    _bin_size = ((_ic_df.max() - _ic_df.min()) / 20).min()
    _sub_graph_data = [
        (
            "IC",
            dict(
                row=1,
                col=1,
                name="",
                kind="DistplotGraph",
                graph_kwargs=dict(bin_size=_bin_size),
            ),
        ),
        (_qqplot_fig, dict(row=1, col=2)),
    ]
    ic_hist_figure = SubplotsGraph(
        _ic_df.dropna(),
        kind_map=dict(kind="HistogramGraph", kwargs=dict()),
        subplots_kwargs=dict(
            rows=1,
            cols=2,
            print_grid=False,
            subplot_titles=["IC", "IC %s Dist. Q-Q" % dist_name],
        ),
        sub_graph_data=_sub_graph_data,
        layout=dict(
            yaxis2=dict(title="Observed Quantile"),
            xaxis2=dict(title=f"{dist_name} Distribution Quantile"),
        ),
    ).figure
return ic_bar_figure, ic_heatmap_figure, ic_hist_figure | :param pred_label: pd.DataFrame must contain one column of realized return with name `label` and one column of predicted score names `score`. :param methods: Sequence[Literal["IC", "Rank IC"]] IC series to plot. IC is sectional pearson correlation between label and score Rank IC is the spearman correlation between label and score For the Monthly IC, IC histogram, IC Q-Q plot. Only the first type of IC will be plotted. :return: |
19,582 | from functools import partial
import pandas as pd
import plotly.graph_objs as go
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy import stats
from typing import Sequence
from qlib.typehint import Literal
from ..graph import ScatterGraph, SubplotsGraph, BarGraph, HeatmapGraph
from ..utils import guess_plotly_rangebreaks
class ScatterGraph(BaseGraph):
    """Scatter-trace graph: each DataFrame column becomes one plotly Scatter trace."""

    _name = "scatter"
def guess_plotly_rangebreaks(dt_index: pd.DatetimeIndex):
    """
    This function `guesses` the rangebreaks required to remove gaps in a datetime index.

    The smallest gap between consecutive timestamps is assumed to be the natural
    sampling step; every larger gap is reported as a break of length
    ``gap - step`` starting one step after the gap's left edge.

    For more details on the `rangebreaks` params in plotly, see
    https://plotly.com/python/reference/layout/xaxis/#layout-xaxis-rangebreaks

    Parameters
    ----------
    dt_index: pd.DatetimeIndex
        The datetimes of the data.

    Returns
    -------
    list of dict
        The `rangebreaks` to be passed into a plotly axis, e.g.
        ``[dict(values=[...], dvalue=<break length in ms>), ...]``.
    """
    dt_idx = dt_index.sort_values()
    # Fewer than two timestamps: no gap can be measured, so there are no breaks.
    # (Previously this raised ValueError from gaps.min() on an empty sequence.)
    if len(dt_idx) < 2:
        return []
    gaps = dt_idx[1:] - dt_idx[:-1]
    min_gap = gaps.min()
    gaps_to_break = {}
    for gap, d in zip(gaps, dt_idx[:-1]):
        if gap > min_gap:
            # The break starts one "natural" step after the left timestamp of the gap.
            gaps_to_break.setdefault(gap - min_gap, []).append(d + min_gap)
    # dvalue is the break length in milliseconds, as plotly expects.
    return [dict(values=v, dvalue=int(k.total_seconds() * 1000)) for k, v in gaps_to_break.items()]
def _pred_autocorr(pred_label: pd.DataFrame, lag=1, **kwargs) -> tuple:
    """Plot the daily rank autocorrelation of the prediction scores.

    For every datetime, the percentile ranks of today's scores are correlated
    with the percentile ranks of the same instruments' scores `lag` periods ago.

    :param pred_label: DataFrame indexed by (instrument, datetime) with a `score` column.
    :param lag: per-instrument shift applied before correlating.
    :param kwargs: may carry `rangebreaks` for the plotly x-axis.
    :return: 1-tuple holding the autocorrelation figure.
    """
    frame = pred_label.copy()
    frame["score_last"] = frame.groupby(level="instrument")["score"].shift(lag)

    def _daily_rank_corr(day):
        # Spearman-style correlation between today's and the lagged score ranks.
        return day["score"].rank(pct=True).corr(day["score_last"].rank(pct=True))

    autocorr = frame.groupby(level="datetime").apply(_daily_rank_corr).to_frame("value")
    x_breaks = kwargs.get("rangebreaks", guess_plotly_rangebreaks(autocorr.index))
    ac_figure = ScatterGraph(
        autocorr,
        layout=dict(
            title="Auto Correlation",
            xaxis=dict(tickangle=45, rangebreaks=x_breaks),
        ),
    ).figure
    return (ac_figure,)
19,583 | from functools import partial
import pandas as pd
import plotly.graph_objs as go
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy import stats
from typing import Sequence
from qlib.typehint import Literal
from ..graph import ScatterGraph, SubplotsGraph, BarGraph, HeatmapGraph
from ..utils import guess_plotly_rangebreaks
class ScatterGraph(BaseGraph):
    """Scatter-trace graph: each DataFrame column becomes one plotly Scatter trace."""

    _name = "scatter"
def guess_plotly_rangebreaks(dt_index: pd.DatetimeIndex):
    """
    Guess the plotly `rangebreaks` needed to hide gaps in a datetime index.

    The smallest gap between consecutive timestamps is treated as the natural
    sampling step; every larger gap is emitted as a break of length
    ``gap - step`` starting one step after the gap's left edge.

    For more details on the `rangebreaks` params in plotly, see
    https://plotly.com/python/reference/layout/xaxis/#layout-xaxis-rangebreaks

    Parameters
    ----------
    dt_index: pd.DatetimeIndex
        The datetimes of the data.

    Returns
    -------
    the `rangebreaks` list to be passed into a plotly axis.
    """
    ordered = dt_index.sort_values()
    deltas = ordered[1:] - ordered[:-1]
    step = deltas.min()
    breaks = {}
    for delta, left in zip(deltas, ordered[:-1]):
        if delta > step:
            breaks.setdefault(delta - step, []).append(left + step)
    return [dict(values=starts, dvalue=int(length.total_seconds() * 1000)) for length, starts in breaks.items()]
def _pred_turnover(pred_label: pd.DataFrame, N=5, lag=1, **kwargs) -> tuple:
    # Daily turnover of the top/bottom 1/N score buckets: the share of today's
    # bucket members that were NOT in the same bucket `lag` periods earlier.
    pred = pred_label.copy()
    # Per-instrument lagged score, used to reconstruct "yesterday's" buckets.
    pred["score_last"] = pred.groupby(level="instrument")["score"].shift(lag)
    # Top bucket: 1 - overlap ratio between today's and the lagged top len(x)//N names.
    top = pred.groupby(level="datetime").apply(
        lambda x: 1
        - x.nlargest(len(x) // N, columns="score").index.isin(x.nlargest(len(x) // N, columns="score_last").index).sum()
        / (len(x) // N)
    )
    # Bottom bucket: the same computation on the smallest len(x)//N scores.
    bottom = pred.groupby(level="datetime").apply(
        lambda x: 1
        - x.nsmallest(len(x) // N, columns="score")
        .index.isin(x.nsmallest(len(x) // N, columns="score_last").index)
        .sum()
        / (len(x) // N)
    )
    r_df = pd.DataFrame(
        {
            "Top": top,
            "Bottom": bottom,
        }
    )
    turnover_figure = ScatterGraph(
        r_df,
        layout=dict(
            title="Top-Bottom Turnover",
            xaxis=dict(tickangle=45, rangebreaks=kwargs.get("rangebreaks", guess_plotly_rangebreaks(r_df.index))),
        ),
    ).figure
return (turnover_figure,) | null |
19,584 | from functools import partial
import pandas as pd
import plotly.graph_objs as go
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy import stats
from typing import Sequence
from qlib.typehint import Literal
from ..graph import ScatterGraph, SubplotsGraph, BarGraph, HeatmapGraph
from ..utils import guess_plotly_rangebreaks
class BarGraph(BaseGraph):
    """Bar-trace graph: each DataFrame column becomes one plotly Bar trace."""

    _name = "bar"
The provided code snippet includes necessary dependencies for implementing the `model_performance_graph` function. Write a Python function `def model_performance_graph( pred_label: pd.DataFrame, lag: int = 1, N: int = 5, reverse=False, rank=False, graph_names: list = ["group_return", "pred_ic", "pred_autocorr"], show_notebook: bool = True, show_nature_day: bool = False, **kwargs, ) -> [list, tuple]` to solve the following problem:
r"""Model performance :param pred_label: index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[score, label]**. It is usually same as the label of model training(e.g. "Ref($close, -2)/Ref($close, -1) - 1"). .. code-block:: python instrument datetime score label SH600004 2017-12-11 -0.013502 -0.013502 2017-12-12 -0.072367 -0.072367 2017-12-13 -0.068605 -0.068605 2017-12-14 0.012440 0.012440 2017-12-15 -0.102778 -0.102778 :param lag: `pred.groupby(level='instrument')['score'].shift(lag)`. It will be only used in the auto-correlation computing. :param N: group number, default 5. :param reverse: if `True`, `pred['score'] *= -1`. :param rank: if **True**, calculate rank ic. :param graph_names: graph names; default ['cumulative_return', 'pred_ic', 'pred_autocorr', 'pred_turnover']. :param show_notebook: whether to display graphics in notebook, the default is `True`. :param show_nature_day: whether to display the abscissa of non-trading day. :param \*\*kwargs: contains some parameters to control plot style in plotly. Currently, supports - `rangebreaks`: https://plotly.com/python/time-series/#Hiding-Weekends-and-Holidays :return: if show_notebook is True, display in notebook; else return `plotly.graph_objs.Figure` list.
Here is the function:
def model_performance_graph(
    pred_label: pd.DataFrame,
    lag: int = 1,
    N: int = 5,
    reverse=False,
    rank=False,
    graph_names: list = ["group_return", "pred_ic", "pred_autocorr"],
    show_notebook: bool = True,
    show_nature_day: bool = False,
    **kwargs,
) -> [list, tuple]:
    r"""Model performance

    :param pred_label: index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[score, label]**.
        It is usually same as the label of model training(e.g. "Ref($close, -2)/Ref($close, -1) - 1").

        .. code-block:: python

            instrument  datetime        score       label
            SH600004    2017-12-11  -0.013502       -0.013502
                        2017-12-12  -0.072367       -0.072367
                        2017-12-13  -0.068605       -0.068605
                        2017-12-14  0.012440        0.012440
                        2017-12-15  -0.102778       -0.102778

    :param lag: `pred.groupby(level='instrument')['score'].shift(lag)`. It will be only used in the auto-correlation computing.
    :param N: group number, default 5.
    :param reverse: if `True`, `pred['score'] *= -1`.
    :param rank: if **True**, calculate rank ic.
    :param graph_names: graph names; default ['group_return', 'pred_ic', 'pred_autocorr'].
    :param show_notebook: whether to display graphics in notebook, the default is `True`.
    :param show_nature_day: whether to display the abscissa of non-trading day.
    :param \*\*kwargs: contains some parameters to control plot style in plotly. Currently, supports
       - `rangebreaks`: https://plotly.com/python/time-series/#Hiding-Weekends-and-Holidays
    :return: if show_notebook is True, display in notebook; else return `plotly.graph_objs.Figure` list.
    """
    figure_list = []
    for graph_name in graph_names:
        # Resolve the private plotting helper by name instead of eval():
        # eval() would execute arbitrary expressions coming in via graph_names.
        graph_func = globals().get(f"_{graph_name}")
        if graph_func is None:
            raise ValueError(f"unknown graph name: {graph_name!r}")
        fun_res = graph_func(
            pred_label=pred_label, lag=lag, N=N, reverse=reverse, rank=rank, show_nature_day=show_nature_day, **kwargs
        )
        figure_list += fun_res
    if show_notebook:
        BarGraph.show_graph_in_notebook(figure_list)
    else:
        return figure_list
19,585 | import copy
from typing import Iterable
import pandas as pd
import plotly.graph_objs as go
from ..graph import BaseGraph, SubplotsGraph
from ..analysis_position.parse_position import get_position_data
def _get_figure_with_position(
    position: dict,
    report_normal: pd.DataFrame,
    label_data: pd.DataFrame,
    start_date=None,
    end_date=None,
) -> Iterable[go.Figure]:
    """Get average analysis figures

    Yields one figure per trade direction (buy / sell / buy_minus_sell / hold),
    each combining a cumulative-return line, a daily-weight line and a value
    histogram whose mean is marked with a red vertical line.

    :param position: position
    :param report_normal:
    :param label_data:
    :param start_date:
    :param end_date:
    :return:
    """
    cum_return_df = _get_cum_return_data_with_position(position, report_normal, label_data, start_date, end_date)
    cum_return_df = cum_return_df.set_index("date")
    # FIXME: support HIGH-FREQ
    cum_return_df.index = cum_return_df.index.strftime("%Y-%m-%d")

    # Create figures
    for _t_name in ["buy", "sell", "buy_minus_sell", "hold"]:
        # Sub-plot wiring: cumulative return (row 1, col 1) sharing x-axis "x3"
        # with the weights below it, histogram of daily values on the right.
        sub_graph_data = [
            (
                "cum_{}".format(_t_name),
                dict(row=1, col=1, graph_kwargs={"mode": "lines+markers", "xaxis": "x3"}),
            ),
            (
                # `buy_minus_sell` has no weight column of its own; it reuses `buy_plus_sell`.
                "{}_weight".format(_t_name.replace("minus", "plus") if "minus" in _t_name else _t_name),
                dict(row=2, col=1),
            ),
            (
                "{}_value".format(_t_name),
                dict(row=1, col=2, kind="HistogramGraph", graph_kwargs={}),
            ),
        ]
        _default_xaxis = dict(showline=False, zeroline=True, tickangle=45)
        _default_yaxis = dict(zeroline=True, showline=True, showticklabels=True)
        sub_graph_layout = dict(
            xaxis1=dict(**_default_xaxis, type="category", showticklabels=False),
            xaxis3=dict(**_default_xaxis, type="category"),
            xaxis2=_default_xaxis,
            yaxis1=dict(**_default_yaxis, title=_t_name),
            yaxis2=_default_yaxis,
            yaxis3=_default_yaxis,
        )
        # Red vertical line in the histogram marks the mean daily value.
        mean_value = cum_return_df["{}_value".format(_t_name)].mean()
        layout = dict(
            height=500,
            title=f"{_t_name}(the red line in the histogram on the right represents the average)",
            shapes=[
                {
                    "type": "line",
                    "xref": "x2",
                    "yref": "paper",
                    "x0": mean_value,
                    "y0": 0,
                    "x1": mean_value,
                    "y1": 1,
                    # NOTE: 'fillcolor': '#d3d3d3', 'opacity': 0.3,
                    "line": {"color": "red", "width": 1},
                },
            ],
        )
        kind_map = dict(kind="ScatterGraph", kwargs=dict(mode="lines+markers"))
        # The right-hand histogram cell spans both rows of the grid.
        specs = [
            [{"rowspan": 1}, {"rowspan": 2}],
            [{"rowspan": 1}, None],
        ]
        subplots_kwargs = dict(
            vertical_spacing=0.01,
            rows=2,
            cols=2,
            row_width=[1, 2],
            column_width=[3, 1],
            print_grid=False,
            specs=specs,
        )
        yield SubplotsGraph(
            cum_return_df,
            layout=layout,
            kind_map=kind_map,
            sub_graph_layout=sub_graph_layout,
            sub_graph_data=sub_graph_data,
            subplots_kwargs=subplots_kwargs,
        ).figure
class BaseGraph:
    """Common scaffolding for plotly-based graphs.

    Subclasses set ``_name`` to a ``plotly.graph_objs`` trace name (e.g. "scatter");
    each column of ``df`` then becomes one trace of that type.
    """

    _name = None

    def __init__(
        self, df: pd.DataFrame = None, layout: dict = None, graph_kwargs: dict = None, name_dict: dict = None, **kwargs
    ):
        """
        :param df: data to plot; one trace per column.
        :param layout: go.Layout parameters
        :param graph_kwargs: Graph parameters, eg: go.Bar(**graph_kwargs)
        :param name_dict: maps column names to displayed trace names; defaults to identity.
        :param kwargs: forwarded to ``_init_parameters``.
        """
        self._df = df
        self._layout = dict() if layout is None else layout
        self._graph_kwargs = dict() if graph_kwargs is None else graph_kwargs
        self._name_dict = name_dict
        self.data = None
        self._init_parameters(**kwargs)
        self._init_data()

    def _init_data(self):
        """Validate ``self._df`` and build the trace list.

        :raises ValueError: if the DataFrame is empty.
        """
        if self._df.empty:
            raise ValueError("df is empty.")
        self.data = self._get_data()

    def _init_parameters(self, **kwargs):
        """Derive the plotly graph type and the display-name mapping.

        :param kwargs
        """
        # Instantiate graphics parameters
        self._graph_type = self._name.lower().capitalize()
        # Displayed column name
        if self._name_dict is None:
            self._name_dict = {_item: _item for _item in self._df.columns}

    # BUG FIX: restored @staticmethod — as a plain method, the instance call in
    # _get_data would pass `self` positionally into `graph_type` and then hit a
    # duplicate-keyword TypeError.
    @staticmethod
    def get_instance_with_graph_parameters(graph_type: str = None, **kwargs):
        """Instantiate a graph class by name.

        Looks the class up in ``plotly.graph_objs`` first and falls back to
        ``qlib.contrib.report.graph`` for the custom graph wrappers.

        :param graph_type: class name, e.g. "Scatter" or "HistogramGraph".
        :param kwargs: forwarded to the class constructor.
        :return: the graph instance.
        """
        try:
            _graph_module = importlib.import_module("plotly.graph_objs")
            _graph_class = getattr(_graph_module, graph_type)
        except AttributeError:
            _graph_module = importlib.import_module("qlib.contrib.report.graph")
            _graph_class = getattr(_graph_module, graph_type)
        return _graph_class(**kwargs)

    @staticmethod
    def show_graph_in_notebook(figure_list: Iterable[go.Figure] = None):
        """Render the given figures inside a notebook.

        :param figure_list: figures to display.
        :return:
        """
        py.init_notebook_mode()
        for _fig in figure_list:
            # NOTE: displays figures: https://plotly.com/python/renderers/
            # default: plotly_mimetype+notebook
            # support renderers: import plotly.io as pio; print(pio.renderers)
            renderer = None
            try:
                # in notebook
                _ipykernel = str(type(get_ipython()))
                if "google.colab" in _ipykernel:
                    renderer = "colab"
            except NameError:
                pass
            _fig.show(renderer=renderer)

    def _get_layout(self) -> go.Layout:
        """
        :return: the go.Layout built from ``self._layout``.
        """
        return go.Layout(**self._layout)

    def _get_data(self) -> list:
        """
        :return: one trace per column, styled by ``self._graph_kwargs``.
        """
        _data = [
            self.get_instance_with_graph_parameters(
                graph_type=self._graph_type, x=self._df.index, y=self._df[_col], name=_name, **self._graph_kwargs
            )
            for _col, _name in self._name_dict.items()
        ]
        return _data

    # Restored @property: every call site in this module reads `.figure` as an
    # attribute (e.g. `ScatterGraph(df).figure`), not as a method call.
    @property
    def figure(self) -> go.Figure:
        """
        :return: the assembled go.Figure.
        """
        _figure = go.Figure(data=self.data, layout=self._get_layout())
        # NOTE: Use the default theme from plotly version 3.x, template=None
        _figure["layout"].update(template=None)
        return _figure
The provided code snippet includes necessary dependencies for implementing the `cumulative_return_graph` function. Write a Python function `def cumulative_return_graph( position: dict, report_normal: pd.DataFrame, label_data: pd.DataFrame, show_notebook=True, start_date=None, end_date=None, ) -> Iterable[go.Figure]` to solve the following problem:
Backtest buy, sell, and holding cumulative return graph Example: .. code-block:: python from qlib.data import D from qlib.contrib.evaluate import risk_analysis, backtest, long_short_backtest from qlib.contrib.strategy import TopkDropoutStrategy # backtest parameters bparas = {} bparas['limit_threshold'] = 0.095 bparas['account'] = 1000000000 sparas = {} sparas['topk'] = 50 sparas['n_drop'] = 5 strategy = TopkDropoutStrategy(**sparas) report_normal_df, positions = backtest(pred_df, strategy, **bparas) pred_df_dates = pred_df.index.get_level_values(level='datetime') features_df = D.features(D.instruments('csi500'), ['Ref($close, -1)/$close - 1'], pred_df_dates.min(), pred_df_dates.max()) features_df.columns = ['label'] qcr.analysis_position.cumulative_return_graph(positions, report_normal_df, features_df) Graph desc: - Axis X: Trading day. - Axis Y: - Above axis Y: `(((Ref($close, -1)/$close - 1) * weight).sum() / weight.sum()).cumsum()`. - Below axis Y: Daily weight sum. - In the **sell** graph, `y < 0` stands for profit; in other cases, `y > 0` stands for profit. - In the **buy_minus_sell** graph, the **y** value of the **weight** graph at the bottom is `buy_weight + sell_weight`. - In each graph, the **red line** in the histogram on the right represents the average. :param position: position data :param report_normal: .. code-block:: python return cost bench turnover date 2017-01-04 0.003421 0.000864 0.011693 0.576325 2017-01-05 0.000508 0.000447 0.000721 0.227882 2017-01-06 -0.003321 0.000212 -0.004322 0.102765 2017-01-09 0.006753 0.000212 0.006874 0.105864 2017-01-10 -0.000416 0.000440 -0.003350 0.208396 :param label_data: `D.features` result; index is `pd.MultiIndex`, index name is [`instrument`, `datetime`]; columns names is [`label`]. **The label T is the change from T to T+1**, it is recommended to use ``close``, example: `D.features(D.instruments('csi500'), ['Ref($close, -1)/$close-1'])` .. 
code-block:: python label instrument datetime SH600004 2017-12-11 -0.013502 2017-12-12 -0.072367 2017-12-13 -0.068605 2017-12-14 0.012440 2017-12-15 -0.102778 :param show_notebook: True or False. If True, show graph in notebook, else return figures :param start_date: start date :param end_date: end date :return:
Here is the function:
def cumulative_return_graph(
    position: dict,
    report_normal: pd.DataFrame,
    label_data: pd.DataFrame,
    show_notebook=True,
    start_date=None,
    end_date=None,
) -> Iterable[go.Figure]:
    """Backtest buy, sell, and holding cumulative return graph

    Example:

        .. code-block:: python

            from qlib.data import D
            from qlib.contrib.evaluate import risk_analysis, backtest, long_short_backtest
            from qlib.contrib.strategy import TopkDropoutStrategy

            # backtest parameters
            bparas = {}
            bparas['limit_threshold'] = 0.095
            bparas['account'] = 1000000000

            sparas = {}
            sparas['topk'] = 50
            sparas['n_drop'] = 5
            strategy = TopkDropoutStrategy(**sparas)

            report_normal_df, positions = backtest(pred_df, strategy, **bparas)

            pred_df_dates = pred_df.index.get_level_values(level='datetime')
            features_df = D.features(D.instruments('csi500'), ['Ref($close, -1)/$close - 1'], pred_df_dates.min(), pred_df_dates.max())
            features_df.columns = ['label']

            qcr.analysis_position.cumulative_return_graph(positions, report_normal_df, features_df)

    Graph desc:

    - Axis X: Trading day.
    - Axis Y:
    - Above axis Y: `(((Ref($close, -1)/$close - 1) * weight).sum() / weight.sum()).cumsum()`.
    - Below axis Y: Daily weight sum.
    - In the **sell** graph, `y < 0` stands for profit; in other cases, `y > 0` stands for profit.
    - In the **buy_minus_sell** graph, the **y** value of the **weight** graph at the bottom is `buy_weight + sell_weight`.
    - In each graph, the **red line** in the histogram on the right represents the average.

    :param position: position data
    :param report_normal:

        .. code-block:: python

                            return      cost        bench       turnover
            date
            2017-01-04      0.003421    0.000864    0.011693    0.576325
            2017-01-05      0.000508    0.000447    0.000721    0.227882
            2017-01-06      -0.003321   0.000212    -0.004322   0.102765
            2017-01-09      0.006753    0.000212    0.006874    0.105864
            2017-01-10      -0.000416   0.000440    -0.003350   0.208396

    :param label_data: `D.features` result; index is `pd.MultiIndex`, index name is [`instrument`, `datetime`]; columns names is [`label`].
        **The label T is the change from T to T+1**, it is recommended to use ``close``, example: `D.features(D.instruments('csi500'), ['Ref($close, -1)/$close-1'])`

        .. code-block:: python

                                        label
            instrument  datetime
            SH600004    2017-12-11  -0.013502
                        2017-12-12  -0.072367
                        2017-12-13  -0.068605
                        2017-12-14  0.012440
                        2017-12-15  -0.102778

    :param show_notebook: True or False. If True, show graph in notebook, else return figures
    :param start_date: start date
    :param end_date: end date
    :return:
    """
    # Work on copies so none of the caller's objects are mutated.
    position = copy.deepcopy(position)
    report_normal = report_normal.copy()
    # BUG FIX: the caller's DataFrame previously had its columns renamed in place.
    label_data = label_data.copy()
    label_data.columns = ["label"]
    _figures = _get_figure_with_position(position, report_normal, label_data, start_date, end_date)
    if show_notebook:
        BaseGraph.show_graph_in_notebook(_figures)
    else:
        return _figures
code-block:: python label instrument datetime SH600004 2017-12-11 -0.013502 2017-12-12 -0.072367 2017-12-13 -0.068605 2017-12-14 0.012440 2017-12-15 -0.102778 :param show_notebook: True or False. If True, show graph in notebook, else return figures :param start_date: start date :param end_date: end date :return: |
19,586 | import copy
from typing import Iterable
import pandas as pd
import plotly.graph_objs as go
from ..graph import ScatterGraph
from ..analysis_position.parse_position import get_position_data
def _get_figure_with_position(
    position: dict, label_data: pd.DataFrame, start_date=None, end_date=None
) -> Iterable[go.Figure]:
    """Get average analysis figures

    For each trading day, takes the average label rank-ratio of the held, bought
    and sold instruments and yields one scatter figure per trade status.

    :param position: position
    :param label_data:
    :param start_date:
    :param end_date:
    :return:
    """
    _position_df = get_position_data(
        position,
        label_data,
        calculate_label_rank=True,
        start_date=start_date,
        end_date=end_date,
    )

    res_dict = dict()
    # Index level 1 is the datetime; aggregate per day.
    _pos_gp = _position_df.groupby(level=1)
    for _item in _pos_gp:
        _date = _item[0]
        _day_df = _item[1]

        _day_value = res_dict.setdefault(_date, {})
        # status codes: 0 = hold, 1 = buy, -1 = sell
        for _i, _name in {0: "Hold", 1: "Buy", -1: "Sell"}.items():
            _temp_df = _day_df[_day_df["status"] == _i]
            if _temp_df.empty:
                _day_value[_name] = 0
            else:
                # rank_label_mean is constant within a (day, status) group; take the first.
                _day_value[_name] = _temp_df["rank_label_mean"].values[0]

    _res_df = pd.DataFrame.from_dict(res_dict, orient="index")
    # FIXME: support HIGH-FREQ
    _res_df.index = _res_df.index.strftime("%Y-%m-%d")
    for _col in _res_df.columns:
        yield ScatterGraph(
            _res_df.loc[:, [_col]],
            layout=dict(
                title=_col,
                xaxis=dict(type="category", tickangle=45),
                # typo fix: axis title previously read "lable-rank-ratio"
                yaxis=dict(title="label-rank-ratio: %"),
            ),
            graph_kwargs=dict(mode="lines+markers"),
        ).figure
class ScatterGraph(BaseGraph):
    """Scatter-trace graph: each DataFrame column becomes one plotly Scatter trace."""

    _name = "scatter"
The provided code snippet includes necessary dependencies for implementing the `rank_label_graph` function. Write a Python function `def rank_label_graph( position: dict, label_data: pd.DataFrame, start_date=None, end_date=None, show_notebook=True, ) -> Iterable[go.Figure]` to solve the following problem:
Ranking percentage of stocks buy, sell, and holding on the trading day. Average rank-ratio(similar to **sell_df['label'].rank(ascending=False) / len(sell_df)**) of daily trading Example: .. code-block:: python from qlib.data import D from qlib.contrib.evaluate import backtest from qlib.contrib.strategy import TopkDropoutStrategy # backtest parameters bparas = {} bparas['limit_threshold'] = 0.095 bparas['account'] = 1000000000 sparas = {} sparas['topk'] = 50 sparas['n_drop'] = 230 strategy = TopkDropoutStrategy(**sparas) _, positions = backtest(pred_df, strategy, **bparas) pred_df_dates = pred_df.index.get_level_values(level='datetime') features_df = D.features(D.instruments('csi500'), ['Ref($close, -1)/$close-1'], pred_df_dates.min(), pred_df_dates.max()) features_df.columns = ['label'] qcr.analysis_position.rank_label_graph(positions, features_df, pred_df_dates.min(), pred_df_dates.max()) :param position: position data; **qlib.backtest.backtest** result. :param label_data: **D.features** result; index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[label]**. **The label T is the change from T to T+1**, it is recommended to use ``close``, example: `D.features(D.instruments('csi500'), ['Ref($close, -1)/$close-1'])`. .. code-block:: python label instrument datetime SH600004 2017-12-11 -0.013502 2017-12-12 -0.072367 2017-12-13 -0.068605 2017-12-14 0.012440 2017-12-15 -0.102778 :param start_date: start date :param end_date: end_date :param show_notebook: **True** or **False**. If True, show graph in notebook, else return figures. :return:
Here is the function:
def rank_label_graph(
    position: dict,
    label_data: pd.DataFrame,
    start_date=None,
    end_date=None,
    show_notebook=True,
) -> Iterable[go.Figure]:
    """Ranking percentage of stocks buy, sell, and holding on the trading day.

    Average rank-ratio(similar to **sell_df['label'].rank(ascending=False) / len(sell_df)**) of daily trading

    Example:

        .. code-block:: python

            from qlib.data import D
            from qlib.contrib.evaluate import backtest
            from qlib.contrib.strategy import TopkDropoutStrategy

            # backtest parameters
            bparas = {}
            bparas['limit_threshold'] = 0.095
            bparas['account'] = 1000000000

            sparas = {}
            sparas['topk'] = 50
            sparas['n_drop'] = 230
            strategy = TopkDropoutStrategy(**sparas)

            _, positions = backtest(pred_df, strategy, **bparas)

            pred_df_dates = pred_df.index.get_level_values(level='datetime')
            features_df = D.features(D.instruments('csi500'), ['Ref($close, -1)/$close-1'], pred_df_dates.min(), pred_df_dates.max())
            features_df.columns = ['label']

            qcr.analysis_position.rank_label_graph(positions, features_df, pred_df_dates.min(), pred_df_dates.max())

    :param position: position data; **qlib.backtest.backtest** result.
    :param label_data: **D.features** result; index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[label]**.
        **The label T is the change from T to T+1**, it is recommended to use ``close``, example: `D.features(D.instruments('csi500'), ['Ref($close, -1)/$close-1'])`.

        .. code-block:: python

                                        label
            instrument  datetime
            SH600004    2017-12-11  -0.013502
                        2017-12-12  -0.072367
                        2017-12-13  -0.068605
                        2017-12-14  0.012440
                        2017-12-15  -0.102778

    :param start_date: start date
    :param end_date: end_date
    :param show_notebook: **True** or **False**. If True, show graph in notebook, else return figures.
    :return:
    """
    # Work on copies so none of the caller's objects are mutated.
    position = copy.deepcopy(position)
    # BUG FIX: the caller's DataFrame previously had its columns renamed in place.
    label_data = label_data.copy()
    label_data.columns = ["label"]
    _figures = _get_figure_with_position(position, label_data, start_date, end_date)
    if show_notebook:
        ScatterGraph.show_graph_in_notebook(_figures)
    else:
        return _figures
19,587 | import pandas as pd
from ..graph import SubplotsGraph, BaseGraph
def _report_figure(df: pd.DataFrame) -> [list, tuple]:
    """Assemble the multi-row backtest report figure from a raw report DataFrame.

    :param df: raw report DataFrame; fed to ``_calculate_report_data`` to derive
        the cumulative-return / drawdown / turnover columns plotted below.
    :return: a single-element tuple holding the composed plotly figure.
    """
    # Get data: derive the cumulative/drawdown columns referenced in _column_row_col_dict
    report_df = _calculate_report_data(df)
    # Maximum Drawdown windows: plain and excess-return ("ex") variants.
    # NOTE(review): assumes _calculate_maximum returns the (start, end) dates of
    # the maximum-drawdown window -- confirm against its implementation.
    max_start_date, max_end_date = _calculate_maximum(report_df)
    ex_max_start_date, ex_max_end_date = _calculate_maximum(report_df, True)
    # Prepend a synthetic "T0" row of zeros so every cumulative curve starts at 0:
    # append a zero row, shift everything down one slot, then label row 0 as "T0".
    index_name = report_df.index.name
    _temp_df = report_df.reset_index()
    _temp_df.loc[-1] = 0
    _temp_df = _temp_df.shift(1)
    _temp_df.loc[0, index_name] = "T0"
    _temp_df.set_index(index_name, inplace=True)
    _temp_df.iloc[0] = 0
    report_df = _temp_df
    # Create figure: map each report column to a subplot row; drawdown panels
    # are drawn filled down to zero (_temp_fill_args).
    _default_kind_map = dict(kind="ScatterGraph", kwargs={"mode": "lines+markers"})
    _temp_fill_args = {"fill": "tozeroy", "mode": "lines+markers"}
    _column_row_col_dict = [
        ("cum_bench", dict(row=1, col=1)),
        ("cum_return_wo_cost", dict(row=1, col=1)),
        ("cum_return_w_cost", dict(row=1, col=1)),
        ("return_wo_mdd", dict(row=2, col=1, graph_kwargs=_temp_fill_args)),
        ("return_w_cost_mdd", dict(row=3, col=1, graph_kwargs=_temp_fill_args)),
        ("cum_ex_return_wo_cost", dict(row=4, col=1)),
        ("cum_ex_return_w_cost", dict(row=4, col=1)),
        ("turnover", dict(row=5, col=1)),
        ("cum_ex_return_w_cost_mdd", dict(row=6, col=1, graph_kwargs=_temp_fill_args)),
        ("cum_ex_return_wo_cost_mdd", dict(row=7, col=1, graph_kwargs=_temp_fill_args)),
    ]
    _subplot_layout = dict()
    for i in range(1, 8):
        # yaxis: show axis line and labels on every row
        _subplot_layout.update({"yaxis{}".format(i): dict(zeroline=True, showline=True, showticklabels=True)})
        # only the bottom (7th) subplot shows its x-axis line
        _show_line = i == 7
        _subplot_layout.update({"xaxis{}".format(i): dict(showline=_show_line, type="category", tickangle=45)})
    # Two shaded rectangles highlight the drawdown windows: the upper half of the
    # canvas (paper y in [0.55, 1]) for the plain MDD, the lower half for the
    # excess-return MDD.
    _layout_style = dict(
        height=1200,
        title=" ",
        shapes=[
            {
                "type": "rect",
                "xref": "x",
                "yref": "paper",
                "x0": max_start_date,
                "y0": 0.55,
                "x1": max_end_date,
                "y1": 1,
                "fillcolor": "#d3d3d3",
                "opacity": 0.3,
                "line": {
                    "width": 0,
                },
            },
            {
                "type": "rect",
                "xref": "x",
                "yref": "paper",
                "x0": ex_max_start_date,
                "y0": 0,
                "x1": ex_max_end_date,
                "y1": 0.55,
                "fillcolor": "#d3d3d3",
                "opacity": 0.3,
                "line": {
                    "width": 0,
                },
            },
        ],
    )
    _subplot_kwargs = dict(
        shared_xaxes=True,
        vertical_spacing=0.01,
        rows=7,
        cols=1,
        row_width=[1, 1, 1, 3, 1, 1, 3],
        print_grid=False,
    )
    figure = SubplotsGraph(
        df=report_df,
        layout=_layout_style,
        sub_graph_data=_column_row_col_dict,
        subplots_kwargs=_subplot_kwargs,
        kind_map=_default_kind_map,
        sub_graph_layout=_subplot_layout,
    ).figure
    return (figure,)
class BaseGraph:
    """Common plotly wrapper: turns each DataFrame column into one trace.

    Subclasses set ``_name`` to a plotly graph-object name (e.g. ``"scatter"``);
    ``_init_parameters`` capitalizes it to resolve ``plotly.graph_objs.<Name>``.
    The assembled ``go.Figure`` is exposed via the ``figure`` property.
    """

    # Plotly graph-object name; must be overridden by subclasses (e.g. "scatter").
    _name = None

    def __init__(
        self, df: pd.DataFrame = None, layout: dict = None, graph_kwargs: dict = None, name_dict: dict = None, **kwargs
    ):
        """
        :param df: data to plot; the index is the x-axis, each column becomes a trace
        :param layout: go.Layout parameters
        :param graph_kwargs: Graph parameters, eg: go.Bar(**graph_kwargs)
        :param name_dict: column -> displayed trace-name mapping; defaults to the column names
        :param kwargs: forwarded to ``_init_parameters``
        """
        self._df = df
        self._layout = dict() if layout is None else layout
        self._graph_kwargs = dict() if graph_kwargs is None else graph_kwargs
        self._name_dict = name_dict
        self.data = None
        self._init_parameters(**kwargs)
        self._init_data()

    def _init_data(self):
        """Validate the input frame and build the trace list.

        :raises ValueError: if no usable data was provided.
        """
        # Fail early with a clear message (a None df would otherwise raise
        # AttributeError on `.empty`).
        if self._df is None or self._df.empty:
            raise ValueError("df is empty.")
        self.data = self._get_data()

    def _init_parameters(self, **kwargs):
        """Resolve the plotly graph type and the displayed column names.

        :param kwargs: unused here; hook for subclasses.
        """
        # "scatter" -> "Scatter", matching the class name in plotly.graph_objs
        self._graph_type = self._name.lower().capitalize()
        # Displayed column name defaults to the column itself
        if self._name_dict is None:
            self._name_dict = {_item: _item for _item in self._df.columns}

    @staticmethod
    def get_instance_with_graph_parameters(graph_type: str = None, **kwargs):
        """Instantiate a graph class by name.

        Looks ``graph_type`` up in ``plotly.graph_objs`` first, then falls back
        to ``qlib.contrib.report.graph`` (composite graphs such as BarGraph).

        NOTE: must be a staticmethod -- ``_get_data`` calls it through ``self``
        with ``graph_type`` as a keyword, so an implicit ``self`` argument would
        collide with it.

        :param graph_type: class name to instantiate
        :param kwargs: forwarded to the graph-class constructor
        """
        try:
            _graph_module = importlib.import_module("plotly.graph_objs")
            _graph_class = getattr(_graph_module, graph_type)
        except AttributeError:
            _graph_module = importlib.import_module("qlib.contrib.report.graph")
            _graph_class = getattr(_graph_module, graph_type)
        return _graph_class(**kwargs)

    @staticmethod
    def show_graph_in_notebook(figure_list: Iterable[go.Figure] = None):
        """Render the given figures inline in a notebook.

        :param figure_list: figures to display
        """
        py.init_notebook_mode()
        for _fig in figure_list:
            # NOTE: displays figures: https://plotly.com/python/renderers/
            # default: plotly_mimetype+notebook
            # support renderers: import plotly.io as pio; print(pio.renderers)
            renderer = None
            try:
                # get_ipython() only exists inside IPython/Jupyter; NameError
                # means a plain interpreter, where the default renderer is kept.
                _ipykernel = str(type(get_ipython()))
                if "google.colab" in _ipykernel:
                    renderer = "colab"
            except NameError:
                pass
            _fig.show(renderer=renderer)

    def _get_layout(self) -> go.Layout:
        """Build the go.Layout from the stored layout kwargs."""
        return go.Layout(**self._layout)

    def _get_data(self) -> list:
        """Build one trace per column, using the display names from ``_name_dict``."""
        _data = [
            self.get_instance_with_graph_parameters(
                graph_type=self._graph_type, x=self._df.index, y=self._df[_col], name=_name, **self._graph_kwargs
            )
            for _col, _name in self._name_dict.items()
        ]
        return _data

    @property
    def figure(self) -> go.Figure:
        """The assembled go.Figure.

        Exposed as a property because all call sites access it as an attribute
        (``SomeGraph(...).figure``).
        """
        _figure = go.Figure(data=self.data, layout=self._get_layout())
        # NOTE: Use the default theme from plotly version 3.x, template=None
        _figure["layout"].update(template=None)
        return _figure
The provided code snippet includes necessary dependencies for implementing the `report_graph` function. Write a Python function `def report_graph(report_df: pd.DataFrame, show_notebook: bool = True) -> [list, tuple]` to solve the following problem:
display backtest report Example: .. code-block:: python import qlib import pandas as pd from qlib.utils.time import Freq from qlib.utils import flatten_dict from qlib.backtest import backtest, executor from qlib.contrib.evaluate import risk_analysis from qlib.contrib.strategy import TopkDropoutStrategy # init qlib qlib.init(provider_uri=<qlib data dir>) CSI300_BENCH = "SH000300" FREQ = "day" STRATEGY_CONFIG = { "topk": 50, "n_drop": 5, # pred_score, pd.Series "signal": pred_score, } EXECUTOR_CONFIG = { "time_per_step": "day", "generate_portfolio_metrics": True, } backtest_config = { "start_time": "2017-01-01", "end_time": "2020-08-01", "account": 100000000, "benchmark": CSI300_BENCH, "exchange_kwargs": { "freq": FREQ, "limit_threshold": 0.095, "deal_price": "close", "open_cost": 0.0005, "close_cost": 0.0015, "min_cost": 5, }, } # strategy object strategy_obj = TopkDropoutStrategy(**STRATEGY_CONFIG) # executor object executor_obj = executor.SimulatorExecutor(**EXECUTOR_CONFIG) # backtest portfolio_metric_dict, indicator_dict = backtest(executor=executor_obj, strategy=strategy_obj, **backtest_config) analysis_freq = "{0}{1}".format(*Freq.parse(FREQ)) # backtest info report_normal_df, positions_normal = portfolio_metric_dict.get(analysis_freq) qcr.analysis_position.report_graph(report_normal_df) :param report_df: **df.index.name** must be **date**, **df.columns** must contain **return**, **turnover**, **cost**, **bench**. .. code-block:: python return cost bench turnover date 2017-01-04 0.003421 0.000864 0.011693 0.576325 2017-01-05 0.000508 0.000447 0.000721 0.227882 2017-01-06 -0.003321 0.000212 -0.004322 0.102765 2017-01-09 0.006753 0.000212 0.006874 0.105864 2017-01-10 -0.000416 0.000440 -0.003350 0.208396 :param show_notebook: whether to display graphics in notebook, the default is **True**. :return: if show_notebook is True, display in notebook; else return **plotly.graph_objs.Figure** list.
Here is the function:
def report_graph(report_df: pd.DataFrame, show_notebook: bool = True) -> [list, tuple]:
    """Display or return the backtest report figure.

    :param report_df: backtest report; **df.index.name** must be **date** and the
        columns must contain **return**, **turnover**, **cost**, **bench**, e.g.
        the ``report_normal_df`` produced by ``qlib.backtest.backtest``:

        .. code-block:: python

                        return      cost      bench   turnover
            date
            2017-01-04  0.003421  0.000864  0.011693  0.576325
            2017-01-05  0.000508  0.000447  0.000721  0.227882

    :param show_notebook: whether to display graphics in notebook, the default is **True**.
    :return: if show_notebook is True, display in notebook; else return
        **plotly.graph_objs.Figure** list.
    """
    # work on a copy so the caller's frame is never mutated by _report_figure
    figures = _report_figure(report_df.copy())
    if not show_notebook:
        return figures
    BaseGraph.show_graph_in_notebook(figures)
19,588 | from typing import Iterable
import pandas as pd
import plotly.graph_objs as py
from ...evaluate import risk_analysis
from ..graph import SubplotsGraph, ScatterGraph
def _get_risk_analysis_figure(analysis_df: pd.DataFrame) -> Iterable[py.Figure]:
    """Build the 1x4 bar-chart figure summarizing the risk-analysis metrics.

    :param analysis_df: risk-analysis frame, or None to produce nothing
    :return: empty list when there is no data, else a one-element tuple of figures
    """
    if analysis_df is None:
        return []
    graph = SubplotsGraph(
        _get_all_risk_analysis(analysis_df),
        kind_map=dict(kind="BarGraph", kwargs={}),
        subplots_kwargs={"rows": 1, "cols": 4},
    )
    return (graph.figure,)
def _get_monthly_risk_analysis_figure(report_normal_df: pd.DataFrame) -> Iterable[py.Figure]:
    """Yield one scatter figure per monthly risk metric.

    NOTE: long/short report support is currently disabled in this code path;
    only ``report_normal_df`` is analyzed.

    :param report_normal_df: backtest report frame, or None to produce nothing
    :return: generator of plotly figures, one per metric
    """
    if report_normal_df is None:
        return []
    monthly = _get_monthly_risk_analysis_with_report(report_normal_df=report_normal_df)
    for metric in ("annualized_return", "max_drawdown", "information_ratio", "std"):
        metric_df = _get_monthly_analysis_with_feature(monthly, metric)
        yield ScatterGraph(
            metric_df,
            layout=dict(title=metric, xaxis=dict(type="category", tickangle=45)),
            graph_kwargs={"mode": "lines+markers"},
        ).figure
class ScatterGraph(BaseGraph):
    # Renders each DataFrame column as a scatter trace; BaseGraph capitalizes
    # _name to resolve plotly.graph_objs.Scatter in _init_parameters/_get_data.
    _name = "scatter"
The provided code snippet includes necessary dependencies for implementing the `risk_analysis_graph` function. Write a Python function `def risk_analysis_graph( analysis_df: pd.DataFrame = None, report_normal_df: pd.DataFrame = None, report_long_short_df: pd.DataFrame = None, show_notebook: bool = True, ) -> Iterable[py.Figure]` to solve the following problem:
Generate analysis graph and monthly analysis Example: .. code-block:: python import qlib import pandas as pd from qlib.utils.time import Freq from qlib.utils import flatten_dict from qlib.backtest import backtest, executor from qlib.contrib.evaluate import risk_analysis from qlib.contrib.strategy import TopkDropoutStrategy # init qlib qlib.init(provider_uri=<qlib data dir>) CSI300_BENCH = "SH000300" FREQ = "day" STRATEGY_CONFIG = { "topk": 50, "n_drop": 5, # pred_score, pd.Series "signal": pred_score, } EXECUTOR_CONFIG = { "time_per_step": "day", "generate_portfolio_metrics": True, } backtest_config = { "start_time": "2017-01-01", "end_time": "2020-08-01", "account": 100000000, "benchmark": CSI300_BENCH, "exchange_kwargs": { "freq": FREQ, "limit_threshold": 0.095, "deal_price": "close", "open_cost": 0.0005, "close_cost": 0.0015, "min_cost": 5, }, } # strategy object strategy_obj = TopkDropoutStrategy(**STRATEGY_CONFIG) # executor object executor_obj = executor.SimulatorExecutor(**EXECUTOR_CONFIG) # backtest portfolio_metric_dict, indicator_dict = backtest(executor=executor_obj, strategy=strategy_obj, **backtest_config) analysis_freq = "{0}{1}".format(*Freq.parse(FREQ)) # backtest info report_normal_df, positions_normal = portfolio_metric_dict.get(analysis_freq) analysis = dict() analysis["excess_return_without_cost"] = risk_analysis( report_normal_df["return"] - report_normal_df["bench"], freq=analysis_freq ) analysis["excess_return_with_cost"] = risk_analysis( report_normal_df["return"] - report_normal_df["bench"] - report_normal_df["cost"], freq=analysis_freq ) analysis_df = pd.concat(analysis) # type: pd.DataFrame analysis_position.risk_analysis_graph(analysis_df, report_normal_df) :param analysis_df: analysis data, index is **pd.MultiIndex**; columns names is **[risk]**. .. 
code-block:: python risk excess_return_without_cost mean 0.000692 std 0.005374 annualized_return 0.174495 information_ratio 2.045576 max_drawdown -0.079103 excess_return_with_cost mean 0.000499 std 0.005372 annualized_return 0.125625 information_ratio 1.473152 max_drawdown -0.088263 :param report_normal_df: **df.index.name** must be **date**, df.columns must contain **return**, **turnover**, **cost**, **bench**. .. code-block:: python return cost bench turnover date 2017-01-04 0.003421 0.000864 0.011693 0.576325 2017-01-05 0.000508 0.000447 0.000721 0.227882 2017-01-06 -0.003321 0.000212 -0.004322 0.102765 2017-01-09 0.006753 0.000212 0.006874 0.105864 2017-01-10 -0.000416 0.000440 -0.003350 0.208396 :param report_long_short_df: **df.index.name** must be **date**, df.columns contain **long**, **short**, **long_short**. .. code-block:: python long short long_short date 2017-01-04 -0.001360 0.001394 0.000034 2017-01-05 0.002456 0.000058 0.002514 2017-01-06 0.000120 0.002739 0.002859 2017-01-09 0.001436 0.001838 0.003273 2017-01-10 0.000824 -0.001944 -0.001120 :param show_notebook: Whether to display graphics in a notebook, default **True**. If True, show graph in notebook If False, return graph figure :return:
Here is the function:
def risk_analysis_graph(
    analysis_df: pd.DataFrame = None,
    report_normal_df: pd.DataFrame = None,
    report_long_short_df: pd.DataFrame = None,
    show_notebook: bool = True,
) -> Iterable[py.Figure]:
    """Generate the overall risk-analysis bar chart plus the monthly metric charts.

    :param analysis_df: analysis data with a **pd.MultiIndex** index and a single
        **risk** column (e.g. concatenated ``risk_analysis`` outputs for excess
        return with and without cost).
    :param report_normal_df: backtest report; **df.index.name** must be **date**
        and df.columns must contain **return**, **turnover**, **cost**, **bench**.
    :param report_long_short_df: long/short report with columns **long**,
        **short**, **long_short** (accepted for API compatibility; not used by
        the current implementation).
    :param show_notebook: Whether to display graphics in a notebook, default **True**.
        If True, show graph in notebook; if False, return graph figure.
    :return: None when rendered in a notebook, else the list of figures.
    """
    figures = list(_get_risk_analysis_figure(analysis_df))
    figures.extend(_get_monthly_risk_analysis_figure(report_normal_df))
    if not show_notebook:
        return figures
    ScatterGraph.show_graph_in_notebook(figures)
code-block:: python risk excess_return_without_cost mean 0.000692 std 0.005374 annualized_return 0.174495 information_ratio 2.045576 max_drawdown -0.079103 excess_return_with_cost mean 0.000499 std 0.005372 annualized_return 0.125625 information_ratio 1.473152 max_drawdown -0.088263 :param report_normal_df: **df.index.name** must be **date**, df.columns must contain **return**, **turnover**, **cost**, **bench**. .. code-block:: python return cost bench turnover date 2017-01-04 0.003421 0.000864 0.011693 0.576325 2017-01-05 0.000508 0.000447 0.000721 0.227882 2017-01-06 -0.003321 0.000212 -0.004322 0.102765 2017-01-09 0.006753 0.000212 0.006874 0.105864 2017-01-10 -0.000416 0.000440 -0.003350 0.208396 :param report_long_short_df: **df.index.name** must be **date**, df.columns contain **long**, **short**, **long_short**. .. code-block:: python long short long_short date 2017-01-04 -0.001360 0.001394 0.000034 2017-01-05 0.002456 0.000058 0.002514 2017-01-06 0.000120 0.002739 0.002859 2017-01-09 0.001436 0.001838 0.003273 2017-01-10 0.000824 -0.001944 -0.001120 :param show_notebook: Whether to display graphics in a notebook, default **True**. If True, show graph in notebook If False, return graph figure :return: |
19,589 | import pandas as pd
from ..graph import ScatterGraph
from ..utils import guess_plotly_rangebreaks
def _get_score_ic(pred_label: pd.DataFrame):
"""
:param pred_label:
:return:
"""
concat_data = pred_label.copy()
concat_data.dropna(axis=0, how="any", inplace=True)
_ic = concat_data.groupby(level="datetime").apply(lambda x: x["label"].corr(x["score"]))
_rank_ic = concat_data.groupby(level="datetime").apply(lambda x: x["label"].corr(x["score"], method="spearman"))
return pd.DataFrame({"ic": _ic, "rank_ic": _rank_ic})
class ScatterGraph(BaseGraph):
    # Renders each DataFrame column as a scatter trace; BaseGraph capitalizes
    # _name to resolve plotly.graph_objs.Scatter in _init_parameters/_get_data.
    _name = "scatter"
def guess_plotly_rangebreaks(dt_index: pd.DatetimeIndex):
    """Infer the plotly ``rangebreaks`` needed to hide gaps in a datetime index.

    The smallest observed spacing is treated as the regular step; every larger
    spacing becomes a break of (spacing - step), starting one step after the
    last point before the gap.

    For more details on `rangebreaks` params in plotly, see
    https://plotly.com/python/reference/layout/xaxis/#layout-xaxis-rangebreaks

    Parameters
    ----------
    dt_index: pd.DatetimeIndex
        The datetimes of the data.

    Returns
    -------
    the `rangebreaks` to be passed into plotly axis.
    """
    ordered = dt_index.sort_values()
    spacings = ordered[1:] - ordered[:-1]
    step = spacings.min()
    # group break starts by their extra duration so plotly gets one entry per dvalue
    breaks = {}
    for start, spacing in zip(ordered[:-1], spacings):
        if spacing > step:
            breaks.setdefault(spacing - step, []).append(start + step)
    return [dict(values=points, dvalue=int(extra.total_seconds() * 1000)) for extra, points in breaks.items()]
The provided code snippet includes necessary dependencies for implementing the `score_ic_graph` function. Write a Python function `def score_ic_graph(pred_label: pd.DataFrame, show_notebook: bool = True, **kwargs) -> [list, tuple]` to solve the following problem:
score IC Example: .. code-block:: python from qlib.data import D from qlib.contrib.report import analysis_position pred_df_dates = pred_df.index.get_level_values(level='datetime') features_df = D.features(D.instruments('csi500'), ['Ref($close, -2)/Ref($close, -1)-1'], pred_df_dates.min(), pred_df_dates.max()) features_df.columns = ['label'] pred_label = pd.concat([features_df, pred], axis=1, sort=True).reindex(features_df.index) analysis_position.score_ic_graph(pred_label) :param pred_label: index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[score, label]**. .. code-block:: python instrument datetime score label SH600004 2017-12-11 -0.013502 -0.013502 2017-12-12 -0.072367 -0.072367 2017-12-13 -0.068605 -0.068605 2017-12-14 0.012440 0.012440 2017-12-15 -0.102778 -0.102778 :param show_notebook: whether to display graphics in notebook, the default is **True**. :return: if show_notebook is True, display in notebook; else return **plotly.graph_objs.Figure** list.
Here is the function:
def score_ic_graph(pred_label: pd.DataFrame, show_notebook: bool = True, **kwargs) -> [list, tuple]:
    """Plot the daily Pearson/Spearman IC between ``score`` and ``label``.

    :param pred_label: index is **pd.MultiIndex**, index name is
        **[instrument, datetime]**; columns names is **[score, label]**.

        .. code-block:: python

            instrument  datetime        score       label
            SH600004    2017-12-11  -0.013502   -0.013502
                        2017-12-12  -0.072367   -0.072367

    :param show_notebook: whether to display graphics in notebook, the default is **True**.
    :param kwargs: ``rangebreaks`` may be passed to override the x-axis gaps
        guessed from the IC index.
    :return: if show_notebook is True, display in notebook; else return
        **plotly.graph_objs.Figure** list.
    """
    ic_df = _get_score_ic(pred_label)
    # let the caller override the gap detection, otherwise infer it from the data
    breaks = kwargs.get("rangebreaks", guess_plotly_rangebreaks(ic_df.index))
    figure = ScatterGraph(
        ic_df,
        layout=dict(
            title="Score IC",
            xaxis=dict(tickangle=45, rangebreaks=breaks),
        ),
        graph_kwargs={"mode": "lines+markers"},
    ).figure
    if not show_notebook:
        return (figure,)
    ScatterGraph.show_graph_in_notebook([figure])
19,590 | import numpy as np
import torch
from torch import nn
from qlib.constant import EPS
from qlib.log import get_module_logger
The provided code snippet includes necessary dependencies for implementing the `preds_to_weight_with_clamp` function. Write a Python function `def preds_to_weight_with_clamp(preds, clip_weight=None, clip_method="tanh")` to solve the following problem:
Clip the weights. Parameters ---------- clip_weight: float The clip threshold. clip_method: str The clip method. Current available: "clamp", "tanh", and "sigmoid".
Here is the function:
def preds_to_weight_with_clamp(preds, clip_weight=None, clip_method="tanh"):
    """
    Convert raw predictions into positive weights, optionally clipped.

    Parameters
    ----------
    preds: torch.Tensor
        raw model outputs; exponentiated to obtain positive weights.
    clip_weight: float
        The clip threshold; None disables clipping entirely.
    clip_method: str
        The clip method. Current available: "clamp", "tanh", and "sigmoid".
    """
    if clip_weight is None:
        # no clipping requested: plain exponential weights
        return torch.exp(preds)
    if clip_method == "clamp":
        return torch.exp(preds).clamp(1.0 / clip_weight, clip_weight)
    if clip_method == "tanh":
        # tanh bounds the exponent to [-log(clip_weight), log(clip_weight)]
        return torch.exp(torch.tanh(preds) * np.log(clip_weight))
    if clip_method == "sigmoid":
        if clip_weight == 0.0:
            # degenerate threshold: fall back to uniform weights
            return torch.ones_like(preds)
        raw = nn.Sigmoid()(preds) * clip_weight  # TODO: The clip_weight is useless here.
        # normalize so the mean weight is 1 (sum equals the element count)
        return raw / torch.sum(raw) * raw.numel()
    raise ValueError("Unknown clip_method")
19,591 | import pandas as pd
from typing import Tuple
from qlib import get_module_logger
from qlib.utils.paral import complex_parallel, DelayedDict
from joblib import Parallel, delayed
The provided code snippet includes necessary dependencies for implementing the `calc_long_short_prec` function. Write a Python function `def calc_long_short_prec( pred: pd.Series, label: pd.Series, date_col="datetime", quantile: float = 0.2, dropna=False, is_alpha=False ) -> Tuple[pd.Series, pd.Series]` to solve the following problem:
calculate the precision for long and short operation :param pred/label: index is **pd.MultiIndex**, index name is **[datetime, instruments]**; columns names is **[score]**. .. code-block:: python score datetime instrument 2020-12-01 09:30:00 SH600068 0.553634 SH600195 0.550017 SH600276 0.540321 SH600584 0.517297 SH600715 0.544674 label : label date_col : date_col Returns ------- (pd.Series, pd.Series) long precision and short precision in time level
Here is the function:
def calc_long_short_prec(
    pred: pd.Series, label: pd.Series, date_col="datetime", quantile: float = 0.2, dropna=False, is_alpha=False
) -> Tuple[pd.Series, pd.Series]:
    """
    calculate the precision for long and short operation

    :param pred/label: index is **pd.MultiIndex**, index name is **[datetime, instruments]**; columns names is **[score]**.

            .. code-block:: python

                                                  score
                datetime            instrument
                2020-12-01 09:30:00 SH600068    0.553634
                                    SH600195    0.550017

    label :
        label
    date_col :
        date_col
    quantile :
        fraction of instruments taken on each side per day
    dropna :
        drop rows with a missing pred or label before grouping
    is_alpha :
        if True, demean the label cross-sectionally per day first

    Returns
    -------
    (pd.Series, pd.Series)
        long precision and short precision in time level
    """
    if is_alpha:
        # Demean per day. Series.mean(level=...) was removed in pandas 2.0;
        # groupby-transform also keeps the full MultiIndex so the subtraction
        # aligns element-wise instead of relying on partial-index alignment.
        label = label - label.groupby(level=date_col).transform("mean")
    if int(1 / quantile) >= len(label.index.get_level_values(1).unique()):
        raise ValueError("Need more instruments to calculate precision")

    df = pd.DataFrame({"pred": pred, "label": label})
    if dropna:
        df.dropna(inplace=True)

    group = df.groupby(level=date_col)

    def N(x):
        # number of instruments on each side for this day
        return int(len(x) * quantile)

    # find the top/low quantile of prediction and treat them as long and short target
    long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label).reset_index(level=0, drop=True)
    short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label).reset_index(level=0, drop=True)

    # precision = fraction of picks whose label has the expected sign
    groupll = long.groupby(date_col)
    l_dom = groupll.apply(lambda x: x > 0)
    l_c = groupll.count()

    groups = short.groupby(date_col)
    s_dom = groups.apply(lambda x: x < 0)
    s_c = groups.count()
    return (l_dom.groupby(date_col).sum() / l_c), (s_dom.groupby(date_col).sum() / s_c)
19,592 | import pandas as pd
from typing import Tuple
from qlib import get_module_logger
from qlib.utils.paral import complex_parallel, DelayedDict
from joblib import Parallel, delayed
The provided code snippet includes necessary dependencies for implementing the `calc_long_short_return` function. Write a Python function `def calc_long_short_return( pred: pd.Series, label: pd.Series, date_col: str = "datetime", quantile: float = 0.2, dropna: bool = False, ) -> Tuple[pd.Series, pd.Series]` to solve the following problem:
calculate long-short return Note: `label` must be raw stock returns. Parameters ---------- pred : pd.Series stock predictions label : pd.Series stock returns date_col : str datetime index name quantile : float long-short quantile Returns ---------- long_short_r : pd.Series daily long-short returns long_avg_r : pd.Series daily long-average returns
Here is the function:
def calc_long_short_return(
    pred: pd.Series,
    label: pd.Series,
    date_col: str = "datetime",
    quantile: float = 0.2,
    dropna: bool = False,
) -> Tuple[pd.Series, pd.Series]:
    """
    Calculate daily long-short and long-average returns.

    Note:
        `label` must be raw stock returns.

    Parameters
    ----------
    pred : pd.Series
        stock predictions, indexed by (datetime, instrument)
    label : pd.Series
        stock returns, aligned with `pred`
    date_col : str
        datetime index name
    quantile : float
        long-short quantile; fraction of instruments taken on each side per day
    dropna : bool
        drop rows with NaN in pred/label before grouping

    Returns
    ----------
    long_short_r : pd.Series
        daily long-short returns
    long_avg_r : pd.Series
        daily long-average returns
    """
    merged = pd.DataFrame({"pred": pred, "label": label})
    if dropna:
        merged.dropna(inplace=True)
    daily = merged.groupby(level=date_col)

    def side_size(day_df):
        # number of instruments per side on one day
        return int(len(day_df) * quantile)

    long_r = daily.apply(lambda day: day.nlargest(side_size(day), columns="pred").label.mean())
    short_r = daily.apply(lambda day: day.nsmallest(side_size(day), columns="pred").label.mean())
    avg_r = daily.label.mean()
    return (long_r - short_r) / 2, avg_r
19,593 | import pandas as pd
from typing import Tuple
from qlib import get_module_logger
from qlib.utils.paral import complex_parallel, DelayedDict
from joblib import Parallel, delayed
def pred_autocorr(pred: pd.Series, lag=1, inst_col="instrument", date_col="datetime"):
    """pred_autocorr.

    Calculate the cross-sectional autocorrelation of predictions between each
    date and the date `lag` rows before it.

    Limitation:
    - If the datetime is not sequentially dense, the correlation will be calculated
      based on adjacent dates. (some users may expect NaN there instead)

    :param pred: pd.Series with following format
                instrument  datetime
                SH600000    2016-01-04   -0.000403
                            2016-01-05   -0.000753
                            2016-01-06   -0.021801
                            2016-01-07   -0.065230
                            2016-01-08   -0.062465
    :type pred: pd.Series
    :param lag: number of dates to shift before correlating
    :param inst_col: name of the instrument index level
    :param date_col: name of the datetime index level (kept for API compatibility)
    """
    if isinstance(pred, pd.DataFrame):
        # BUGFIX: log *before* narrowing to the first column; previously `pred`
        # had already been replaced by a Series here, so `pred.columns` raised
        # AttributeError instead of emitting the warning.
        get_module_logger("pred_autocorr").warning(f"Only the first column in {pred.columns} of `pred` is kept")
        pred = pred.iloc[:, 0]
    pred_ustk = pred.sort_index().unstack(inst_col)
    corr_s = {}
    # correlate each date's cross-section with the cross-section `lag` rows earlier
    for (idx, cur), (_, prev) in zip(pred_ustk.iterrows(), pred_ustk.shift(lag).iterrows()):
        corr_s[idx] = cur.corr(prev)
    corr_s = pd.Series(corr_s).sort_index()
    return corr_s
def complex_parallel(paral: Parallel, complex_iter):
    """complex_parallel.

    Find every delayed task created by joblib's `delayed` inside `complex_iter`,
    run them in parallel, and substitute each task with its execution result.

    >>> from qlib.utils.paral import complex_parallel
    >>> from joblib import Parallel, delayed
    >>> complex_iter = {"a": delayed(sum)([1,2,3]), "b": [1, 2, delayed(sum)([10, 1])]}
    >>> complex_parallel(Parallel(), complex_iter)
    {'a': 6, 'b': [1, 2, 11]}

    Parameters
    ----------
    paral : Parallel
        the joblib Parallel executor to run the tasks with
    complex_iter :
        NOTE: only list, tuple and dict will be explored!!!!

    Returns
    -------
    complex_iter whose delayed joblib tasks are replaced with its execution results.
    """
    # swap every delayed task for a placeholder and collect the task objects
    complex_iter, tasks = _replace_and_get_dt(complex_iter)
    results = paral(task.get_delayed_tuple() for task in tasks)
    for task, outcome in zip(tasks, results):
        task.set_res(outcome)
    # substitute the placeholders with the computed results
    return _recover_dt(complex_iter)
The provided code snippet includes necessary dependencies for implementing the `pred_autocorr_all` function. Write a Python function `def pred_autocorr_all(pred_dict, n_jobs=-1, **kwargs)` to solve the following problem:
calculate auto correlation for pred_dict Parameters ---------- pred_dict : dict A dict like {<method_name>: <prediction>} kwargs : all these arguments will be passed into pred_autocorr
Here is the function:
def pred_autocorr_all(pred_dict, n_jobs=-1, **kwargs):
    """
    Calculate auto correlation for every prediction in pred_dict, in parallel.

    Parameters
    ----------
    pred_dict : dict
        A dict like {<method_name>: <prediction>}
    n_jobs : int
        number of joblib workers
    kwargs :
        all these arguments will be passed into pred_autocorr
    """
    tasks = {name: delayed(pred_autocorr)(series, **kwargs) for name, series in pred_dict.items()}
    return complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), tasks)
19,594 | import pandas as pd
from typing import Tuple
from qlib import get_module_logger
from qlib.utils.paral import complex_parallel, DelayedDict
from joblib import Parallel, delayed
def calc_ic(pred: pd.Series, label: pd.Series, date_col="datetime", dropna=False) -> (pd.Series, pd.Series):
    """calc_ic.

    Compute the daily IC (Pearson) and rank IC (Spearman) between predictions
    and labels.

    Parameters
    ----------
    pred :
        prediction series indexed by (datetime, instrument)
    label :
        label series aligned with `pred`
    date_col :
        name of the datetime index level to group by
    dropna :
        drop dates whose correlation is NaN

    Returns
    -------
    (pd.Series, pd.Series)
        ic and rank ic
    """
    merged = pd.DataFrame({"pred": pred, "label": label})
    daily = merged.groupby(date_col)
    ic = daily.apply(lambda day: day["pred"].corr(day["label"]))
    ric = daily.apply(lambda day: day["pred"].corr(day["label"], method="spearman"))
    if dropna:
        ic, ric = ic.dropna(), ric.dropna()
    return ic, ric
class DelayedDict(DelayedTask):
    """DelayedDict.

    It is designed for the following feature:
    converting existing code that constructs a dict into a parallel version when
    - the keys can be gotten instantly,
    - computation of the values takes a lot of time,
    - AND ALL the values are calculated in a SINGLE function.
    """

    def __init__(self, key_l, delayed_tpl):
        # key_l: the dict keys, known up front.
        # delayed_tpl: one joblib `delayed(...)` tuple; its result is expected to
        #   be a sequence aligned element-by-element with `key_l`.
        self.key_l = key_l
        self.delayed_tpl = delayed_tpl

    def get_delayed_tuple(self):
        """Return the joblib delayed tuple for Parallel to execute."""
        return self.delayed_tpl

    def get_replacement(self):
        # `self.res` is presumably populated by `set_res` on the DelayedTask
        # base class (defined elsewhere) — TODO confirm.
        return dict(zip(self.key_l, self.res))
def complex_parallel(paral: Parallel, complex_iter):
    """complex_parallel.

    Walk `complex_iter`, execute all embedded joblib delayed tasks in parallel,
    and replace each one in place with the value it produced.

    >>> from qlib.utils.paral import complex_parallel
    >>> from joblib import Parallel, delayed
    >>> complex_iter = {"a": delayed(sum)([1,2,3]), "b": [1, 2, delayed(sum)([10, 1])]}
    >>> complex_parallel(Parallel(), complex_iter)
    {'a': 6, 'b': [1, 2, 11]}

    Parameters
    ----------
    paral : Parallel
        the joblib Parallel executor
    complex_iter :
        NOTE: only list, tuple and dict will be explored!!!!

    Returns
    -------
    complex_iter whose delayed joblib tasks are replaced with its execution results.
    """
    complex_iter, pending = _replace_and_get_dt(complex_iter)
    for outcome, item in zip(paral(p.get_delayed_tuple() for p in pending), pending):
        item.set_res(outcome)
    complex_iter = _recover_dt(complex_iter)
    return complex_iter
The provided code snippet includes necessary dependencies for implementing the `calc_all_ic` function. Write a Python function `def calc_all_ic(pred_dict_all, label, date_col="datetime", dropna=False, n_jobs=-1)` to solve the following problem:
calc_all_ic. Parameters ---------- pred_dict_all : A dict like {<method_name>: <prediction>} label: A pd.Series of label values Returns ------- {'Q2+IND_z': {'ic': <ic series like> 2016-01-04 -0.057407 ... 2020-05-28 0.183470 2020-05-29 0.171393 'ric': <rank ic series like> 2016-01-04 -0.040888 ... 2020-05-28 0.236665 2020-05-29 0.183886 } ...}
Here is the function:
def calc_all_ic(pred_dict_all, label, date_col="datetime", dropna=False, n_jobs=-1):
    """calc_all_ic.

    Compute daily IC and rank IC for every prediction series, in parallel.

    Parameters
    ----------
    pred_dict_all :
        A dict like {<method_name>: <prediction>}
    label:
        A pd.Series of label values
    date_col :
        datetime index level name
    dropna :
        drop NaN correlations
    n_jobs :
        number of joblib workers

    Returns
    -------
    {'Q2+IND_z': {'ic': <ic series like>
                          2016-01-04   -0.057407
                          ...
                          2020-05-28    0.183470
                          2020-05-29    0.171393
                  'ric': <rank ic series like>
                          2016-01-04   -0.040888
                          ...
                          2020-05-28    0.236665
                          2020-05-29    0.183886
                  }
    ...}
    """
    tasks = {
        name: DelayedDict(["ic", "ric"], delayed(calc_ic)(series, label, date_col=date_col, dropna=dropna))
        for name, series in pred_dict_all.items()
    }
    return complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), tasks)
19,595 | import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `count_parameters` function. Write a Python function `def count_parameters(models_or_parameters, unit="m")` to solve the following problem:
This function is to obtain the storage size unit of a (or multiple) models. Parameters ---------- models_or_parameters : PyTorch model(s) or a list of parameters. unit : the storage size unit. Returns ------- The number of parameters of the given model(s) or parameters.
Here is the function:
def count_parameters(models_or_parameters, unit="m"):
    """
    Obtain the parameter count of a (or multiple) model(s), scaled to a storage unit.

    Parameters
    ----------
    models_or_parameters :
        a PyTorch model, a single nn.Parameter, a list/tuple of either,
        or any iterable of tensors.
    unit : str or None
        the storage size unit: "kb"/"k", "mb"/"m" (default), "gb"/"g",
        or None for the raw (unscaled) count.

    Returns
    -------
    The number of parameters of the given model(s) or parameters.

    Raises
    ------
    ValueError
        if `unit` is a string other than the supported units.
    """
    if isinstance(models_or_parameters, nn.Module):
        counts = sum(v.numel() for v in models_or_parameters.parameters())
    elif isinstance(models_or_parameters, nn.Parameter):
        counts = models_or_parameters.numel()
    elif isinstance(models_or_parameters, (list, tuple)):
        # recurse and sum; each element is scaled with the same unit
        return sum(count_parameters(x, unit) for x in models_or_parameters)
    else:
        counts = sum(v.numel() for v in models_or_parameters)
    if unit is None:
        # BUGFIX: previously `unit.lower()` ran unconditionally, so passing
        # unit=None crashed with AttributeError before the None check.
        return counts
    unit = unit.lower()
    if unit in ("kb", "k"):
        counts /= 2**10
    elif unit in ("mb", "m"):
        counts /= 2**20
    elif unit in ("gb", "g"):
        counts /= 2**30
    else:
        raise ValueError("Unknown unit: {:}".format(unit))
    return counts
19,596 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
class data_loader(Dataset):
    """Dataset exposing a feature/label DataFrame as (60, 6) float32 tensors."""

    def __init__(self, df):
        self.df_feature = df["feature"]
        self.df_label_reg = df["label"]
        self.df_index = df.index
        # reshape flat features to (sample, 6 channels, 60 steps),
        # then swap the last two axes to (sample, 60, 6)
        feat = self.df_feature.values.reshape(-1, 6, 60).transpose(0, 2, 1)
        self.df_feature = torch.tensor(feat, dtype=torch.float32)
        self.df_label_reg = torch.tensor(self.df_label_reg.values.reshape(-1), dtype=torch.float32)

    def __getitem__(self, index):
        # returns (feature tensor of shape (60, 6), scalar regression label)
        return self.df_feature[index], self.df_label_reg[index]

    def __len__(self):
        return len(self.df_feature)
def get_stock_loader(df, batch_size, shuffle=True):
    """Wrap *df* in a ``data_loader`` dataset and return a batching DataLoader."""
    return DataLoader(data_loader(df), batch_size=batch_size, shuffle=shuffle)
19,597 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
def get_index(num_domain=2):
    """Return all index pairs (i, j) with 0 <= i < j <= num_domain, in order."""
    return [(i, j) for i in range(num_domain) for j in range(i + 1, num_domain + 1)]
19,598 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
def cosine(source, target):
    """Cosine similarity between the scalar means of *source* and *target*.

    NOTE(review): the tensors are reduced to their overall means first, so the
    result is the cosine of two scalars (i.e. the product of their signs) —
    presumably intentional in the original loss; confirm against callers.
    """
    src_mean = source.mean()
    tgt_mean = target.mean()
    similarity = nn.CosineSimilarity(dim=0)(src_mean, tgt_mean)
    return similarity.mean()
19,599 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
# NOTE(review): the method bodies in this region were elided from the excerpt;
# only the signatures remain, so this fragment is not runnable as-is.
# ReverseLayerF is a gradient-reversal autograd Function and Discriminator a
# small domain classifier — TODO restore the full bodies from the source file.
class ReverseLayerF(Function):
    def forward(ctx, x, alpha):
    def backward(ctx, grad_output):
class Discriminator(nn.Module):
    def __init__(self, input_dim=256, hidden_dim=256):
    def forward(self, x):
def adv(source, target, device, input_dim=256, hidden_dim=512):
    """Adversarial (DANN-style) domain loss between source and target features.

    Assumes Discriminator outputs probabilities in [0, 1] (BCELoss target) —
    it is defined elsewhere in this file.
    """
    bce = nn.BCELoss()
    # !!! Pay attention to .cuda !!!
    discriminator = Discriminator(input_dim, hidden_dim).to(device)
    src_domain = torch.ones(len(source)).to(device).view(-1, 1)
    tgt_domain = torch.zeros(len(target)).to(device).view(-1, 1)
    # gradient reversal so the upstream feature extractor is trained adversarially
    src_pred = discriminator(ReverseLayerF.apply(source, 1))
    tgt_pred = discriminator(ReverseLayerF.apply(target, 1))
    return bce(src_pred, src_domain) + bce(tgt_pred, tgt_domain)
19,600 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
def CORAL(source, target, device):
    """CORAL loss: squared Frobenius distance between the feature covariances
    of *source* and *target* (each [n, d]), scaled by 1 / (4 d^2)."""
    dim = source.size(1)
    n_src, n_tgt = source.size(0), target.size(0)

    # unbiased covariance of the source features
    src_col_sum = torch.ones((1, n_src)).to(device) @ source
    cov_src = (source.t() @ source - (src_col_sum.t() @ src_col_sum) / n_src) / (n_src - 1)

    # unbiased covariance of the target features
    tgt_col_sum = torch.ones((1, n_tgt)).to(device) @ target
    cov_tgt = (target.t() @ target - (tgt_col_sum.t() @ tgt_col_sum) / n_tgt) / (n_tgt - 1)

    # squared Frobenius norm of the covariance gap
    return (cov_src - cov_tgt).pow(2).sum() / (4 * dim * dim)
19,601 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
def pairwise_dist(X, Y):
    """Squared Euclidean distance matrix between rows of X [n, d] and Y [m, d]."""
    assert X.shape[1] == Y.shape[1]
    # broadcast to [n, m, d] and reduce over the feature axis
    diff = X.unsqueeze(1) - Y.unsqueeze(0)
    return diff.pow(2).sum(2)
19,602 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
def pairwise_dist_np(X, Y):
    """Squared Euclidean distance matrix between rows of X [n, d] and Y [m, d] (NumPy)."""
    assert X.shape[1] == Y.shape[1]
    # broadcasting replaces the explicit tiling of the original formulation
    diff = np.expand_dims(X, 1) - np.expand_dims(Y, 0)
    return np.power(diff, 2).sum(2)
19,603 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
def pa(X, Y):
    """Squared Euclidean distance matrix via the ||x||^2 + ||y||^2 - 2<x,y> identity.

    NOTE: tiny negative entries can appear from floating-point cancellation.
    """
    cross = X @ Y.T
    x_sq = np.sum(np.square(X), axis=1)[:, None]  # column vector of row norms
    y_sq = np.sum(np.square(Y), axis=1)
    return x_sq + y_sq - 2 * cross
19,604 | import os
from torch.utils.data import Dataset, DataLoader
import copy
from typing import Text, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from qlib.contrib.model.pytorch_utils import count_parameters
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import get_or_create_path
def kl_div(source, target):
    """KL divergence KL(target || source) with both tensors truncated to equal length.

    Uses `nn.KLDivLoss(reduction="batchmean")`, whose input is log-probabilities.
    """
    n = min(len(source), len(target))
    source, target = source[:n], target[:n]
    return nn.KLDivLoss(reduction="batchmean")(source.log(), target)
def js(source, target):
    """Jensen-Shannon divergence between *source* and *target* (equal-length prefix)."""
    n = min(len(source), len(target))
    source, target = source[:n], target[:n]
    midpoint = 0.5 * (source + target)
    # JS = (KL(source||M) + KL(target||M)) / 2, with M the mixture
    return 0.5 * (kl_div(source, midpoint) + kl_div(target, midpoint))
19,605 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from typing import Text, Union
import copy
from ...utils import get_or_create_path
from ...log import get_module_logger
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Function
from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
def make_ix_like(input, dim=0):
    """Return a 1..d index tensor shaped to broadcast along `dim` of *input*."""
    size = input.size(dim)
    indices = torch.arange(1, size + 1, device=input.device, dtype=input.dtype)
    shape = [1] * input.dim()
    shape[0] = -1  # lay the indices along the first axis, then move them to `dim`
    return indices.view(shape).transpose(0, dim)
19,606 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import copy
import math
from ...utils import get_or_create_path
from ...log import get_module_logger
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from torch.nn.modules.container import ModuleList
def _get_clones(module, N):
return ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
19,607 | import io
import os
import copy
import math
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from qlib.constant import EPS
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.contrib.data.dataset import MTSDatasetH
def evaluate(pred):
    """Score a prediction frame with `score`/`label` columns after percentile ranking.

    Returns a dict with rank-space MSE, MAE and Spearman IC.
    """
    ranked = pred.rank(pct=True)  # transform into percentiles
    diff = ranked.score - ranked.label
    return {
        "MSE": (diff**2).mean(),
        "MAE": diff.abs().mean(),
        "IC": ranked.score.corr(ranked.label, method="spearman"),
    }
19,608 | import io
import os
import copy
import math
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
SummaryWriter = None
from tqdm import tqdm
from qlib.constant import EPS
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.contrib.data.dataset import MTSDatasetH
def sinkhorn(Q, n_iters=3, epsilon=0.1):
    """Sinkhorn iteration: exponentiate scores and alternately normalize rows/columns.

    epsilon should be adjusted according to the scale of the logits.
    """
    with torch.no_grad():
        plan = shoot_infs(torch.exp(Q / epsilon))
        for _ in range(n_iters):
            plan = plan / plan.sum(dim=0, keepdim=True)
            plan = plan / plan.sum(dim=1, keepdim=True)
    return plan
def loss_fn(pred, label):
    """Masked MSE: mean squared error over non-NaN labels (column-wise for 2-D pred)."""
    valid = ~torch.isnan(label)
    if pred.dim() == 2:
        label = label.unsqueeze(1)  # broadcast the label over prediction columns
    diff = pred[valid] - label[valid]
    return diff.pow(2).mean(dim=0)
def minmax_norm(x):
    """Min-max normalize `x` along its last axis; rows with zero range become all ones."""
    lo = x.min(dim=-1, keepdim=True).values
    hi = x.max(dim=-1, keepdim=True).values
    flat_rows = (lo == hi).squeeze()
    x = (x - lo) / (hi - lo + EPS)
    x[flat_rows] = 1
    return x
The provided code snippet includes necessary dependencies for implementing the `transport_sample` function. Write a Python function `def transport_sample(all_preds, label, choice, prob, hist_loss, count, transport_method, alpha, training=False)` to solve the following problem:
sample-wise transport Args: all_preds (torch.Tensor): predictions from all predictors, [sample x states] label (torch.Tensor): label, [sample] choice (torch.Tensor): gumbel softmax choice, [sample x states] prob (torch.Tensor): router predicted probability, [sample x states] hist_loss (torch.Tensor): history loss matrix, [sample x states] count (list): sample counts for each day, empty list for sample-wise transport transport_method (str): transportation method alpha (float): fusion parameter for calculating transport loss matrix training (bool): indicate training or inference
Here is the function:
def transport_sample(all_preds, label, choice, prob, hist_loss, count, transport_method, alpha, training=False):
    """
    Sample-wise optimal-transport assignment of predictors to samples.

    Args:
        all_preds (torch.Tensor): predictions from all predictors, [sample x states]
        label (torch.Tensor): label, [sample]
        choice (torch.Tensor): gumbel softmax choice, [sample x states]
        prob (torch.Tensor): router predicted probability, [sample x states]
        hist_loss (torch.Tensor): history loss matrix, [sample x states]
        count (list): sample counts for each day, empty list for sample-wise transport
        transport_method (str): transportation method, "oracle" or "router"
        alpha (float): fusion parameter for calculating transport loss matrix
        training (bool): indicate training or inference

    Returns:
        (loss, pred, L, P): scalar loss, combined prediction [sample],
        min-max-normalized current loss matrix L and transport plan P
        (both [sample x states]).
    """
    assert all_preds.shape == choice.shape
    assert len(all_preds) == len(label)
    assert transport_method in ["oracle", "router"]

    # per-sample, per-state squared error; rows with NaN label keep zero loss
    all_loss = torch.zeros_like(all_preds)
    mask = ~torch.isnan(label)
    all_loss[mask] = (all_preds[mask] - label[mask, None]).pow(2)  # [sample x states]

    L = minmax_norm(all_loss.detach())
    # blend the current loss with history loss before computing the plan
    Lh = L * alpha + minmax_norm(hist_loss) * (1 - alpha)  # add hist loss for transport
    Lh = minmax_norm(Lh)
    P = sinkhorn(-Lh)
    del Lh

    if transport_method == "router":
        if training:
            pred = (all_preds * choice).sum(dim=1)  # gumbel softmax
        else:
            pred = all_preds[range(len(all_preds)), prob.argmax(dim=-1)]  # argmax
    else:
        # "oracle": weight the predictors by the transport plan
        pred = (all_preds * P).sum(dim=1)

    if transport_method == "router":
        loss = loss_fn(pred, label)
    else:
        loss = (all_loss * P).sum(dim=1).mean()
    return loss, pred, L, P
19,609 | import io
import os
import copy
import math
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
SummaryWriter = None
from tqdm import tqdm
from qlib.constant import EPS
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.contrib.data.dataset import MTSDatasetH
def sinkhorn(Q, n_iters=3, epsilon=0.1):
    """Turn a score matrix into a transport plan via Sinkhorn normalization.

    epsilon should be adjusted according to the scale of the logits.
    """
    with torch.no_grad():
        scaled = shoot_infs(torch.exp(Q / epsilon))
        for _ in range(n_iters):
            scaled = scaled / scaled.sum(dim=0, keepdim=True)  # normalize columns
            scaled = scaled / scaled.sum(dim=1, keepdim=True)  # normalize rows
    return scaled
def loss_fn(pred, label):
    """MSE over the samples whose label is not NaN (per output column for 2-D pred)."""
    keep = ~torch.isnan(label)
    if len(pred.shape) == 2:
        label = label[:, None]  # align a 1-D label with a [sample x states] pred
    return (pred[keep] - label[keep]).pow(2).mean(dim=0)
def minmax_norm(x):
    """Scale each row of `x` into [0, 1]; constant rows are set to all ones."""
    row_min = x.min(dim=-1, keepdim=True).values
    row_max = x.max(dim=-1, keepdim=True).values
    constant = (row_min == row_max).squeeze()
    x = (x - row_min) / (row_max - row_min + EPS)
    x[constant] = 1
    return x
The provided code snippet includes necessary dependencies for implementing the `transport_daily` function. Write a Python function `def transport_daily(all_preds, label, choice, prob, hist_loss, count, transport_method, alpha, training=False)` to solve the following problem:
daily transport Args: all_preds (torch.Tensor): predictions from all predictors, [sample x states] label (torch.Tensor): label, [sample] choice (torch.Tensor): gumbel softmax choice, [days x states] prob (torch.Tensor): router predicted probability, [days x states] hist_loss (torch.Tensor): history loss matrix, [days x states] count (list): sample counts for each day, [days] transport_method (str): transportation method alpha (float): fusion parameter for calculating transport loss matrix training (bool): indicate training or inference
Here is the function:
def transport_daily(all_preds, label, choice, prob, hist_loss, count, transport_method, alpha, training=False):
    """
    Daily (per-date) optimal-transport assignment of predictors.

    Args:
        all_preds (torch.Tensor): predictions from all predictors, [sample x states]
        label (torch.Tensor): label, [sample]
        choice (torch.Tensor): gumbel softmax choice, [days x states]
        prob (torch.Tensor): router predicted probability, [days x states]
        hist_loss (torch.Tensor): history loss matrix, [days x states]
        count (list): sample counts for each day, [days]
        transport_method (str): transportation method, "oracle" or "router"
        alpha (float): fusion parameter for calculating transport loss matrix
        training (bool): indicate training or inference

    Returns:
        (loss, pred, L, P): scalar loss, combined prediction [sample],
        min-max-normalized daily loss matrix L and transport plan P
        (both [days x states]).
    """
    assert len(prob) == len(count)
    assert len(all_preds) == sum(count)
    assert transport_method in ["oracle", "router"]

    # per-day, per-state loss; samples are laid out day-by-day in `all_preds`
    all_loss = []  # loss of all predictions
    start = 0
    for i, cnt in enumerate(count):
        slc = slice(start, start + cnt)  # samples from the i-th day
        start += cnt
        tloss = loss_fn(all_preds[slc], label[slc])  # loss of the i-th day
        all_loss.append(tloss)
    all_loss = torch.stack(all_loss, dim=0)  # [days x states]

    L = minmax_norm(all_loss.detach())
    # blend the current loss with history loss before computing the plan
    Lh = L * alpha + minmax_norm(hist_loss) * (1 - alpha)  # add hist loss for transport
    Lh = minmax_norm(Lh)
    P = sinkhorn(-Lh)
    del Lh

    # combine each day's predictors according to the chosen method
    pred = []
    start = 0
    for i, cnt in enumerate(count):
        slc = slice(start, start + cnt)  # samples from the i-th day
        start += cnt
        if transport_method == "router":
            if training:
                tpred = all_preds[slc] @ choice[i]  # gumbel softmax
            else:
                tpred = all_preds[slc][:, prob[i].argmax(dim=-1)]  # argmax
        else:
            tpred = all_preds[slc] @ P[i]
        pred.append(tpred)
    pred = torch.cat(pred, dim=0)  # [samples]

    if transport_method == "router":
        loss = loss_fn(pred, label)
    else:
        loss = (all_loss * P).sum(dim=1).mean()
    return loss, pred, L, P
19,610 | import io
import os
import copy
import math
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from qlib.constant import EPS
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.contrib.data.dataset import MTSDatasetH
The provided code snippet includes necessary dependencies for implementing the `load_state_dict_unsafe` function. Write a Python function `def load_state_dict_unsafe(model, state_dict)` to solve the following problem:
Load state dict to provided model while ignore exceptions.
Here is the function:
def load_state_dict_unsafe(model, state_dict):
    """
    Load state dict to provided model while ignore exceptions.

    Recursively feeds `state_dict` into every submodule via the private
    `_load_from_state_dict` hook and collects the problems instead of raising.

    Returns a dict with keys "unexpected_keys", "missing_keys", "error_msgs".
    """
    missing_keys, unexpected_keys, error_msgs = [], [], []

    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, "_metadata", None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def _load(module, prefix=""):
        # per-module metadata is keyed by the dotted prefix without the trailing dot
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
        )
        for name, child in module._modules.items():
            if child is not None:
                _load(child, prefix + name + ".")

    _load(model)
    return {"unexpected_keys": unexpected_keys, "missing_keys": missing_keys, "error_msgs": error_msgs}
19,611 | import io
import os
import copy
import math
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from qlib.constant import EPS
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.contrib.data.dataset import MTSDatasetH
def plot(P):
    """Render the state-weight DataFrame `P` to an RGB uint8 image array.

    Left panel: stacked area of per-state weights over the index.
    Right panel: bar chart counting which state dominates on each row.
    """
    assert isinstance(P, pd.DataFrame)
    fig, (ax_area, ax_bar) = plt.subplots(1, 2, figsize=(10, 4))
    P.plot.area(ax=ax_area, xlabel="")
    dominant_counts = P.idxmax(axis=1).value_counts().sort_index()
    dominant_counts.plot.bar(ax=ax_bar, xlabel="")
    plt.tight_layout()
    # round-trip through an in-memory PNG to get a pixel array
    with io.BytesIO() as buf:
        plt.savefig(buf, format="png")
        buf.seek(0)
        img = plt.imread(buf)
        plt.close()
    return np.uint8(img * 255)
19,612 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from typing import Text, Union
import copy
import math
from ...utils import get_or_create_path
from ...log import get_module_logger
import torch
import torch.nn as nn
import torch.optim as optim
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from torch.nn.modules.container import ModuleList
def _get_clones(module, N):
return ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
19,613 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def get_position_list_value(positions):
    """Compute the close-price value of every daily position.

    Parameters
    ----------
    positions : dict
        Mapping of date -> position dict, where each position maps
        instrument code -> {'amount': ..., 'price': ...} plus a 'cash' entry.

    Returns
    -------
    OrderedDict
        Mapping of date -> total position value on that date.
    """
    # generate the instrument list and date range covering the whole positions dict
    instruments = set()
    for day, position in positions.items():
        instruments.update(position.keys())
    instruments = list(set(instruments) - {"cash"})  # filter 'cash'
    instruments.sort()
    day_list = list(positions.keys())
    day_list.sort()
    start_date, end_date = day_list[0], day_list[-1]
    # load close prices for all instruments over the full range in one query
    fields = ["$close"]
    close_data_df = D.features(
        instruments,
        fields,
        start_time=start_date,
        end_time=end_date,
        freq="day",
        disk_cache=0,
    )
    # generate value
    # return dict of time -> position_value
    value_dict = OrderedDict()
    for day, position in positions.items():
        # _get_position_value_from_df is defined elsewhere in this module
        value = _get_position_value_from_df(evaluate_date=day, position=position, close_data_df=close_data_df)
        value_dict[day] = value
    return value_dict
The provided code snippet includes necessary dependencies for implementing the `get_daily_return_series_from_positions` function. Write a Python function `def get_daily_return_series_from_positions(positions, init_asset_value)` to solve the following problem:
Generate a daily return series from the position view. Parameters: `positions` — the positions generated by the strategy; `init_asset_value` — the initial asset value. Returns: a pd.Series of daily returns, where return_series[date] is the daily return rate.
Here is the function:
def get_daily_return_series_from_positions(positions, init_asset_value):
    """Build a daily return-rate series from the position view.

    positions: positions generated by the strategy (date -> position dict)
    init_asset_value: initial asset value, used to derive the first day's return
    returns: pd.Series indexed by date, return_series[date] = daily return rate
    """
    daily_values = pd.Series(get_position_list_value(positions)).sort_index()
    return_series = daily_values.pct_change()
    # pct_change leaves the first entry NaN; derive it from the initial asset value.
    first_day = daily_values.index[0]
    return_series[first_day] = daily_values[first_day] / init_asset_value - 1
    return return_series
19,614 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def get_position_value(evaluate_date, position):
    """Value one day's position at close prices: sum of close * amount, plus cash.

    ``position`` maps instrument code -> {'amount': ..., 'price': ...} and also
    carries a 'cash' entry, e.g.::

        {
            'SH600022': {'amount': 100.00, 'price': 12.00},
            'cash': 100000.0,
        }

    means holding 100.0 shares of 'SH600022' plus 100000.0 RMB.
    """
    # Query close prices for the held instruments only; 'cash' has no quote.
    instruments = list(set(position.keys()) - {"cash"})
    close_data_df = D.features(
        instruments,
        ["$close"],
        start_time=evaluate_date,
        end_time=evaluate_date,
        freq="day",
        disk_cache=0,
    )
    return _get_position_value_from_df(evaluate_date, position, close_data_df)
The provided code snippet includes necessary dependencies for implementing the `get_annual_return_from_positions` function. Write a Python function `def get_annual_return_from_positions(positions, init_asset_value)` to solve the following problem:
Annualized returns: p_r = (p_end / p_start)^{250/n} - 1, where p_r is the annual return, p_end the final value, p_start the initial value, and n the number of backtest days.
Here is the function:
def get_annual_return_from_positions(positions, init_asset_value):
    """Annualized return computed from start/end asset values.

    p_r = (p_end / p_start) ** (250 / n) - 1, where p_end is the final
    position value, p_start the initial asset value and n the number of
    backtest trading days.
    """
    trading_days = sorted(positions.keys())
    last_day = trading_days[-1]
    final_value = get_position_value(last_day, positions[last_day])
    n_period = len(trading_days)
    return (final_value / init_asset_value) ** (250 / n_period) - 1
19,615 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def get_annaul_return_from_return_series(r, method="ci"):
    """Annualize a daily return series (250 trading days per year).

    Parameters
    ----------
    r : pandas.Series
        daily return series
    method : str
        interest calculation method: "ci" (compound interest) or
        "si" (simple interest); any value other than "ci" is treated as simple
    """
    daily_mean = r.mean()
    if method == "ci":
        return (1 + daily_mean) ** 250 - 1
    return daily_mean * 250
The provided code snippet includes necessary dependencies for implementing the `get_sharpe_ratio_from_return_series` function. Write a Python function `def get_sharpe_ratio_from_return_series(r, risk_free_rate=0.00, method="ci")` to solve the following problem:
Risk Analysis Parameters ---------- r : pandas.Series daily return series method : str interest calculation method, ci(compound interest)/si(simple interest) risk_free_rate : float risk_free_rate, default as 0.00, can set as 0.03 etc
Here is the function:
def get_sharpe_ratio_from_return_series(r, risk_free_rate=0.00, method="ci"):
    """Sharpe ratio of a daily return series.

    Parameters
    ----------
    r : pandas.Series
        daily return series
    risk_free_rate : float
        annual risk-free rate, default 0.00 (can be set to e.g. 0.03)
    method : str
        interest calculation method: "ci" (compound) or "si" (simple)
    """
    daily_mean = r.mean()
    # Annualize the mean return (same formula as get_annaul_return_from_return_series).
    annual = (1 + daily_mean) ** 250 - 1 if method == "ci" else daily_mean * 250
    # Annualized volatility: sample std scaled by sqrt(250).
    annual_vol = r.std(ddof=1) * np.sqrt(250)
    return (annual - risk_free_rate) / annual_vol
19,616 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
The provided code snippet includes necessary dependencies for implementing the `get_max_drawdown_from_series` function. Write a Python function `def get_max_drawdown_from_series(r)` to solve the following problem:
Risk Analysis from asset value cumprod way Parameters ---------- r : pandas.Series daily return series
Here is the function:
def get_max_drawdown_from_series(r):
    """Maximum drawdown of a daily return series (cumulative-product method).

    Parameters
    ----------
    r : pandas.Series
        daily return series

    Returns
    -------
    float
        The most negative peak-to-trough ratio of the cumulative wealth curve.
    """
    wealth = (1 + r).cumprod()
    running_peak = wealth.cummax()
    drawdown = (wealth - running_peak) / running_peak
    return drawdown.min()
19,617 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def get_turnover_rate():
    """Placeholder: turnover rate is computed inside the backtest instead."""
    # in backtest
    pass
19,618 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def get_annaul_return_from_return_series(r, method="ci"):
    """Annualize a daily return series, assuming 250 trading days per year.

    Parameters
    ----------
    r : pandas.Series
        daily return series
    method : str
        "ci" for compound interest; any other value means simple interest
    """
    mu = r.mean()
    return ((1 + mu) ** 250 - 1) if method == "ci" else (mu * 250)
def get_beta(r, b):
    """Risk Analysis: beta of a strategy versus its baseline.

    beta = Cov(r, b) / Var(b)

    Parameters
    ----------
    r : pandas.Series
        daily return series of strategy
    b : pandas.Series
        daily return series of baseline

    Returns
    -------
    float
        The scalar beta coefficient.
    """
    # BUG FIX: np.cov(r, b) returns the full 2x2 covariance matrix; beta needs
    # only the off-diagonal Cov(r, b) element. The original returned the whole
    # matrix, making downstream get_alpha a matrix too.
    cov_r_b = np.cov(r, b)[0, 1]
    # Use ddof=1 so the variance matches np.cov's sample (ddof=1) convention.
    var_b = np.var(b, ddof=1)
    return cov_r_b / var_b
def get_alpha(r, b, risk_free_rate=0.03):
    """CAPM alpha of strategy returns ``r`` against baseline returns ``b``.

    alpha = R_annual - rf - beta * (B_annual - rf)
    """
    beta = get_beta(r, b)
    strategy_annual = get_annaul_return_from_return_series(r)
    baseline_annual = get_annaul_return_from_return_series(b)
    return strategy_annual - risk_free_rate - beta * (baseline_annual - risk_free_rate)
19,619 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def get_volatility_from_series(r):
    """Daily volatility: sample standard deviation (ddof=1) of the return series."""
    volatility = r.std(ddof=1)
    return volatility
19,620 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
The provided code snippet includes necessary dependencies for implementing the `get_rank_ic` function. Write a Python function `def get_rank_ic(a, b)` to solve the following problem:
Rank IC Parameters ---------- r : pandas.Series daily score series of feature b : pandas.Series daily return series
Here is the function:
def get_rank_ic(a, b):
    """Rank IC: Spearman rank correlation between scores ``a`` and returns ``b``.

    Parameters
    ----------
    a : pandas.Series
        daily score series of a feature
    b : pandas.Series
        daily return series
    """
    result = spearmanr(a, b)
    return result.correlation
19,621 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def get_normal_ic(a, b):
    """Normal IC: Pearson correlation coefficient between ``a`` and ``b``."""
    correlation = pearsonr(a, b)[0]
    return correlation
19,622 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import warnings
from typing import Union
from ..log import get_module_logger
from ..utils import get_date_range
from ..utils.resam import Freq
from ..strategy.base import BaseStrategy
from ..backtest import get_exchange, position, backtest as backtest_func, executor as _executor
from ..data import D
from ..config import C
from ..data.dataset.utils import get_level_index
The provided code snippet includes necessary dependencies for implementing the `indicator_analysis` function. Write a Python function `def indicator_analysis(df, method="mean")` to solve the following problem:
analyze statistical time-series indicators of trading Parameters ---------- df : pandas.DataFrame columns: like ['pa', 'pos', 'ffr', 'deal_amount', 'value']. Necessary fields: - 'pa' is the price advantage in trade indicators - 'pos' is the positive rate in trade indicators - 'ffr' is the fulfill rate in trade indicators Optional fields: - 'deal_amount' is the total deal deal_amount, only necessary when method is 'amount_weighted' - 'value' is the total trade value, only necessary when method is 'value_weighted' index: Index(datetime) method : str, optional statistics method of pa/ffr, by default "mean" - if method is 'mean', count the mean statistical value of each trade indicator - if method is 'amount_weighted', count the deal_amount weighted mean statistical value of each trade indicator - if method is 'value_weighted', count the value weighted mean statistical value of each trade indicator Note: statistics method of pos is always "mean" Returns ------- pd.DataFrame statistical value of each trade indicators
Here is the function:
def indicator_analysis(df, method="mean"):
    """Aggregate time-series trade indicators into one statistic per indicator.

    Parameters
    ----------
    df : pandas.DataFrame
        Indexed by datetime. Must contain 'pa' (price advantage), 'pos'
        (positive rate), 'ffr' (fulfill rate), 'count', 'deal_amount' and
        'value' columns.
    method : str, optional
        Weighting for pa/ffr: 'mean' (count-weighted), 'amount_weighted'
        (|deal_amount|-weighted) or 'value_weighted' (|value|-weighted).
        'pos' is always aggregated with the 'mean' weights.

    Returns
    -------
    pd.DataFrame
        One column 'value' with the aggregated statistic of each indicator.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported weighting schemes.
    """
    weight_sources = {
        "mean": df["count"],
        "amount_weighted": df["deal_amount"].abs(),
        "value_weighted": df["value"].abs(),
    }
    if method not in weight_sources:
        raise ValueError(f"indicator_analysis method {method} is not supported!")
    # Weighted mean of the pa/ffr indicators under the chosen scheme.
    pa_ffr_weights = weight_sources[method]
    weighted = df[["ffr", "pa"]].mul(pa_ffr_weights, axis=0)
    res = weighted.sum() / pa_ffr_weights.sum()
    # 'pos' is always aggregated with the plain count weights.
    count_weights = weight_sources["mean"]
    res.loc["pos"] = df["pos"].mul(count_weights).sum() / count_weights.sum()
    return res.to_frame("value")
19,623 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import warnings
from typing import Union
from ..log import get_module_logger
from ..utils import get_date_range
from ..utils.resam import Freq
from ..strategy.base import BaseStrategy
from ..backtest import get_exchange, position, backtest as backtest_func, executor as _executor
from ..data import D
from ..config import C
from ..data.dataset.utils import get_level_index
def get_date_range(trading_date, left_shift=0, right_shift=0, future=False):
    """get trading date range by shift

    Parameters
    ----------
    trading_date: pd.Timestamp
        anchor trading date
    left_shift: int
        offset (in trading days) of the range start, relative to trading_date
    right_shift: int
        offset (in trading days) of the range end, relative to trading_date
    future: bool
        whether to include future trading days from the calendar
    """
    from ..data import D  # pylint: disable=C0415

    # get_date_by_shift is defined elsewhere in this module
    start = get_date_by_shift(trading_date, left_shift, future=future)
    end = get_date_by_shift(trading_date, right_shift, future=future)
    calendar = D.calendar(start, end, future=future)
    return calendar
def get_exchange(
    exchange: Union[str, dict, object, Path] = None,
    freq: str = "day",
    start_time: Union[pd.Timestamp, str] = None,
    end_time: Union[pd.Timestamp, str] = None,
    codes: Union[list, str] = "all",
    subscribe_fields: list = [],  # NOTE(review): mutable default argument — safe only if Exchange never mutates it; verify
    open_cost: float = 0.0015,
    close_cost: float = 0.0025,
    min_cost: float = 5.0,
    limit_threshold: Union[Tuple[str, str], float, None] | None = None,
    deal_price: Union[str, Tuple[str, str], List[str]] | None = None,
    **kwargs: Any,
) -> Exchange:
    """get_exchange
    Parameters
    ----------
    # exchange related arguments
    exchange: Exchange
        It could be None or any types that are acceptable by `init_instance_by_config`.
    freq: str
        frequency of data.
    start_time: Union[pd.Timestamp, str]
        closed start time for backtest.
    end_time: Union[pd.Timestamp, str]
        closed end time for backtest.
    codes: Union[list, str]
        list stock_id list or a string of instruments (i.e. all, csi500, sse50)
    subscribe_fields: list
        subscribe fields.
    open_cost : float
        open transaction cost. It is a ratio. The cost is proportional to your order's deal amount.
    close_cost : float
        close transaction cost. It is a ratio. The cost is proportional to your order's deal amount.
    min_cost : float
        min transaction cost. It is an absolute amount of cost instead of a ratio of your order's deal amount.
        e.g. You must pay at least 5 yuan of commission regardless of your order's deal amount.
    deal_price: Union[str, Tuple[str, str], List[str]]
        The `deal_price` supports following two types of input
        - <deal_price> : str
        - (<buy_price>, <sell_price>): Tuple[str, str] or List[str]
        <deal_price>, <buy_price> or <sell_price> := <price>
        <price> := str
        - for example '$close', '$open', '$vwap' ("close" is OK. `Exchange` will help to prepend
          "$" to the expression)
    limit_threshold : float
        limit move 0.1 (10%) for example, long and short with same limit.
    Returns
    -------
    :class: Exchange
        an initialized Exchange object
    """
    # Default the limit threshold from the global config when not given.
    if limit_threshold is None:
        limit_threshold = C.limit_threshold
    if exchange is None:
        # No exchange spec supplied: build a fresh Exchange from the arguments.
        logger.info("Create new exchange")
        exchange = Exchange(
            freq=freq,
            start_time=start_time,
            end_time=end_time,
            codes=codes,
            deal_price=deal_price,
            subscribe_fields=subscribe_fields,
            limit_threshold=limit_threshold,
            open_cost=open_cost,
            close_cost=close_cost,
            min_cost=min_cost,
            **kwargs,
        )
        return exchange
    else:
        # A spec (str/dict/object/path) was supplied: resolve it via config machinery.
        return init_instance_by_config(exchange, accept_types=Exchange)
# Module-level config singleton built from the default settings.
C = QlibConfig(_default_config)
def get_level_index(df: pd.DataFrame, level: Union[str, int]) -> int:
    """
    get the level index of `df` given `level`

    Parameters
    ----------
    df : pd.DataFrame
        data
    level : Union[str, int]
        index level, given either by name or by position

    Returns
    -------
    int:
        The level index in the multiple index

    Raises
    ------
    NotImplementedError
        If `level` is neither a str nor an int.
    """
    if isinstance(level, str):
        try:
            return df.index.names.index(level)
        except (AttributeError, ValueError):
            # NOTE: If level index is not given in the data, the default level index will be ('datetime', 'instrument')
            return ("datetime", "instrument").index(level)
    elif isinstance(level, int):
        return level
    else:
        # BUG FIX: the original signature read `level=Union[str, int]`, making the
        # typing construct the *default value* instead of an annotation; also
        # dropped the pointless f-prefix on this placeholder-free message.
        raise NotImplementedError("This type of input is not supported")
The provided code snippet includes necessary dependencies for implementing the `long_short_backtest` function. Write a Python function `def long_short_backtest( pred, topk=50, deal_price=None, shift=1, open_cost=0, close_cost=0, trade_unit=None, limit_threshold=None, min_cost=5, subscribe_fields=[], extract_codes=False, )` to solve the following problem:
A backtest for long-short strategy :param pred: The trading signal produced on day `T`. :param topk: The short topk securities and long topk securities. :param deal_price: The price to deal the trading. :param shift: Whether to shift prediction by one day. The trading day will be T+1 if shift==1. :param open_cost: open transaction cost. :param close_cost: close transaction cost. :param trade_unit: 100 for China A. :param limit_threshold: limit move 0.1 (10%) for example, long and short with same limit. :param min_cost: min transaction cost. :param subscribe_fields: subscribe fields. :param extract_codes: bool. will we pass the codes extracted from the pred to the exchange. NOTE: This will be faster with offline qlib. :return: The result of backtest, it is represented by a dict. { "long": long_returns(excess), "short": short_returns(excess), "long_short": long_short_returns}
Here is the function:
def long_short_backtest(
    pred,
    topk=50,
    deal_price=None,
    shift=1,
    open_cost=0,
    close_cost=0,
    trade_unit=None,
    limit_threshold=None,
    min_cost=5,
    subscribe_fields=[],  # NOTE(review): mutable default; it is copied below before mutation, so no cross-call sharing
    extract_codes=False,
):
    """
    A backtest for long-short strategy

    :param pred: The trading signal produced on day `T`.
    :param topk: The short topk securities and long topk securities.
    :param deal_price: The price to deal the trading.
    :param shift: Whether to shift prediction by one day. The trading day will be T+1 if shift==1.
    :param open_cost: open transaction cost.
    :param close_cost: close transaction cost.
    :param trade_unit: 100 for China A.
    :param limit_threshold: limit move 0.1 (10%) for example, long and short with same limit.
    :param min_cost: min transaction cost.
    :param subscribe_fields: subscribe fields.
    :param extract_codes: bool.
        will we pass the codes extracted from the pred to the exchange.
        NOTE: This will be faster with offline qlib.
    :return: The result of backtest, it is represented by a dict.
        { "long": long_returns(excess),
          "short": short_returns(excess),
          "long_short": long_short_returns}
    """
    # Normalize the prediction index to (datetime, instrument) order.
    if get_level_index(pred, level="datetime") == 1:
        pred = pred.swaplevel().sort_index()

    # Fall back to the global config C for unspecified trading parameters.
    if trade_unit is None:
        trade_unit = C.trade_unit
    if limit_threshold is None:
        limit_threshold = C.limit_threshold
    if deal_price is None:
        deal_price = C.deal_price
    if deal_price[0] != "$":
        deal_price = "$" + deal_price

    # Copy before appending so the caller's list (and the default) is untouched.
    subscribe_fields = subscribe_fields.copy()
    # Expression for the next-period return of the deal price, evaluated by the exchange.
    profit_str = f"Ref({deal_price}, -1)/{deal_price} - 1"
    subscribe_fields.append(profit_str)

    trade_exchange = get_exchange(
        pred=pred,
        deal_price=deal_price,
        subscribe_fields=subscribe_fields,
        limit_threshold=limit_threshold,
        open_cost=open_cost,
        close_cost=close_cost,
        min_cost=min_cost,
        trade_unit=trade_unit,
        extract_codes=extract_codes,
        shift=shift,
    )

    # Prediction dates, and the corresponding trade dates shifted `shift` days forward.
    _pred_dates = pred.index.get_level_values(level="datetime")
    predict_dates = D.calendar(start_time=_pred_dates.min(), end_time=_pred_dates.max())
    trade_dates = np.append(predict_dates[shift:], get_date_range(predict_dates[-1], left_shift=1, right_shift=shift))

    long_returns = {}
    short_returns = {}
    ls_returns = {}

    for pdate, date in zip(predict_dates, trade_dates):
        # Rank the day's scores and take the top / bottom `topk` names.
        score = pred.loc(axis=0)[pdate, :]
        score = score.reset_index().sort_values(by="score", ascending=False)
        long_stocks = list(score.iloc[:topk]["instrument"])
        short_stocks = list(score.iloc[-topk:]["instrument"])
        score = score.set_index(["datetime", "instrument"]).sort_index()

        long_profit = []
        short_profit = []
        all_profit = []

        # Long leg: realized next-period profit of each tradable top-k stock.
        for stock in long_stocks:
            if not trade_exchange.is_stock_tradable(stock_id=stock, trade_date=date):
                continue
            profit = trade_exchange.get_quote_info(stock_id=stock, start_time=date, end_time=date, field=profit_str)
            if np.isnan(profit):
                long_profit.append(0)
            else:
                long_profit.append(profit)
        # Short leg: negated profit of each tradable bottom-k stock.
        for stock in short_stocks:
            if not trade_exchange.is_stock_tradable(stock_id=stock, trade_date=date):
                continue
            profit = trade_exchange.get_quote_info(stock_id=stock, start_time=date, end_time=date, field=profit_str)
            if np.isnan(profit):
                short_profit.append(0)
            else:
                short_profit.append(profit * -1)
        # Market benchmark: mean profit over all scored, non-suspended stocks.
        # NOTE(review): after set_index(["datetime", "instrument"]), level=0 is the
        # datetime level — verify this selection actually yields instrument codes.
        for stock in list(score.loc(axis=0)[pdate, :].index.get_level_values(level=0)):
            # exclude the suspend stock
            if trade_exchange.check_stock_suspended(stock_id=stock, trade_date=date):
                continue
            profit = trade_exchange.get_quote_info(stock_id=stock, start_time=date, end_time=date, field=profit_str)
            if np.isnan(profit):
                all_profit.append(0)
            else:
                all_profit.append(profit)

        # Excess returns relative to the all-stock average.
        long_returns[date] = np.mean(long_profit) - np.mean(all_profit)
        short_returns[date] = np.mean(short_profit) + np.mean(all_profit)
        ls_returns[date] = np.mean(short_profit) + np.mean(long_profit)

    return dict(
        zip(
            ["long", "short", "long_short"],
            map(pd.Series, [long_returns, short_returns, ls_returns]),
        )
    )
19,624 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import warnings
from typing import Union
from ..log import get_module_logger
from ..utils import get_date_range
from ..utils.resam import Freq
from ..strategy.base import BaseStrategy
from ..backtest import get_exchange, position, backtest as backtest_func, executor as _executor
from ..data import D
from ..config import C
from ..data.dataset.utils import get_level_index
def backtest_daily(
    start_time: Union[str, pd.Timestamp],
    end_time: Union[str, pd.Timestamp],
    strategy: Union[str, dict, BaseStrategy],
    executor: Union[str, dict, _executor.BaseExecutor] = None,
    account: Union[float, int, position.Position] = 1e8,
    benchmark: str = "SH000300",
    exchange_kwargs: dict = None,
    pos_type: str = "Position",
):
    # NOTE(review): the body of backtest_daily appears to be missing from this
    # extract — only the demo helper below survives. Verify against the original
    # module before relying on this definition.
    def t_run():
        # Demo driver: load a saved prediction file and run a daily backtest on it.
        pred_FN = "./check_pred.csv"
        pred: pd.DataFrame = pd.read_csv(pred_FN)
        pred["datetime"] = pd.to_datetime(pred["datetime"])
        # First two columns are assumed to be (datetime, instrument) — TODO confirm.
        pred = pred.set_index([pred.columns[0], pred.columns[1]])
        pred = pred.iloc[:9000]
        strategy_config = {
            "topk": 50,
            "n_drop": 5,
            "signal": pred,
        }
        report_df, positions = backtest_daily(start_time="2017-01-01", end_time="2020-08-01", strategy=strategy_config)
        print(report_df.head())
        print(positions.keys())
        print(positions[list(positions.keys())[0]])
        return 0
19,625 | import torch
import numpy as np
import pandas as pd
import torch
def data_to_tensor(data, device="cpu", raise_error=False):
    """Recursively convert ``data`` to float torch tensors on ``device``.

    Supports tensors, pandas DataFrame/Series, numpy arrays, and (nested)
    tuples/lists/dicts thereof. Tuples are converted to lists. Unrecognized
    leaf values are returned unchanged unless ``raise_error`` is True, in
    which case a ValueError is raised.
    """
    if isinstance(data, torch.Tensor):
        # Already a tensor: just move it to the requested device.
        return data.cpu() if device == "cpu" else data.to(device)
    if isinstance(data, (pd.DataFrame, pd.Series)):
        return data_to_tensor(torch.from_numpy(data.values).float(), device)
    if isinstance(data, np.ndarray):
        return data_to_tensor(torch.from_numpy(data).float(), device)
    if isinstance(data, (tuple, list)):
        return [data_to_tensor(item, device) for item in data]
    if isinstance(data, dict):
        return {key: data_to_tensor(value, device) for key, value in data.items()}
    if raise_error:
        raise ValueError(f"Unsupported data type: {type(data)}.")
    return data
19,626 | from __future__ import annotations
from typing import Any, Generic, TypeVar
import gym
import numpy as np
from gym import spaces
from qlib.typehint import final
from .simulator import ActType, StateType
class GymSpaceValidationError(Exception):
    """Raised when a sample does not belong to the expected gym space."""

    def __init__(self, message: str, space: gym.Space, x: Any) -> None:
        # Keep the failing space and sample around for diagnostics.
        self.message = message
        self.space = space
        self.x = x

    def __str__(self) -> str:
        return f"{self.message}\n  Space: {self.space}\n  Sample: {self.x}"
The provided code snippet includes necessary dependencies for implementing the `_gym_space_contains` function. Write a Python function `def _gym_space_contains(space: gym.Space, x: Any) -> None` to solve the following problem:
Strengthened version of gym.Space.contains. Giving more diagnostic information on why validation fails. Throw exception rather than returning true or false.
Here is the function:
def _gym_space_contains(space: gym.Space, x: Any) -> None:
    """Strengthened version of gym.Space.contains.
    Giving more diagnostic information on why validation fails.
    Throw exception rather than returning true or false.
    """
    # Dict spaces: require a dict sample of matching length, then recurse per key.
    if isinstance(space, spaces.Dict):
        if not isinstance(x, dict) or len(x) != len(space):
            raise GymSpaceValidationError("Sample must be a dict with same length as space.", space, x)
        for k, subspace in space.spaces.items():
            if k not in x:
                raise GymSpaceValidationError(f"Key {k} not found in sample.", space, x)
            try:
                _gym_space_contains(subspace, x[k])
            except GymSpaceValidationError as e:
                # Chain so the top-level error reports which key failed.
                raise GymSpaceValidationError(f"Subspace of key {k} validation error.", space, x) from e
    # Tuple spaces: accept list/ndarray by promotion, then recurse per position.
    elif isinstance(space, spaces.Tuple):
        if isinstance(x, (list, np.ndarray)):
            x = tuple(x)  # Promote list and ndarray to tuple for contains check
        if not isinstance(x, tuple) or len(x) != len(space):
            raise GymSpaceValidationError("Sample must be a tuple with same length as space.", space, x)
        for i, (subspace, part) in enumerate(zip(space, x)):
            try:
                _gym_space_contains(subspace, part)
            except GymSpaceValidationError as e:
                raise GymSpaceValidationError(f"Subspace of index {i} validation error.", space, x) from e
    # Leaf spaces: fall back to gym's own membership test.
    else:
        if not space.contains(x):
            raise GymSpaceValidationError("Validation error reported by gym.", space, x)
19,627 | from __future__ import annotations
import argparse
import os
import random
import sys
import warnings
from pathlib import Path
from typing import cast, List, Optional
import numpy as np
import pandas as pd
import torch
import yaml
from qlib.backtest import Order
from qlib.backtest.decision import OrderDir
from qlib.constant import ONE_MIN
from qlib.rl.data.native import load_handler_intraday_processed_data
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution import SingleAssetOrderExecutionSimple
from qlib.rl.reward import Reward
from qlib.rl.trainer import Checkpoint, backtest, train
from qlib.rl.trainer.callbacks import Callback, EarlyStopping, MetricsWriter
from qlib.rl.utils.log import CsvWriter
from qlib.utils import init_instance_by_config
from tianshou.policy import BasePolicy
from torch.utils.data import Dataset
import torch
def seed_everything(seed: int) -> None:
    """Seed every RNG in use (python ``random``, numpy, torch CPU and CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force cuDNN to pick deterministic kernels (may be slower).
    torch.backends.cudnn.deterministic = True
19,628 | from __future__ import annotations
import argparse
import os
import random
import sys
import warnings
from pathlib import Path
from typing import cast, List, Optional
import numpy as np
import pandas as pd
import torch
import yaml
from qlib.backtest import Order
from qlib.backtest.decision import OrderDir
from qlib.constant import ONE_MIN
from qlib.rl.data.native import load_handler_intraday_processed_data
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution import SingleAssetOrderExecutionSimple
from qlib.rl.reward import Reward
from qlib.rl.trainer import Checkpoint, backtest, train
from qlib.rl.trainer.callbacks import Callback, EarlyStopping, MetricsWriter
from qlib.rl.utils.log import CsvWriter
from qlib.utils import init_instance_by_config
from tianshou.policy import BasePolicy
from torch.utils.data import Dataset
def _read_orders(order_dir: Path) -> pd.DataFrame:
if os.path.isfile(order_dir):
return pd.read_pickle(order_dir)
else:
orders = []
for file in order_dir.iterdir():
order_data = pd.read_pickle(file)
orders.append(order_data)
return pd.concat(orders) | null |
19,629 | from __future__ import annotations
import argparse
import os
import random
import sys
import warnings
from pathlib import Path
from typing import cast, List, Optional
import numpy as np
import pandas as pd
import torch
import yaml
from qlib.backtest import Order
from qlib.backtest.decision import OrderDir
from qlib.constant import ONE_MIN
from qlib.rl.data.native import load_handler_intraday_processed_data
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution import SingleAssetOrderExecutionSimple
from qlib.rl.reward import Reward
from qlib.rl.trainer import Checkpoint, backtest, train
from qlib.rl.trainer.callbacks import Callback, EarlyStopping, MetricsWriter
from qlib.rl.utils.log import CsvWriter
from qlib.utils import init_instance_by_config
from tianshou.policy import BasePolicy
from torch.utils.data import Dataset
class LazyLoadDataset(Dataset):
    """Torch Dataset that lazily materializes ``Order`` objects from an order table.

    Each row of the order file becomes an ``Order`` whose start/end times are
    derived from intraday tick offsets, loaded once on first access.
    """

    def __init__(
        self,
        data_dir: str,
        order_file_path: Path,
        default_start_time_index: int,
        default_end_time_index: int,
    ) -> None:
        # Tick positions (within a trading day) delimiting the execution window.
        self._default_start_time_index = default_start_time_index
        self._default_end_time_index = default_end_time_index
        self._order_df = _read_orders(order_file_path).reset_index()
        # Cached intraday tick offsets; filled lazily by the first __getitem__.
        self._ticks_index: Optional[pd.DatetimeIndex] = None
        self._data_dir = Path(data_dir)

    def __len__(self) -> int:
        # One Order per row of the order table.
        return len(self._order_df)

    def __getitem__(self, index: int) -> Order:
        row = self._order_df.iloc[index]
        date = pd.Timestamp(str(row["date"]))
        if self._ticks_index is None:
            # TODO: We only load ticks index once based on the assumption that ticks index of different dates
            # TODO: in one experiment are all the same. If that assumption is not hold, we need to load ticks index
            # TODO: of all dates.
            data = load_handler_intraday_processed_data(
                data_dir=self._data_dir,
                stock_id=row["instrument"],
                date=date,
                feature_columns_today=[],
                feature_columns_yesterday=[],
                backtest=True,
                index_only=True,
            )
            # Store offsets relative to the date so they can be re-anchored to any day.
            self._ticks_index = [t - date for t in data.today.index]
        order = Order(
            stock_id=row["instrument"],
            amount=row["amount"],
            direction=OrderDir(int(row["order_type"])),
            start_time=date + self._ticks_index[self._default_start_time_index],
            # End bound is inclusive of the last tick, hence the extra ONE_MIN.
            end_time=date + self._ticks_index[self._default_end_time_index - 1] + ONE_MIN,
        )
        return order
class StateInterpreter(Generic[StateType, ObsType], Interpreter):
    """Interpret the execution result of the qlib executor into an RL env observation."""

    def observation_space(self) -> gym.Space:
        raise NotImplementedError()

    def validate(self, obs: ObsType) -> None:
        """Check that *obs* belongs to the pre-defined observation space."""
        _gym_space_contains(self.observation_space, obs)

    def __call__(self, simulator_state: StateType) -> ObsType:
        observation = self.interpret(simulator_state)
        self.validate(observation)
        return observation

    def interpret(self, simulator_state: StateType) -> ObsType:
        """Translate a simulator state into an observation for the policy.

        Parameters
        ----------
        simulator_state
            Retrieved with ``simulator.get_state()``.

        Returns
        -------
        State needed by policy, conforming to ``observation_space``.
        """
        raise NotImplementedError("interpret is not implemented!")
class ActionInterpreter(Generic[StateType, PolicyActType, ActType], Interpreter):
    """Interpret a raw RL agent action into an action accepted by the qlib simulator."""

    def action_space(self) -> gym.Space:
        raise NotImplementedError()

    def validate(self, action: PolicyActType) -> None:
        """Check that *action* belongs to the pre-defined action space."""
        _gym_space_contains(self.action_space, action)

    def __call__(self, simulator_state: StateType, action: PolicyActType) -> ActType:
        self.validate(action)
        converted = self.interpret(simulator_state, action)
        return converted

    def interpret(self, simulator_state: StateType, action: PolicyActType) -> ActType:
        """Convert the raw policy *action* into the simulator's action type.

        Parameters
        ----------
        simulator_state
            Retrieved with ``simulator.get_state()``.
        action
            Raw action given by policy.

        Returns
        -------
        The action needed by simulator,
        """
        raise NotImplementedError("interpret is not implemented!")
class Reward(Generic[SimulatorState]):
    """Maps a simulator state to a scalar reward.

    Subclasses implement :meth:`reward` with their own calculation recipe;
    the instance itself is callable.
    """

    # Weak back-reference to the env wrapper, used only for metric logging.
    env: Optional[EnvWrapper] = None

    def __call__(self, simulator_state: SimulatorState) -> float:
        return self.reward(simulator_state)

    def reward(self, simulator_state: SimulatorState) -> float:
        """Compute the reward for *simulator_state*; override in subclasses."""
        raise NotImplementedError("Implement reward calculation recipe in `reward()`.")

    def log(self, name: str, value: Any) -> None:
        """Record a named scalar through the attached env wrapper's logger."""
        assert self.env is not None
        self.env.logger.add_scalar(name, value)
class Callback:
    """Base class of all callbacks.

    Every hook is a no-op by default; subclasses override only the events they
    care about. Each hook receives the active trainer and the training vessel.
    """

    def on_fit_start(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called before the whole fit process begins."""

    def on_fit_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called after the whole fit process ends."""

    def on_train_start(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called when each collect for training begins."""

    def on_train_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called when the training ends.
        To access all outputs produced during training, cache the data in either trainer and vessel,
        and post-process them in this hook.
        """

    def on_validate_start(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called when every run for validation begins."""

    def on_validate_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called when the validation ends."""

    def on_test_start(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called when every run of testing begins."""

    def on_test_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called when the testing ends."""

    def on_iter_start(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called when every iteration (i.e., collect) starts."""

    def on_iter_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        """Called upon every end of iteration.
        This is called **after** the bump of ``current_iter``,
        when the previous iteration is considered complete.
        """

    def state_dict(self) -> Any:
        """Get a state dict of the callback for pause and resume."""

    def load_state_dict(self, state_dict: Any) -> None:
        """Resume the callback from a saved state dict."""
class EarlyStopping(Callback):
    """Stop training when a monitored metric has stopped improving.

    The earlystopping callback will be triggered each time validation ends.
    It will examine the metrics produced in validation,
    and get the metric with name ``monitor`` (``monitor`` is ``reward`` by default),
    to check whether it's no longer increasing / decreasing.
    It takes ``min_delta`` and ``patience`` if applicable.
    If it's found to be not increasing / decreasing any more.
    ``trainer.should_stop`` will be set to true,
    and the training terminates.
    Implementation reference: https://github.com/keras-team/keras/blob/v2.9.0/keras/callbacks.py#L1744-L1893
    """

    def __init__(
        self,
        monitor: str = "reward",
        min_delta: float = 0.0,
        patience: int = 0,
        mode: Literal["min", "max"] = "max",
        baseline: float | None = None,
        restore_best_weights: bool = False,
    ):
        super().__init__()
        self.monitor = monitor
        self.patience = patience
        self.baseline = baseline
        self.min_delta = abs(min_delta)
        self.restore_best_weights = restore_best_weights
        self.best_weights: Any | None = None
        if mode not in ["min", "max"]:
            raise ValueError("Unsupported earlystopping mode: " + mode)
        if mode == "min":
            self.monitor_op = np.less
        elif mode == "max":
            self.monitor_op = np.greater
        # Sign the delta so `_is_improvement` can always subtract it:
        # positive in "max" mode, negative in "min" mode.
        if self.monitor_op == np.greater:
            self.min_delta *= 1
        else:
            self.min_delta *= -1

    def state_dict(self) -> dict:
        # NOTE: ``wait``/``best``/``best_iter`` are created in ``on_fit_start``,
        # so this must not be called before fitting has started.
        return {"wait": self.wait, "best": self.best, "best_weights": self.best_weights, "best_iter": self.best_iter}

    def load_state_dict(self, state_dict: dict) -> None:
        self.wait = state_dict["wait"]
        self.best = state_dict["best"]
        self.best_weights = state_dict["best_weights"]
        self.best_iter = state_dict["best_iter"]

    def on_fit_start(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        # Allow instances to be re-used
        self.wait = 0
        self.best = np.inf if self.monitor_op == np.less else -np.inf
        self.best_weights = None
        self.best_iter = 0

    def on_validate_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        current = self.get_monitor_value(trainer)
        if current is None:
            return
        if self.restore_best_weights and self.best_weights is None:
            # Restore the weights after first iteration if no progress is ever made.
            self.best_weights = copy.deepcopy(vessel.state_dict())
        self.wait += 1
        if self._is_improvement(current, self.best):
            self.best = current
            self.best_iter = trainer.current_iter
            if self.restore_best_weights:
                self.best_weights = copy.deepcopy(vessel.state_dict())
            # Only restart wait if we beat both the baseline and our previous best.
            if self.baseline is None or self._is_improvement(current, self.baseline):
                self.wait = 0
        msg = (
            f"#{trainer.current_iter} current reward: {current:.4f}, best reward: {self.best:.4f} in #{self.best_iter}"
        )
        _logger.info(msg)
        # Only check after the first epoch.
        if self.wait >= self.patience and trainer.current_iter > 0:
            trainer.should_stop = True
            # FIX: the original line had a stray ``f`` prefix on a %-style lazy
            # logging format string; the placeholder was never interpolated by
            # the f-string and only worked because logging filled it in lazily.
            _logger.info("On iteration %d: early stopping", trainer.current_iter + 1)
            if self.restore_best_weights and self.best_weights is not None:
                _logger.info("Restoring model weights from the end of the best iteration: %d", self.best_iter + 1)
                vessel.load_state_dict(self.best_weights)

    def get_monitor_value(self, trainer: Trainer) -> Any:
        """Fetch the monitored metric from the trainer; warn and return None if absent."""
        monitor_value = trainer.metrics.get(self.monitor)
        if monitor_value is None:
            _logger.warning(
                "Early stopping conditioned on metric `%s` which is not available. Available metrics are: %s",
                self.monitor,
                ",".join(list(trainer.metrics.keys())),
            )
        return monitor_value

    def _is_improvement(self, monitor_value, reference_value):
        # min_delta already carries the right sign for the chosen mode.
        return self.monitor_op(monitor_value - self.min_delta, reference_value)
class MetricsWriter(Callback):
    """Persist training / validation metrics to CSV files under ``dirpath``."""

    def __init__(self, dirpath: Path) -> None:
        self.dirpath = dirpath
        self.dirpath.mkdir(exist_ok=True, parents=True)
        self.train_records: List[dict] = []
        self.valid_records: List[dict] = []

    def on_train_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        # Training metrics are everything NOT prefixed with "val/".
        record = {key: value for key, value in trainer.metrics.items() if not key.startswith("val/")}
        self.train_records.append(record)
        pd.DataFrame.from_records(self.train_records).to_csv(self.dirpath / "train_result.csv", index=True)

    def on_validate_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
        # Validation metrics carry the "val/" prefix.
        record = {key: value for key, value in trainer.metrics.items() if key.startswith("val/")}
        self.valid_records.append(record)
        pd.DataFrame.from_records(self.valid_records).to_csv(self.dirpath / "validation_result.csv", index=True)
class CsvWriter(LogWriter):
    """Dump all episode metrics to a ``result.csv``.
    This is not the correct implementation. It's only used for first iteration.
    """

    SUPPORTED_TYPES = (float, str, pd.Timestamp)

    all_records: List[Dict[str, Any]]

    # FIXME: save & reload
    def __init__(self, output_dir: Path, loglevel: int | LogLevel = LogLevel.PERIODIC) -> None:
        super().__init__(loglevel)
        self.output_dir = output_dir
        self.output_dir.mkdir(exist_ok=True)

    def clear(self) -> None:
        super().clear()
        self.all_records = []

    def log_episode(self, length: int, rewards: List[float], contents: List[Dict[str, Any]]) -> None:
        # FIXME Same as ConsoleLogger, needs a refactor to eliminate code-dup
        # Collect values of supported types, grouped by metric name, over all steps.
        grouped: Dict[str, list] = defaultdict(list)
        for step in contents:
            for key, val in step.items():
                if isinstance(val, self.SUPPORTED_TYPES):
                    grouped[key].append(val)
        # Aggregate each metric over the episode and stash one flat record.
        record: Dict[str, float] = {key: self.aggregation(vals, key) for key, vals in grouped.items()}  # type: ignore
        self.all_records.append(record)

    def on_env_all_done(self) -> None:
        # FIXME: this is temporary
        pd.DataFrame.from_records(self.all_records).to_csv(self.output_dir / "result.csv", index=False)
def train_and_test(
    env_config: dict,
    simulator_config: dict,
    trainer_config: dict,
    data_config: dict,
    state_interpreter: StateInterpreter,
    action_interpreter: ActionInterpreter,
    policy: BasePolicy,
    reward: Reward,
    run_training: bool,
    run_backtest: bool,
) -> None:
    """Train and/or backtest an RL order-execution policy, driven by config dicts.

    ``run_training`` triggers a training run on the "train"/"valid" order splits;
    ``run_backtest`` triggers a backtest on the "test" split. Both phases share the
    same simulator factory, interpreters, policy and reward.
    """
    order_root_path = Path(data_config["source"]["order_dir"])
    data_granularity = simulator_config.get("data_granularity", 1)

    def _simulator_factory_simple(order: Order) -> SingleAssetOrderExecutionSimple:
        # One fresh single-asset simulator per order (simulators are ephemeral).
        return SingleAssetOrderExecutionSimple(
            order=order,
            data_dir=data_config["source"]["feature_root_dir"],
            feature_columns_today=data_config["source"]["feature_columns_today"],
            feature_columns_yesterday=data_config["source"]["feature_columns_yesterday"],
            data_granularity=data_granularity,
            ticks_per_step=simulator_config["time_per_step"],
            vol_threshold=simulator_config["vol_limit"],
        )

    # Start/end indices are expressed in raw ticks and are divided by the
    # granularity below, so they must be exact multiples of it.
    assert data_config["source"]["default_start_time_index"] % data_granularity == 0
    assert data_config["source"]["default_end_time_index"] % data_granularity == 0
    if run_training:
        train_dataset, valid_dataset = [
            LazyLoadDataset(
                data_dir=data_config["source"]["feature_root_dir"],
                order_file_path=order_root_path / tag,
                default_start_time_index=data_config["source"]["default_start_time_index"] // data_granularity,
                default_end_time_index=data_config["source"]["default_end_time_index"] // data_granularity,
            )
            for tag in ("train", "valid")
        ]
        callbacks: List[Callback] = []
        if "checkpoint_path" in trainer_config:
            # Metrics CSVs next to the checkpoints; checkpoints in a subfolder.
            callbacks.append(MetricsWriter(dirpath=Path(trainer_config["checkpoint_path"])))
            callbacks.append(
                Checkpoint(
                    dirpath=Path(trainer_config["checkpoint_path"]) / "checkpoints",
                    every_n_iters=trainer_config.get("checkpoint_every_n_iters", 1),
                    save_latest="copy",
                ),
            )
        if "earlystop_patience" in trainer_config:
            callbacks.append(
                EarlyStopping(
                    patience=trainer_config["earlystop_patience"],
                    monitor="val/pa",
                )
            )
        train(
            simulator_fn=_simulator_factory_simple,
            state_interpreter=state_interpreter,
            action_interpreter=action_interpreter,
            policy=policy,
            reward=reward,
            initial_states=cast(List[Order], train_dataset),
            trainer_kwargs={
                "max_iters": trainer_config["max_epoch"],
                "finite_env_type": env_config["parallel_mode"],
                "concurrency": env_config["concurrency"],
                "val_every_n_iters": trainer_config.get("val_every_n_epoch", None),
                "callbacks": callbacks,
            },
            vessel_kwargs={
                "episode_per_iter": trainer_config["episode_per_collect"],
                "update_kwargs": {
                    "batch_size": trainer_config["batch_size"],
                    "repeat": trainer_config["repeat_per_collect"],
                },
                "val_initial_states": valid_dataset,
            },
        )
    if run_backtest:
        test_dataset = LazyLoadDataset(
            data_dir=data_config["source"]["feature_root_dir"],
            order_file_path=order_root_path / "test",
            default_start_time_index=data_config["source"]["default_start_time_index"] // data_granularity,
            default_end_time_index=data_config["source"]["default_end_time_index"] // data_granularity,
        )
        backtest(
            simulator_fn=_simulator_factory_simple,
            state_interpreter=state_interpreter,
            action_interpreter=action_interpreter,
            initial_states=test_dataset,
            policy=policy,
            logger=CsvWriter(Path(trainer_config["checkpoint_path"])),
            reward=reward,
            finite_env_type=env_config["parallel_mode"],
            concurrency=env_config["concurrency"],
        )
from __future__ import annotations
from pathlib import Path
import pandas as pd
def read_order_file(order_file: Path | pd.DataFrame) -> pd.DataFrame:
    """Load an order table from a ``.pkl`` / ``.csv`` file, or pass a DataFrame through.

    Legacy columns ``date`` / ``order_type`` are renamed to
    ``datetime`` / ``direction``, and ``datetime`` is normalized to ``str``.
    """
    if isinstance(order_file, pd.DataFrame):
        return order_file

    path = Path(order_file)
    if path.suffix == ".pkl":
        order_df = pd.read_pickle(path).reset_index()
    elif path.suffix == ".csv":
        order_df = pd.read_csv(path)
    else:
        raise TypeError(f"Unsupported order file type: {order_file}")

    if "date" in order_df.columns:
        # legacy dataframe columns
        order_df = order_df.rename(columns={"date": "datetime", "order_type": "direction"})
    order_df["datetime"] = order_df["datetime"].astype(str)
    return order_df
import os
import platform
import shutil
import sys
import tempfile
from importlib import import_module
import yaml
def merge_a_into_b(a: dict, b: dict) -> dict:
    """Recursively merge dict *a* on top of dict *b* and return the result.

    *b* itself is not modified (a shallow copy is made per level); values from
    *a* win on conflict, and nested dicts present in both are merged recursively.
    NOTE: nested dicts of *a* have ``DELETE_KEY`` popped in place, matching the
    original behavior.
    """
    merged = dict(b)
    for key, value in a.items():
        if isinstance(value, dict) and key in merged:
            value.pop(DELETE_KEY, False)
            merged[key] = merge_a_into_b(value, merged[key])
        else:
            merged[key] = value
    return merged
def parse_backtest_config(path: str) -> dict:
    """Parse a backtest config from a ``.py`` / ``.json`` / ``.yaml`` / ``.yml`` file.

    For ``.py`` configs, the module's public top-level names become config keys.
    A ``_base_`` entry (single path or list of paths, relative to *path*) names
    base config file(s) that are parsed recursively and merged underneath.
    """
    abs_path = os.path.abspath(path)
    check_file_exist(abs_path)

    file_ext_name = os.path.splitext(abs_path)[1]
    if file_ext_name not in (".py", ".json", ".yaml", ".yml"):
        raise IOError("Only py/yml/yaml/json type are supported now!")

    # Work on a temporary copy so that .py configs can be imported as a module
    # without polluting sys.path with the config's own directory.
    with tempfile.TemporaryDirectory() as tmp_config_dir:
        with tempfile.NamedTemporaryFile(dir=tmp_config_dir, suffix=file_ext_name) as tmp_config_file:
            if platform.system() == "Windows":
                # On Windows an open NamedTemporaryFile cannot be reopened; close it first.
                tmp_config_file.close()
            tmp_config_name = os.path.basename(tmp_config_file.name)
            shutil.copyfile(abs_path, tmp_config_file.name)

            if abs_path.endswith(".py"):
                # Import the copy as a throwaway module and harvest its public names.
                tmp_module_name = os.path.splitext(tmp_config_name)[0]
                sys.path.insert(0, tmp_config_dir)
                module = import_module(tmp_module_name)
                sys.path.pop(0)

                config = {k: v for k, v in module.__dict__.items() if not k.startswith("__")}

                # Drop the module from the cache so repeated parses re-import fresh.
                del sys.modules[tmp_module_name]
            else:
                with open(tmp_config_file.name) as input_stream:
                    config = yaml.safe_load(input_stream)

    if "_base_" in config:
        base_file_name = config.pop("_base_")
        if not isinstance(base_file_name, list):
            base_file_name = [base_file_name]

        for f in base_file_name:
            # Base paths are resolved relative to the current config file.
            base_config = parse_backtest_config(os.path.join(os.path.dirname(abs_path), f))
            config = merge_a_into_b(a=config, b=base_config)

    return config
def _convert_all_list_to_tuple(config: dict) -> dict:
    """Recursively convert every list value in *config* to a tuple, in place.

    Returns the same (mutated) dict for call-chaining convenience.
    """
    for key in config:
        value = config[key]
        if isinstance(value, list):
            config[key] = tuple(value)
        elif isinstance(value, dict):
            config[key] = _convert_all_list_to_tuple(value)
    return config
def get_backtest_config_fromfile(path: str) -> dict:
    """Parse a backtest config file and fill in exchange / backtest defaults."""
    config = parse_backtest_config(path)

    # Defaults for the nested "exchange" section; user values win on conflict.
    default_exchange = {
        "open_cost": 0.0005,
        "close_cost": 0.0015,
        "min_cost": 5.0,
        "trade_unit": 100.0,
        "cash_limit": None,
    }
    exchange = merge_a_into_b(a=config["exchange"], b=default_exchange)
    # Downstream consumers expect tuples rather than lists in exchange settings.
    config["exchange"] = _convert_all_list_to_tuple(exchange)

    # Top-level defaults for the backtest run itself.
    default_backtest = {
        "debug_single_stock": None,
        "debug_single_day": None,
        "concurrency": -1,
        "multiplier": 1.0,
        "output_dir": "outputs_backtest/",
        "generate_report": False,
        "data_granularity": "1min",
    }
    return merge_a_into_b(a=config, b=default_backtest)
from __future__ import annotations
from typing import Any, Callable, Dict, List, Sequence, cast
from tianshou.policy import BasePolicy
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.reward import Reward
from qlib.rl.simulator import InitialStateType, Simulator
from qlib.rl.utils import FiniteEnvType, LogWriter
from .trainer import Trainer
from .vessel import TrainingVessel
class StateInterpreter(Generic[StateType, ObsType], Interpreter):
    """Interpret the execution result of the qlib executor into an RL env observation."""

    def observation_space(self) -> gym.Space:
        raise NotImplementedError()

    def validate(self, obs: ObsType) -> None:
        """Check that *obs* belongs to the pre-defined observation space."""
        _gym_space_contains(self.observation_space, obs)

    def __call__(self, simulator_state: StateType) -> ObsType:
        observation = self.interpret(simulator_state)
        self.validate(observation)
        return observation

    def interpret(self, simulator_state: StateType) -> ObsType:
        """Translate a simulator state into an observation for the policy.

        Parameters
        ----------
        simulator_state
            Retrieved with ``simulator.get_state()``.

        Returns
        -------
        State needed by policy, conforming to ``observation_space``.
        """
        raise NotImplementedError("interpret is not implemented!")
class ActionInterpreter(Generic[StateType, PolicyActType, ActType], Interpreter):
    """Interpret a raw RL agent action into an action accepted by the qlib simulator."""

    def action_space(self) -> gym.Space:
        raise NotImplementedError()

    def validate(self, action: PolicyActType) -> None:
        """Check that *action* belongs to the pre-defined action space."""
        _gym_space_contains(self.action_space, action)

    def __call__(self, simulator_state: StateType, action: PolicyActType) -> ActType:
        self.validate(action)
        converted = self.interpret(simulator_state, action)
        return converted

    def interpret(self, simulator_state: StateType, action: PolicyActType) -> ActType:
        """Convert the raw policy *action* into the simulator's action type.

        Parameters
        ----------
        simulator_state
            Retrieved with ``simulator.get_state()``.
        action
            Raw action given by policy.

        Returns
        -------
        The action needed by simulator,
        """
        raise NotImplementedError("interpret is not implemented!")
class Reward(Generic[SimulatorState]):
    """Maps a simulator state to a scalar reward.

    Subclasses implement :meth:`reward` with their own calculation recipe;
    the instance itself is callable.
    """

    # Weak back-reference to the env wrapper, used only for metric logging.
    env: Optional[EnvWrapper] = None

    def __call__(self, simulator_state: SimulatorState) -> float:
        return self.reward(simulator_state)

    def reward(self, simulator_state: SimulatorState) -> float:
        """Compute the reward for *simulator_state*; override in subclasses."""
        raise NotImplementedError("Implement reward calculation recipe in `reward()`.")

    def log(self, name: str, value: Any) -> None:
        """Record a named scalar through the attached env wrapper's logger."""
        assert self.env is not None
        self.env.logger.add_scalar(name, value)
class Simulator(Generic[InitialStateType, StateType, ActType]):
    """
    Simulator that resets with ``__init__``, and transits with ``step(action)``.
    To make the data-flow clear, we make the following restrictions to Simulator:
    1. The only way to modify the inner status of a simulator is by using ``step(action)``.
    2. External modules can *read* the status of a simulator by using ``simulator.get_state()``,
       and check whether the simulator is in the ending state by calling ``simulator.done()``.
    A simulator is defined to be bounded with three types:
    - *InitialStateType* that is the type of the data used to create the simulator.
    - *StateType* that is the type of the **status** (state) of the simulator.
    - *ActType* that is the type of the **action**, which is the input received in each step.
    Different simulators might share the same StateType. For example, when they are dealing with the same task,
    but with different simulation implementation. With the same type, they can safely share other components in the MDP.
    Simulators are ephemeral. The lifecycle of a simulator starts with an initial state, and ends with the trajectory.
    In another word, when the trajectory ends, simulator is recycled.
    If simulators want to share context between (e.g., for speed-up purposes),
    this could be done by accessing the weak reference of environment wrapper.
    Attributes
    ----------
    env
        A reference of env-wrapper, which could be useful in some corner cases.
        Simulators are discouraged to use this, because it's prone to induce errors.
    """

    env: Optional[EnvWrapper] = None

    def __init__(self, initial: InitialStateType, **kwargs: Any) -> None:
        # Subclasses perform their reset logic here; the base class holds no state.
        pass

    def step(self, action: ActType) -> None:
        """Receives an action of ActType.
        Simulator should update its internal state, and return None.
        The updated state can be retrieved with ``simulator.get_state()``.
        """
        raise NotImplementedError()

    def get_state(self) -> StateType:
        raise NotImplementedError()

    def done(self) -> bool:
        """Check whether the simulator is in a "done" state.
        When simulator is in a "done" state,
        it should no longer receive any ``step`` request.
        As simulators are ephemeral, to reset the simulator,
        the old one should be destroyed and a new simulator can be created.
        """
        raise NotImplementedError()
class Trainer:
"""
Utility to train a policy on a particular task.
Different from traditional DL trainer, the iteration of this trainer is "collect",
rather than "epoch", or "mini-batch".
In each collect, :class:`Collector` collects a number of policy-env interactions, and accumulates
them into a replay buffer. This buffer is used as the "data" to train the policy.
At the end of each collect, the policy is *updated* several times.
The API has some resemblence with `PyTorch Lightning <https://pytorch-lightning.readthedocs.io/>`__,
but it's essentially different because this trainer is built for RL applications, and thus
most configurations are under RL context.
We are still looking for ways to incorporate existing trainer libraries, because it looks like
big efforts to build a trainer as powerful as those libraries, and also, that's not our primary goal.
It's essentially different
`tianshou's built-in trainers <https://tianshou.readthedocs.io/en/master/api/tianshou.trainer.html>`__,
as it's far much more complicated than that.
Parameters
----------
max_iters
Maximum iterations before stopping.
val_every_n_iters
Perform validation every n iterations (i.e., training collects).
logger
Logger to record the backtest results. Logger must be present because
without logger, all information will be lost.
finite_env_type
Type of finite env implementation.
concurrency
Parallel workers.
fast_dev_run
Create a subset for debugging.
How this is implemented depends on the implementation of training vessel.
For :class:`~qlib.rl.vessel.TrainingVessel`, if greater than zero,
a random subset sized ``fast_dev_run`` will be used
instead of ``train_initial_states`` and ``val_initial_states``.
"""
should_stop: bool
"""Set to stop the training."""
metrics: dict
"""Numeric metrics of produced in train/val/test.
In the middle of training / validation, metrics will be of the latest episode.
When each iteration of training / validation finishes, metrics will be the aggregation
of all episodes encountered in this iteration.
Cleared on every new iteration of training.
In fit, validation metrics will be prefixed with ``val/``.
"""
current_iter: int
"""Current iteration (collect) of training."""
loggers: List[LogWriter]
"""A list of log writers."""
def __init__(
self,
*,
max_iters: int | None = None,
val_every_n_iters: int | None = None,
loggers: LogWriter | List[LogWriter] | None = None,
callbacks: List[Callback] | None = None,
finite_env_type: FiniteEnvType = "subproc",
concurrency: int = 2,
fast_dev_run: int | None = None,
):
self.max_iters = max_iters
self.val_every_n_iters = val_every_n_iters
if isinstance(loggers, list):
self.loggers = loggers
elif isinstance(loggers, LogWriter):
self.loggers = [loggers]
else:
self.loggers = []
self.loggers.append(LogBuffer(self._metrics_callback, loglevel=self._min_loglevel()))
self.callbacks: List[Callback] = callbacks if callbacks is not None else []
self.finite_env_type = finite_env_type
self.concurrency = concurrency
self.fast_dev_run = fast_dev_run
self.current_stage: Literal["train", "val", "test"] = "train"
self.vessel: TrainingVesselBase = cast(TrainingVesselBase, None)
def initialize(self):
"""Initialize the whole training process.
The states here should be synchronized with state_dict.
"""
self.should_stop = False
self.current_iter = 0
self.current_episode = 0
self.current_stage = "train"
def initialize_iter(self):
"""Initialize one iteration / collect."""
self.metrics = {}
def state_dict(self) -> dict:
"""Putting every states of current training into a dict, at best effort.
It doesn't try to handle all the possible kinds of states in the middle of one training collect.
For most cases at the end of each iteration, things should be usually correct.
Note that it's also intended behavior that replay buffer data in the collector will be lost.
"""
return {
"vessel": self.vessel.state_dict(),
"callbacks": {name: callback.state_dict() for name, callback in self.named_callbacks().items()},
"loggers": {name: logger.state_dict() for name, logger in self.named_loggers().items()},
"should_stop": self.should_stop,
"current_iter": self.current_iter,
"current_episode": self.current_episode,
"current_stage": self.current_stage,
"metrics": self.metrics,
}
def get_policy_state_dict(ckpt_path: Path) -> OrderedDict:
state_dict = torch.load(ckpt_path, map_location="cpu")
if "vessel" in state_dict:
state_dict = state_dict["vessel"]["policy"]
return state_dict
def load_state_dict(self, state_dict: dict) -> None:
"""Load all states into current trainer."""
self.vessel.load_state_dict(state_dict["vessel"])
for name, callback in self.named_callbacks().items():
callback.load_state_dict(state_dict["callbacks"][name])
for name, logger in self.named_loggers().items():
logger.load_state_dict(state_dict["loggers"][name])
self.should_stop = state_dict["should_stop"]
self.current_iter = state_dict["current_iter"]
self.current_episode = state_dict["current_episode"]
self.current_stage = state_dict["current_stage"]
self.metrics = state_dict["metrics"]
def named_callbacks(self) -> Dict[str, Callback]:
"""Retrieve a collection of callbacks where each one has a name.
Useful when saving checkpoints.
"""
return _named_collection(self.callbacks)
def named_loggers(self) -> Dict[str, LogWriter]:
"""Retrieve a collection of loggers where each one has a name.
Useful when saving checkpoints.
"""
return _named_collection(self.loggers)
def fit(self, vessel: TrainingVesselBase, ckpt_path: Path | None = None) -> None:
"""Train the RL policy upon the defined simulator.
Parameters
----------
vessel
A bundle of all elements used in training.
ckpt_path
Load a pre-trained / paused training checkpoint.
"""
self.vessel = vessel
vessel.assign_trainer(self)
if ckpt_path is not None:
_logger.info("Resuming states from %s", str(ckpt_path))
self.load_state_dict(torch.load(ckpt_path))
else:
self.initialize()
self._call_callback_hooks("on_fit_start")
while not self.should_stop:
msg = f"\n{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\tTrain iteration {self.current_iter + 1}/{self.max_iters}"
_logger.info(msg)
self.initialize_iter()
self._call_callback_hooks("on_iter_start")
self.current_stage = "train"
self._call_callback_hooks("on_train_start")
# TODO
# Add a feature that supports reloading the training environment every few iterations.
with _wrap_context(vessel.train_seed_iterator()) as iterator:
vector_env = self.venv_from_iterator(iterator)
self.vessel.train(vector_env)
del vector_env # FIXME: Explicitly delete this object to avoid memory leak.
self._call_callback_hooks("on_train_end")
if self.val_every_n_iters is not None and (self.current_iter + 1) % self.val_every_n_iters == 0:
# Implementation of validation loop
self.current_stage = "val"
self._call_callback_hooks("on_validate_start")
with _wrap_context(vessel.val_seed_iterator()) as iterator:
vector_env = self.venv_from_iterator(iterator)
self.vessel.validate(vector_env)
del vector_env # FIXME: Explicitly delete this object to avoid memory leak.
self._call_callback_hooks("on_validate_end")
# This iteration is considered complete.
# Bumping the current iteration counter.
self.current_iter += 1
if self.max_iters is not None and self.current_iter >= self.max_iters:
self.should_stop = True
self._call_callback_hooks("on_iter_end")
self._call_callback_hooks("on_fit_end")
def test(self, vessel: TrainingVesselBase) -> None:
"""Test the RL policy against the simulator.
The simulator will be fed with data generated in ``test_seed_iterator``.
Parameters
----------
vessel
A bundle of all related elements.
"""
self.vessel = vessel
vessel.assign_trainer(self)
self.initialize_iter()
self.current_stage = "test"
self._call_callback_hooks("on_test_start")
with _wrap_context(vessel.test_seed_iterator()) as iterator:
vector_env = self.venv_from_iterator(iterator)
self.vessel.test(vector_env)
del vector_env # FIXME: Explicitly delete this object to avoid memory leak.
self._call_callback_hooks("on_test_end")
def venv_from_iterator(self, iterator: Iterable[InitialStateType]) -> FiniteVectorEnv:
"""Create a vectorized environment from iterator and the training vessel."""
def env_factory():
# FIXME: state_interpreter and action_interpreter are stateful (having a weakref of env),
# and could be thread unsafe.
# I'm not sure whether it's a design flaw.
# I'll rethink about this when designing the trainer.
if self.finite_env_type == "dummy":
# We could only experience the "threading-unsafe" problem in dummy.
state = copy.deepcopy(self.vessel.state_interpreter)
action = copy.deepcopy(self.vessel.action_interpreter)
rew = copy.deepcopy(self.vessel.reward)
else:
state = self.vessel.state_interpreter
action = self.vessel.action_interpreter
rew = self.vessel.reward
return EnvWrapper(
self.vessel.simulator_fn,
state,
action,
iterator,
rew,
logger=LogCollector(min_loglevel=self._min_loglevel()),
)
return vectorize_env(
env_factory,
self.finite_env_type,
self.concurrency,
self.loggers,
)
def _metrics_callback(self, on_episode: bool, on_collect: bool, log_buffer: LogBuffer) -> None:
if on_episode:
# Update the global counter.
self.current_episode = log_buffer.global_episode
metrics = log_buffer.episode_metrics()
elif on_collect:
# Update the latest metrics.
metrics = log_buffer.collect_metrics()
if self.current_stage == "val":
metrics = {"val/" + name: value for name, value in metrics.items()}
self.metrics.update(metrics)
def _call_callback_hooks(self, hook_name: str, *args: Any, **kwargs: Any) -> None:
    """Invoke ``hook_name`` on every registered callback, in registration order.

    Each hook receives the trainer and the current vessel as its first two
    arguments, followed by whatever extra arguments the call site supplies.
    """
    for cb in self.callbacks:
        getattr(cb, hook_name)(self, self.vessel, *args, **kwargs)
def _min_loglevel(self):
    """Return the lowest log level among attached writers.

    Falls back to ``LogLevel.PERIODIC`` when no writer is attached.
    """
    if self.loggers:
        # To save bandwidth: collect only what the chattiest writer needs.
        return min(writer.loglevel for writer in self.loggers)
    return LogLevel.PERIODIC
class TrainingVessel(TrainingVesselBase):
    """The default implementation of training vessel.

    ``__init__`` accepts a sequence of initial states so that iterator can be created.
    ``train``, ``validate``, ``test`` each do one collect (and also update in train).
    By default, the train initial states will be repeated infinitely during training,
    and collector will control the number of episodes for each iteration.
    In validation and testing, the val / test initial states will be used exactly once.

    Extra hyper-parameters (only used in train) include:

    - ``buffer_size``: Size of replay buffer.
    - ``episode_per_iter``: Episodes per collect at training. Can be overridden by fast dev run.
    - ``update_kwargs``: Keyword arguments appearing in ``policy.update``.
      For example, ``dict(repeat=10, batch_size=64)``.
    """

    def __init__(
        self,
        *,
        simulator_fn: Callable[[InitialStateType], Simulator[InitialStateType, StateType, ActType]],
        state_interpreter: StateInterpreter[StateType, ObsType],
        action_interpreter: ActionInterpreter[StateType, PolicyActType, ActType],
        policy: BasePolicy,
        reward: Reward,
        train_initial_states: Sequence[InitialStateType] | None = None,
        val_initial_states: Sequence[InitialStateType] | None = None,
        test_initial_states: Sequence[InitialStateType] | None = None,
        buffer_size: int = 20000,
        episode_per_iter: int = 1000,
        update_kwargs: Dict[str, Any] = cast(Dict[str, Any], None),
    ):
        self.simulator_fn = simulator_fn  # type: ignore
        self.state_interpreter = state_interpreter
        self.action_interpreter = action_interpreter
        self.policy = policy
        self.reward = reward
        self.train_initial_states = train_initial_states
        self.val_initial_states = val_initial_states
        self.test_initial_states = test_initial_states
        self.buffer_size = buffer_size
        self.episode_per_iter = episode_per_iter
        # ``cast(..., None)`` is used as the declared default instead of ``{}``
        # to avoid a shared mutable default argument; normalize to a dict here.
        self.update_kwargs = update_kwargs or {}

    def train_seed_iterator(self) -> ContextManager[Iterable[InitialStateType]] | Iterable[InitialStateType]:
        """Return a queue of training initial states, shuffled and repeated forever."""
        if self.train_initial_states is not None:
            _logger.info("Training initial states collection size: %d", len(self.train_initial_states))
            # Implement fast_dev_run here.
            train_initial_states = self._random_subset("train", self.train_initial_states, self.trainer.fast_dev_run)
            # repeat=-1: loop over the states indefinitely; the collector decides when to stop.
            return DataQueue(train_initial_states, repeat=-1, shuffle=True)
        return super().train_seed_iterator()

    def val_seed_iterator(self) -> ContextManager[Iterable[InitialStateType]] | Iterable[InitialStateType]:
        """Return a queue of validation initial states, each used exactly once."""
        if self.val_initial_states is not None:
            _logger.info("Validation initial states collection size: %d", len(self.val_initial_states))
            val_initial_states = self._random_subset("val", self.val_initial_states, self.trainer.fast_dev_run)
            return DataQueue(val_initial_states, repeat=1)
        return super().val_seed_iterator()

    def test_seed_iterator(self) -> ContextManager[Iterable[InitialStateType]] | Iterable[InitialStateType]:
        """Return a queue of test initial states, each used exactly once."""
        if self.test_initial_states is not None:
            _logger.info("Testing initial states collection size: %d", len(self.test_initial_states))
            test_initial_states = self._random_subset("test", self.test_initial_states, self.trainer.fast_dev_run)
            return DataQueue(test_initial_states, repeat=1)
        return super().test_seed_iterator()

    def train(self, vector_env: FiniteVectorEnv) -> Dict[str, Any]:
        """Create a collector and collects ``episode_per_iter`` episodes.
        Update the policy on the collected replay buffer.
        """
        self.policy.train()
        with vector_env.collector_guard():
            collector = Collector(
                self.policy, vector_env, VectorReplayBuffer(self.buffer_size, len(vector_env)), exploration_noise=True
            )

            # Number of episodes collected in each training iteration can be overridden by fast dev run.
            if self.trainer.fast_dev_run is not None:
                episodes = self.trainer.fast_dev_run
            else:
                episodes = self.episode_per_iter

            col_result = collector.collect(n_episode=episodes)
            # sample_size=0: the update consumes the whole collected buffer.
            update_result = self.policy.update(sample_size=0, buffer=collector.buffer, **self.update_kwargs)
            res = {**col_result, **update_result}
            self.log_dict(res)
            return res

    def validate(self, vector_env: FiniteVectorEnv) -> Dict[str, Any]:
        """Run the policy (eval mode) over the validation states and log the metrics."""
        self.policy.eval()
        with vector_env.collector_guard():
            test_collector = Collector(self.policy, vector_env)
            # n_step = INF * num_envs: effectively unbounded; presumably the finite
            # env ends the collect once the seed iterator is exhausted — confirm.
            res = test_collector.collect(n_step=INF * len(vector_env))
            self.log_dict(res)
            return res

    def test(self, vector_env: FiniteVectorEnv) -> Dict[str, Any]:
        """Run the policy (eval mode) over the test states and log the metrics."""
        self.policy.eval()
        with vector_env.collector_guard():
            test_collector = Collector(self.policy, vector_env)
            res = test_collector.collect(n_step=INF * len(vector_env))
            self.log_dict(res)
            return res
def _random_subset(name: str, collection: Sequence[T], size: int | None) -> Sequence[T]:
    """Draw a random subset of at most ``size`` items from ``collection``.

    With ``size=None`` (fast dev run disabled) the collection is returned
    unchanged; otherwise a subset is sampled without replacement and the
    reduction is logged under ``name``.
    """
    if size is None:
        # Fast dev run disabled -> keep the full collection.
        return collection
    shuffled_indices = np.random.permutation(len(collection))
    subset = [collection[idx] for idx in shuffled_indices[:size]]
    _logger.info(
        "Fast running in development mode. Cut %s initial states from %d to %d.",
        name,
        len(collection),
        len(subset),
    )
    return subset
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( simulator_fn: Callable[[InitialStateType], Simulator], state_interpreter: StateInterpreter, action_interpreter: ActionInterpreter, initial_states: Sequence[InitialStateType], policy: BasePolicy, reward: Reward, vessel_kwargs: Dict[str, Any], trainer_kwargs: Dict[str, Any], ) -> None` to solve the following problem:
Train a policy with the parallelism provided by RL framework. Experimental API. Parameters might change shortly. Parameters ---------- simulator_fn Callable receiving initial seed, returning a simulator. state_interpreter Interprets the state of simulators. action_interpreter Interprets the policy actions. initial_states Initial states to iterate over. Every state will be run exactly once. policy Policy to train against. reward Reward function. vessel_kwargs Keyword arguments passed to :class:`TrainingVessel`, like ``episode_per_iter``. trainer_kwargs Keyword arguments passed to :class:`Trainer`, like ``finite_env_type``, ``concurrency``.
Here is the function:
def train(
    simulator_fn: Callable[[InitialStateType], Simulator],
    state_interpreter: StateInterpreter,
    action_interpreter: ActionInterpreter,
    initial_states: Sequence[InitialStateType],
    policy: BasePolicy,
    reward: Reward,
    vessel_kwargs: Dict[str, Any],
    trainer_kwargs: Dict[str, Any],
) -> None:
    """Train a policy with the parallelism provided by RL framework.

    Experimental API. Parameters might change shortly.

    Parameters
    ----------
    simulator_fn
        Callable receiving initial seed, returning a simulator.
    state_interpreter
        Interprets the state of simulators.
    action_interpreter
        Interprets the policy actions.
    initial_states
        Initial states to iterate over. Every state will be run exactly once.
    policy
        Policy to train against.
    reward
        Reward function.
    vessel_kwargs
        Keyword arguments passed to :class:`TrainingVessel`, like ``episode_per_iter``.
    trainer_kwargs
        Keyword arguments passed to :class:`Trainer`, like ``finite_env_type``, ``concurrency``.
    """
    # Bundle every training ingredient into a vessel. The given initial states
    # are used for training only — no validation / test split happens here.
    training_vessel = TrainingVessel(
        simulator_fn=simulator_fn,
        state_interpreter=state_interpreter,
        action_interpreter=action_interpreter,
        policy=policy,
        train_initial_states=initial_states,
        reward=reward,
        **vessel_kwargs,
    )
    Trainer(**trainer_kwargs).fit(training_vessel)
19,633 | from __future__ import annotations
from typing import Any, Callable, Dict, List, Sequence, cast
from tianshou.policy import BasePolicy
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.reward import Reward
from qlib.rl.simulator import InitialStateType, Simulator
from qlib.rl.utils import FiniteEnvType, LogWriter
from .trainer import Trainer
from .vessel import TrainingVessel
class StateInterpreter(Generic[StateType, ObsType], Interpreter):
    """State Interpreter that interprets the execution result of qlib executor into an RL env state."""

    def observation_space(self) -> gym.Space:
        # NOTE(review): ``validate`` references ``self.observation_space`` without
        # calling it, which suggests this is a ``@property`` upstream — confirm.
        raise NotImplementedError()

    def __call__(self, simulator_state: StateType) -> ObsType:
        # Interpret first, then check the produced observation against the
        # declared observation space before handing it to the policy.
        obs = self.interpret(simulator_state)
        self.validate(obs)
        return obs

    def validate(self, obs: ObsType) -> None:
        """Validate whether an observation belongs to the pre-defined observation space."""
        _gym_space_contains(self.observation_space, obs)

    def interpret(self, simulator_state: StateType) -> ObsType:
        """Interpret the state of simulator.

        Parameters
        ----------
        simulator_state
            Retrieved with ``simulator.get_state()``.

        Returns
        -------
        State needed by policy. Should conform with the state space defined in ``observation_space``.
        """
        raise NotImplementedError("interpret is not implemented!")
class ActionInterpreter(Generic[StateType, PolicyActType, ActType], Interpreter):
    """Action Interpreter that interprets an RL agent action into qlib orders."""

    def action_space(self) -> gym.Space:
        # NOTE(review): ``validate`` references ``self.action_space`` without
        # calling it, which suggests this is a ``@property`` upstream — confirm.
        raise NotImplementedError()

    def __call__(self, simulator_state: StateType, action: PolicyActType) -> ActType:
        # Validate the raw policy action before converting it for the simulator.
        self.validate(action)
        obs = self.interpret(simulator_state, action)
        return obs

    def validate(self, action: PolicyActType) -> None:
        """Validate whether an action belongs to the pre-defined action space."""
        _gym_space_contains(self.action_space, action)

    def interpret(self, simulator_state: StateType, action: PolicyActType) -> ActType:
        """Convert the policy action to simulator action.

        Parameters
        ----------
        simulator_state
            Retrieved with ``simulator.get_state()``.
        action
            Raw action given by policy.

        Returns
        -------
        The action needed by simulator.
        """
        raise NotImplementedError("interpret is not implemented!")
class Reward(Generic[SimulatorState]):
    """
    Reward calculation component that takes a single argument: state of simulator. Returns a real number: reward.

    Subclass should implement ``reward(simulator_state)`` to implement their own reward calculation recipe.
    """

    # Back-reference to the env wrapper; assigned externally at runtime and
    # required before ``log`` becomes usable.
    env: Optional[EnvWrapper] = None

    def __call__(self, simulator_state: SimulatorState) -> float:
        return self.reward(simulator_state)

    def reward(self, simulator_state: SimulatorState) -> float:
        """Implement this method for your own reward."""
        raise NotImplementedError("Implement reward calculation recipe in `reward()`.")

    def log(self, name: str, value: Any) -> None:
        """Record a scalar (e.g., a reward component) through the env wrapper's logger."""
        # ``env`` must have been attached by the framework before logging.
        assert self.env is not None
        self.env.logger.add_scalar(name, value)
class Simulator(Generic[InitialStateType, StateType, ActType]):
    """
    Simulator that resets with ``__init__``, and transits with ``step(action)``.

    To make the data-flow clear, we make the following restrictions to Simulator:

    1. The only way to modify the inner status of a simulator is by using ``step(action)``.
    2. External modules can *read* the status of a simulator by using ``simulator.get_state()``,
       and check whether the simulator is in the ending state by calling ``simulator.done()``.

    A simulator is defined to be bounded with three types:

    - *InitialStateType* that is the type of the data used to create the simulator.
    - *StateType* that is the type of the **status** (state) of the simulator.
    - *ActType* that is the type of the **action**, which is the input received in each step.

    Different simulators might share the same StateType. For example, when they are dealing with the same task,
    but with different simulation implementation. With the same type, they can safely share other components in the MDP.

    Simulators are ephemeral. The lifecycle of a simulator starts with an initial state, and ends with the trajectory.
    In another word, when the trajectory ends, simulator is recycled.
    If simulators want to share context between (e.g., for speed-up purposes),
    this could be done by accessing the weak reference of environment wrapper.

    Attributes
    ----------
    env
        A reference of env-wrapper, which could be useful in some corner cases.
        Simulators are discouraged to use this, because it's prone to induce errors.
    """

    env: Optional[EnvWrapper] = None

    def __init__(self, initial: InitialStateType, **kwargs: Any) -> None:
        # Base class keeps no state; subclasses build their world from ``initial``.
        pass

    def step(self, action: ActType) -> None:
        """Receives an action of ActType.

        Simulator should update its internal state, and return None.
        The updated state can be retrieved with ``simulator.get_state()``.
        """
        raise NotImplementedError()

    def get_state(self) -> StateType:
        """Return the current status of the simulator (read-only for callers)."""
        raise NotImplementedError()

    def done(self) -> bool:
        """Check whether the simulator is in a "done" state.

        When simulator is in a "done" state,
        it should no longer receives any ``step`` request.
        As simulators are ephemeral, to reset the simulator,
        the old one should be destroyed and a new simulator can be created.
        """
        raise NotImplementedError()
class Trainer:
    """
    Utility to train a policy on a particular task.

    Different from traditional DL trainer, the iteration of this trainer is "collect",
    rather than "epoch", or "mini-batch".
    In each collect, :class:`Collector` collects a number of policy-env interactions, and accumulates
    them into a replay buffer. This buffer is used as the "data" to train the policy.
    At the end of each collect, the policy is *updated* several times.

    The API has some resemblance with `PyTorch Lightning <https://pytorch-lightning.readthedocs.io/>`__,
    but it's essentially different because this trainer is built for RL applications, and thus
    most configurations are under RL context.
    We are still looking for ways to incorporate existing trainer libraries, because it looks like
    big efforts to build a trainer as powerful as those libraries, and also, that's not our primary goal.

    It's essentially different from
    `tianshou's built-in trainers <https://tianshou.readthedocs.io/en/master/api/tianshou.trainer.html>`__,
    as it's far much more complicated than that.

    Parameters
    ----------
    max_iters
        Maximum iterations before stopping.
    val_every_n_iters
        Perform validation every n iterations (i.e., training collects).
    logger
        Logger to record the backtest results. Logger must be present because
        without logger, all information will be lost.
    finite_env_type
        Type of finite env implementation.
    concurrency
        Parallel workers.
    fast_dev_run
        Create a subset for debugging.
        How this is implemented depends on the implementation of training vessel.
        For :class:`~qlib.rl.vessel.TrainingVessel`, if greater than zero,
        a random subset sized ``fast_dev_run`` will be used
        instead of ``train_initial_states`` and ``val_initial_states``.
    """

    should_stop: bool
    """Set to stop the training."""

    metrics: dict
    """Numeric metrics produced in train/val/test.

    In the middle of training / validation, metrics will be of the latest episode.
    When each iteration of training / validation finishes, metrics will be the aggregation
    of all episodes encountered in this iteration.

    Cleared on every new iteration of training.

    In fit, validation metrics will be prefixed with ``val/``.
    """

    current_iter: int
    """Current iteration (collect) of training."""

    loggers: List[LogWriter]
    """A list of log writers."""

    def __init__(
        self,
        *,
        max_iters: int | None = None,
        val_every_n_iters: int | None = None,
        loggers: LogWriter | List[LogWriter] | None = None,
        callbacks: List[Callback] | None = None,
        finite_env_type: FiniteEnvType = "subproc",
        concurrency: int = 2,
        fast_dev_run: int | None = None,
    ):
        self.max_iters = max_iters
        self.val_every_n_iters = val_every_n_iters

        # Normalize ``loggers`` into a list (a single writer or None is accepted).
        if isinstance(loggers, list):
            self.loggers = loggers
        elif isinstance(loggers, LogWriter):
            self.loggers = [loggers]
        else:
            self.loggers = []

        # A LogBuffer is always appended so ``self.metrics`` gets populated
        # via ``_metrics_callback`` even when no user logger is attached.
        self.loggers.append(LogBuffer(self._metrics_callback, loglevel=self._min_loglevel()))

        self.callbacks: List[Callback] = callbacks if callbacks is not None else []
        self.finite_env_type = finite_env_type
        self.concurrency = concurrency
        self.fast_dev_run = fast_dev_run

        self.current_stage: Literal["train", "val", "test"] = "train"

        # Placeholder until ``fit`` / ``test`` assigns a real vessel.
        self.vessel: TrainingVesselBase = cast(TrainingVesselBase, None)

    def initialize(self):
        """Initialize the whole training process.

        The states here should be synchronized with state_dict.
        """
        self.should_stop = False
        self.current_iter = 0
        self.current_episode = 0
        self.current_stage = "train"

    def initialize_iter(self):
        """Initialize one iteration / collect."""
        self.metrics = {}

    def state_dict(self) -> dict:
        """Putting every states of current training into a dict, at best effort.

        It doesn't try to handle all the possible kinds of states in the middle of one training collect.
        For most cases at the end of each iteration, things should be usually correct.

        Note that it's also intended behavior that replay buffer data in the collector will be lost.
        """
        return {
            "vessel": self.vessel.state_dict(),
            "callbacks": {name: callback.state_dict() for name, callback in self.named_callbacks().items()},
            "loggers": {name: logger.state_dict() for name, logger in self.named_loggers().items()},
            "should_stop": self.should_stop,
            "current_iter": self.current_iter,
            "current_episode": self.current_episode,
            "current_stage": self.current_stage,
            "metrics": self.metrics,
        }

    def get_policy_state_dict(ckpt_path: Path) -> OrderedDict:
        """Extract only the policy weights from a checkpoint file.

        NOTE(review): this def takes no ``self`` and reads like a module-level
        utility — confirm whether it belongs at class scope or should be a
        ``@staticmethod`` / free function.
        """
        state_dict = torch.load(ckpt_path, map_location="cpu")
        if "vessel" in state_dict:
            # Full trainer checkpoint: drill down to the policy weights.
            state_dict = state_dict["vessel"]["policy"]
        return state_dict

    def load_state_dict(self, state_dict: dict) -> None:
        """Load all states into current trainer."""
        self.vessel.load_state_dict(state_dict["vessel"])
        for name, callback in self.named_callbacks().items():
            callback.load_state_dict(state_dict["callbacks"][name])
        for name, logger in self.named_loggers().items():
            logger.load_state_dict(state_dict["loggers"][name])
        self.should_stop = state_dict["should_stop"]
        self.current_iter = state_dict["current_iter"]
        self.current_episode = state_dict["current_episode"]
        self.current_stage = state_dict["current_stage"]
        self.metrics = state_dict["metrics"]

    def named_callbacks(self) -> Dict[str, Callback]:
        """Retrieve a collection of callbacks where each one has a name.

        Useful when saving checkpoints.
        """
        return _named_collection(self.callbacks)

    def named_loggers(self) -> Dict[str, LogWriter]:
        """Retrieve a collection of loggers where each one has a name.

        Useful when saving checkpoints.
        """
        return _named_collection(self.loggers)

    def fit(self, vessel: TrainingVesselBase, ckpt_path: Path | None = None) -> None:
        """Train the RL policy upon the defined simulator.

        Parameters
        ----------
        vessel
            A bundle of all elements used in training.
        ckpt_path
            Load a pre-trained / paused training checkpoint.
        """
        self.vessel = vessel
        vessel.assign_trainer(self)

        if ckpt_path is not None:
            _logger.info("Resuming states from %s", str(ckpt_path))
            self.load_state_dict(torch.load(ckpt_path))
        else:
            self.initialize()

        self._call_callback_hooks("on_fit_start")
        while not self.should_stop:
            msg = f"\n{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\tTrain iteration {self.current_iter + 1}/{self.max_iters}"
            _logger.info(msg)

            self.initialize_iter()

            self._call_callback_hooks("on_iter_start")

            self.current_stage = "train"
            self._call_callback_hooks("on_train_start")

            # TODO
            # Add a feature that supports reloading the training environment every few iterations.
            with _wrap_context(vessel.train_seed_iterator()) as iterator:
                vector_env = self.venv_from_iterator(iterator)
                self.vessel.train(vector_env)
                del vector_env  # FIXME: Explicitly delete this object to avoid memory leak.
            self._call_callback_hooks("on_train_end")

            if self.val_every_n_iters is not None and (self.current_iter + 1) % self.val_every_n_iters == 0:
                # Implementation of validation loop
                self.current_stage = "val"
                self._call_callback_hooks("on_validate_start")
                with _wrap_context(vessel.val_seed_iterator()) as iterator:
                    vector_env = self.venv_from_iterator(iterator)
                    self.vessel.validate(vector_env)
                    del vector_env  # FIXME: Explicitly delete this object to avoid memory leak.
                self._call_callback_hooks("on_validate_end")

            # This iteration is considered complete.
            # Bumping the current iteration counter.
            self.current_iter += 1

            if self.max_iters is not None and self.current_iter >= self.max_iters:
                self.should_stop = True

            self._call_callback_hooks("on_iter_end")
        self._call_callback_hooks("on_fit_end")

    def test(self, vessel: TrainingVesselBase) -> None:
        """Test the RL policy against the simulator.

        The simulator will be fed with data generated in ``test_seed_iterator``.

        Parameters
        ----------
        vessel
            A bundle of all related elements.
        """
        self.vessel = vessel
        vessel.assign_trainer(self)

        self.initialize_iter()

        self.current_stage = "test"
        self._call_callback_hooks("on_test_start")
        with _wrap_context(vessel.test_seed_iterator()) as iterator:
            vector_env = self.venv_from_iterator(iterator)
            self.vessel.test(vector_env)
            del vector_env  # FIXME: Explicitly delete this object to avoid memory leak.
        self._call_callback_hooks("on_test_end")

    def venv_from_iterator(self, iterator: Iterable[InitialStateType]) -> FiniteVectorEnv:
        """Create a vectorized environment from iterator and the training vessel."""

        def env_factory():
            # FIXME: state_interpreter and action_interpreter are stateful (having a weakref of env),
            # and could be thread unsafe.
            # I'm not sure whether it's a design flaw.
            # I'll rethink about this when designing the trainer.

            if self.finite_env_type == "dummy":
                # We could only experience the "threading-unsafe" problem in dummy.
                state = copy.deepcopy(self.vessel.state_interpreter)
                action = copy.deepcopy(self.vessel.action_interpreter)
                rew = copy.deepcopy(self.vessel.reward)
            else:
                state = self.vessel.state_interpreter
                action = self.vessel.action_interpreter
                rew = self.vessel.reward

            return EnvWrapper(
                self.vessel.simulator_fn,
                state,
                action,
                iterator,
                rew,
                logger=LogCollector(min_loglevel=self._min_loglevel()),
            )

        return vectorize_env(
            env_factory,
            self.finite_env_type,
            self.concurrency,
            self.loggers,
        )

    def _metrics_callback(self, on_episode: bool, on_collect: bool, log_buffer: LogBuffer) -> None:
        # Callback installed on the internal LogBuffer; keeps ``self.metrics`` fresh.
        if on_episode:
            # Update the global counter.
            self.current_episode = log_buffer.global_episode
            metrics = log_buffer.episode_metrics()
        elif on_collect:
            # Update the latest metrics.
            metrics = log_buffer.collect_metrics()
        # NOTE(review): if both flags are False, ``metrics`` is unbound and the
        # update below raises UnboundLocalError — confirm callers always set one flag.
        if self.current_stage == "val":
            metrics = {"val/" + name: value for name, value in metrics.items()}
        self.metrics.update(metrics)

    def _call_callback_hooks(self, hook_name: str, *args: Any, **kwargs: Any) -> None:
        # Dispatch ``hook_name`` to every callback, passing trainer and vessel first.
        for callback in self.callbacks:
            fn = getattr(callback, hook_name)
            fn(self, self.vessel, *args, **kwargs)

    def _min_loglevel(self):
        """Lowest log level across attached writers (PERIODIC when there are none)."""
        if not self.loggers:
            return LogLevel.PERIODIC
        else:
            # To save bandwidth
            return min(lg.loglevel for lg in self.loggers)
class TrainingVessel(TrainingVesselBase):
    """The default implementation of training vessel.

    ``__init__`` accepts a sequence of initial states so that iterator can be created.
    ``train``, ``validate``, ``test`` each do one collect (and also update in train).
    By default, the train initial states will be repeated infinitely during training,
    and collector will control the number of episodes for each iteration.
    In validation and testing, the val / test initial states will be used exactly once.

    Extra hyper-parameters (only used in train) include:

    - ``buffer_size``: Size of replay buffer.
    - ``episode_per_iter``: Episodes per collect at training. Can be overridden by fast dev run.
    - ``update_kwargs``: Keyword arguments appearing in ``policy.update``.
      For example, ``dict(repeat=10, batch_size=64)``.
    """

    def __init__(
        self,
        *,
        simulator_fn: Callable[[InitialStateType], Simulator[InitialStateType, StateType, ActType]],
        state_interpreter: StateInterpreter[StateType, ObsType],
        action_interpreter: ActionInterpreter[StateType, PolicyActType, ActType],
        policy: BasePolicy,
        reward: Reward,
        train_initial_states: Sequence[InitialStateType] | None = None,
        val_initial_states: Sequence[InitialStateType] | None = None,
        test_initial_states: Sequence[InitialStateType] | None = None,
        buffer_size: int = 20000,
        episode_per_iter: int = 1000,
        update_kwargs: Dict[str, Any] = cast(Dict[str, Any], None),
    ):
        self.simulator_fn = simulator_fn  # type: ignore
        self.state_interpreter = state_interpreter
        self.action_interpreter = action_interpreter
        self.policy = policy
        self.reward = reward
        self.train_initial_states = train_initial_states
        self.val_initial_states = val_initial_states
        self.test_initial_states = test_initial_states
        self.buffer_size = buffer_size
        self.episode_per_iter = episode_per_iter
        # ``cast(..., None)`` default avoids a shared mutable default dict;
        # normalize it here.
        self.update_kwargs = update_kwargs or {}

    def train_seed_iterator(self) -> ContextManager[Iterable[InitialStateType]] | Iterable[InitialStateType]:
        """Return a queue of training initial states, shuffled and repeated forever."""
        if self.train_initial_states is not None:
            _logger.info("Training initial states collection size: %d", len(self.train_initial_states))
            # Implement fast_dev_run here.
            train_initial_states = self._random_subset("train", self.train_initial_states, self.trainer.fast_dev_run)
            # repeat=-1: loop indefinitely; the collector decides when to stop.
            return DataQueue(train_initial_states, repeat=-1, shuffle=True)
        return super().train_seed_iterator()

    def val_seed_iterator(self) -> ContextManager[Iterable[InitialStateType]] | Iterable[InitialStateType]:
        """Return a queue of validation initial states, each used exactly once."""
        if self.val_initial_states is not None:
            _logger.info("Validation initial states collection size: %d", len(self.val_initial_states))
            val_initial_states = self._random_subset("val", self.val_initial_states, self.trainer.fast_dev_run)
            return DataQueue(val_initial_states, repeat=1)
        return super().val_seed_iterator()

    def test_seed_iterator(self) -> ContextManager[Iterable[InitialStateType]] | Iterable[InitialStateType]:
        """Return a queue of test initial states, each used exactly once."""
        if self.test_initial_states is not None:
            _logger.info("Testing initial states collection size: %d", len(self.test_initial_states))
            test_initial_states = self._random_subset("test", self.test_initial_states, self.trainer.fast_dev_run)
            return DataQueue(test_initial_states, repeat=1)
        return super().test_seed_iterator()

    def train(self, vector_env: FiniteVectorEnv) -> Dict[str, Any]:
        """Create a collector and collects ``episode_per_iter`` episodes.
        Update the policy on the collected replay buffer.
        """
        self.policy.train()
        with vector_env.collector_guard():
            collector = Collector(
                self.policy, vector_env, VectorReplayBuffer(self.buffer_size, len(vector_env)), exploration_noise=True
            )

            # Number of episodes collected in each training iteration can be overridden by fast dev run.
            if self.trainer.fast_dev_run is not None:
                episodes = self.trainer.fast_dev_run
            else:
                episodes = self.episode_per_iter

            col_result = collector.collect(n_episode=episodes)
            # sample_size=0: the update consumes the whole collected buffer.
            update_result = self.policy.update(sample_size=0, buffer=collector.buffer, **self.update_kwargs)
            res = {**col_result, **update_result}
            self.log_dict(res)
            return res

    def validate(self, vector_env: FiniteVectorEnv) -> Dict[str, Any]:
        """Run the policy (eval mode) over the validation states and log the metrics."""
        self.policy.eval()
        with vector_env.collector_guard():
            test_collector = Collector(self.policy, vector_env)
            # n_step = INF * num_envs: effectively unbounded; presumably the finite
            # env ends the collect once the seed iterator is exhausted — confirm.
            res = test_collector.collect(n_step=INF * len(vector_env))
            self.log_dict(res)
            return res

    def test(self, vector_env: FiniteVectorEnv) -> Dict[str, Any]:
        """Run the policy (eval mode) over the test states and log the metrics."""
        self.policy.eval()
        with vector_env.collector_guard():
            test_collector = Collector(self.policy, vector_env)
            res = test_collector.collect(n_step=INF * len(vector_env))
            self.log_dict(res)
            return res
def _random_subset(name: str, collection: Sequence[T], size: int | None) -> Sequence[T]:
    """Draw a random subset of at most ``size`` items from ``collection``.

    ``size=None`` means fast dev run is disabled and the collection is
    returned unchanged; otherwise a subset is sampled without replacement
    and the reduction is logged under ``name``.
    """
    if size is None:
        # Fast dev run disabled -> keep the full collection.
        return collection
    permuted = np.random.permutation(len(collection))
    picked = [collection[i] for i in permuted[:size]]
    _logger.info(
        "Fast running in development mode. Cut %s initial states from %d to %d.",
        name,
        len(collection),
        len(picked),
    )
    return picked
The provided code snippet includes necessary dependencies for implementing the `backtest` function. Write a Python function `def backtest( simulator_fn: Callable[[InitialStateType], Simulator], state_interpreter: StateInterpreter, action_interpreter: ActionInterpreter, initial_states: Sequence[InitialStateType], policy: BasePolicy, logger: LogWriter | List[LogWriter], reward: Reward | None = None, finite_env_type: FiniteEnvType = "subproc", concurrency: int = 2, ) -> None` to solve the following problem:
Backtest with the parallelism provided by RL framework. Experimental API. Parameters might change shortly. Parameters ---------- simulator_fn Callable receiving initial seed, returning a simulator. state_interpreter Interprets the state of simulators. action_interpreter Interprets the policy actions. initial_states Initial states to iterate over. Every state will be run exactly once. policy Policy to test against. logger Logger to record the backtest results. Logger must be present because without logger, all information will be lost. reward Optional reward function. For backtest, this is for testing the rewards and logging them only. finite_env_type Type of finite env implementation. concurrency Parallel workers.
Here is the function:
def backtest(
    simulator_fn: Callable[[InitialStateType], Simulator],
    state_interpreter: StateInterpreter,
    action_interpreter: ActionInterpreter,
    initial_states: Sequence[InitialStateType],
    policy: BasePolicy,
    logger: LogWriter | List[LogWriter],
    reward: Reward | None = None,
    finite_env_type: FiniteEnvType = "subproc",
    concurrency: int = 2,
) -> None:
    """Backtest with the parallelism provided by RL framework.

    Experimental API. Parameters might change shortly.

    Parameters
    ----------
    simulator_fn
        Callable receiving initial seed, returning a simulator.
    state_interpreter
        Interprets the state of simulators.
    action_interpreter
        Interprets the policy actions.
    initial_states
        Initial states to iterate over. Every state will be run exactly once.
    policy
        Policy to test against.
    logger
        Logger to record the backtest results. Logger must be present because
        without logger, all information will be lost.
    reward
        Optional reward function. For backtest, this is for testing the rewards
        and logging them only.
    finite_env_type
        Type of finite env implementation.
    concurrency
        Parallel workers.
    """
    # Reuse the training vessel for backtest: only ``test_initial_states`` is
    # filled, so only the trainer's test path will run.
    backtest_vessel = TrainingVessel(
        simulator_fn=simulator_fn,
        state_interpreter=state_interpreter,
        action_interpreter=action_interpreter,
        policy=policy,
        test_initial_states=initial_states,
        reward=cast(Reward, reward),  # cast away Optional: None is tolerated here
    )
    Trainer(
        finite_env_type=finite_env_type,
        concurrency=concurrency,
        loggers=logger,
    ).test(backtest_vessel)
19,634 | from __future__ import annotations
import collections
import copy
from contextlib import AbstractContextManager, contextmanager
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List, OrderedDict, Sequence, TypeVar, cast
import torch
from qlib.log import get_module_logger
from qlib.rl.simulator import InitialStateType
from qlib.rl.utils import EnvWrapper, FiniteEnvType, LogBuffer, LogCollector, LogLevel, LogWriter, vectorize_env
from qlib.rl.utils.finite_env import FiniteVectorEnv
from qlib.typehint import Literal
from .callbacks import Callback
from .vessel import TrainingVesselBase
The provided code snippet includes necessary dependencies for implementing the `_wrap_context` function. Write a Python function `def _wrap_context(obj)` to solve the following problem:
Make any object a (possibly dummy) context manager.
Here is the function:
def _wrap_context(obj):
"""Make any object a (possibly dummy) context manager."""
if isinstance(obj, AbstractContextManager):
# obj has __enter__ and __exit__
with obj as ctx:
yield ctx
else:
yield obj | Make any object a (possibly dummy) context manager. |
19,635 | from __future__ import annotations
import collections
import copy
from contextlib import AbstractContextManager, contextmanager
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List, OrderedDict, Sequence, TypeVar, cast
import torch
from qlib.log import get_module_logger
from qlib.rl.simulator import InitialStateType
from qlib.rl.utils import EnvWrapper, FiniteEnvType, LogBuffer, LogCollector, LogLevel, LogWriter, vectorize_env
from qlib.rl.utils.finite_env import FiniteVectorEnv
from qlib.typehint import Literal
from .callbacks import Callback
from .vessel import TrainingVesselBase
T = TypeVar("T")
The provided code snippet includes necessary dependencies for implementing the `_named_collection` function. Write a Python function `def _named_collection(seq: Sequence[T]) -> Dict[str, T]` to solve the following problem:
Convert a list into a dict, where each item is named with its type.
Here is the function:
def _named_collection(seq: Sequence[T]) -> Dict[str, T]:
"""Convert a list into a dict, where each item is named with its type."""
res = {}
retry_cnt: collections.Counter = collections.Counter()
for item in seq:
typename = type(item).__name__.lower()
key = typename if retry_cnt[typename] == 0 else f"{typename}{retry_cnt[typename]}"
retry_cnt[typename] += 1
res[key] = item
return res | Convert a list into a dict, where each item is named with its type. |
19,636 | from __future__ import annotations
from pathlib import Path
from typing import cast, List
import cachetools
import pandas as pd
import pickle
import os
from qlib.backtest import Exchange, Order
from qlib.backtest.decision import TradeRange, TradeRangeByTime
from qlib.constant import EPS_T
from .base import BaseIntradayBacktestData, BaseIntradayProcessedData, ProcessedDataProvider
def get_ticks_slice(
    ticks_index: pd.DatetimeIndex,
    start: pd.Timestamp,
    end: pd.Timestamp,
    include_end: bool = False,
) -> pd.DatetimeIndex:
    """Slice ``ticks_index`` down to the ticks in ``[start, end)`` / ``[start, end]``.

    By default the right edge is open: ``EPS_T`` is subtracted from ``end`` so a
    tick exactly at ``end`` is excluded; pass ``include_end=True`` to keep it.
    """
    stop = end if include_end else end - EPS_T
    return ticks_index[ticks_index.slice_indexer(start, stop)]
class IntradayBacktestData(BaseIntradayBacktestData):
    """Backtest data for Qlib simulator.

    Wraps an :class:`Exchange` and pre-fetches the deal-price and volume series
    of a single order over the ticks on which the order is allowed to trade.
    """

    def __init__(
        self,
        order: Order,
        exchange: Exchange,
        ticks_index: pd.DatetimeIndex,
        ticks_for_order: pd.DatetimeIndex,
    ) -> None:
        self._order = order
        self._exchange = exchange
        # The order may only trade within `ticks_for_order`; cache its bounds.
        self._start_time = ticks_for_order[0]
        self._end_time = ticks_for_order[-1]
        self.ticks_index = ticks_index
        self.ticks_for_order = ticks_for_order

        deal_price = self._exchange.get_deal_price(
            self._order.stock_id,
            self._start_time,
            self._end_time,
            direction=self._order.direction,
            method=None,
        )
        self._deal_price = cast(pd.Series, deal_price)

        volume = self._exchange.get_volume(
            self._order.stock_id,
            self._start_time,
            self._end_time,
            method=None,
        )
        self._volume = cast(pd.Series, volume)

    def __repr__(self) -> str:
        return (
            f"Order: {self._order}, Exchange: {self._exchange}, "
            f"Start time: {self._start_time}, End time: {self._end_time}"
        )

    def __len__(self) -> int:
        return len(self._deal_price)

    def get_deal_price(self) -> pd.Series:
        """Deal price per tick within the order's tradable window."""
        return self._deal_price

    def get_volume(self) -> pd.Series:
        """Traded volume per tick within the order's tradable window."""
        return self._volume

    def get_time_index(self) -> pd.DatetimeIndex:
        # Each quote_df index entry looks like a tuple whose second element is
        # the timestamp (cf. the "datetime" column used by load_backtest_data).
        return pd.DatetimeIndex([e[1] for e in list(self._exchange.quote_df.index)])
class TradeRange:
    """Abstract limitation on which part of a session an order may trade in.

    The outer strategy attaches a ``TradeRange`` to its decision; the inner
    strategy then queries it — passing its own trade calendar, since the
    framework's steps are integer-indexed — to learn the tradable range.
    """

    def __call__(self, trade_calendar: TradeCalendarManager) -> Tuple[int, int]:
        """Return the integer index range that is tradable.

        Parameters
        ----------
        trade_calendar : TradeCalendarManager
            The trade calendar of the inner strategy requesting the range.

        Returns
        -------
        Tuple[int, int]:
            the start index and end index which are tradable

        Raises
        ------
        NotImplementedError:
            Exceptions are raised when no range limitation
        """
        raise NotImplementedError(f"Please implement the `__call__` method")

    def clip_time_range(self, start_time: pd.Timestamp, end_time: pd.Timestamp) -> Tuple[pd.Timestamp, pd.Timestamp]:
        """Intersect ``[start_time, end_time]`` (both closed) with this range's own rule.

        Returns
        -------
        Tuple[pd.Timestamp, pd.Timestamp]:
            The tradable time range.
        """
        raise NotImplementedError(f"Please implement the `clip_time_range` method")
class TradeRangeByTime(TradeRange):
    """Restrict trading to a fixed intra-day clock-time window."""

    def __init__(self, start_time: str | time, end_time: str | time) -> None:
        """
        This is a callable class.

        **NOTE**:
        - It is designed for minute-bar for intra-day trading!!!!!
        - Both start_time and end_time are **closed** in the range

        Parameters
        ----------
        start_time : str | time
            e.g. "9:30"
        end_time : str | time
            e.g. "14:30"
        """
        self.start_time = start_time if not isinstance(start_time, str) else pd.Timestamp(start_time).time()
        self.end_time = end_time if not isinstance(end_time, str) else pd.Timestamp(end_time).time()
        assert self.start_time < self.end_time

    def __call__(self, trade_calendar: TradeCalendarManager) -> Tuple[int, int]:
        if trade_calendar is None:
            raise NotImplementedError("trade_calendar is necessary for getting TradeRangeByTime.")
        day = trade_calendar.start_time.date()
        return trade_calendar.get_range_idx(
            concat_date_time(day, self.start_time),
            concat_date_time(day, self.end_time),
        )

    def clip_time_range(self, start_time: pd.Timestamp, end_time: pd.Timestamp) -> Tuple[pd.Timestamp, pd.Timestamp]:
        # Only start_time's date matters: the window is intra-day, while
        # end_time's date may fall on the next day because it is for slicing.
        day = start_time.date()
        lo = concat_date_time(day, self.start_time)
        hi = concat_date_time(day, self.end_time)
        return max(lo, start_time), min(hi, end_time)
def load_backtest_data(
    order: Order,
    trade_exchange: Exchange,
    trade_range: TradeRange,
) -> IntradayBacktestData:
    """Build :class:`IntradayBacktestData` for one order from the exchange quotes.

    The exchange's quote ticks are first restricted to the order's
    ``[start_time, end_time]`` window; if the trade range carries explicit
    clock times, the order's tradable ticks are narrowed further.
    """
    all_ticks = pd.DatetimeIndex(trade_exchange.quote_df.reset_index()["datetime"])
    within_order = (order.start_time <= all_ticks) & (all_ticks <= order.end_time)
    ticks_index = all_ticks[within_order]

    if isinstance(trade_range, TradeRangeByTime):
        ticks_for_order = get_ticks_slice(
            ticks_index,
            trade_range.start_time,
            trade_range.end_time,
            include_end=True,
        )
    else:
        ticks_for_order = None  # FIXME: implement this logic

    return IntradayBacktestData(
        order=order,
        exchange=trade_exchange,
        ticks_index=ticks_index,
        ticks_for_order=ticks_for_order,
    )
19,637 | from __future__ import annotations
from pathlib import Path
from typing import cast, List
import cachetools
import pandas as pd
import pickle
import os
from qlib.backtest import Exchange, Order
from qlib.backtest.decision import TradeRange, TradeRangeByTime
from qlib.constant import EPS_T
from .base import BaseIntradayBacktestData, BaseIntradayProcessedData, ProcessedDataProvider
class HandlerIntradayProcessedData(BaseIntradayProcessedData):
    """Subclass of IntradayProcessedData. Used to handle handler (bin format) style data."""

    def __init__(
        self,
        data_dir: Path,
        stock_id: str,
        date: pd.Timestamp,
        feature_columns_today: List[str],
        feature_columns_yesterday: List[str],
        backtest: bool = False,
        index_only: bool = False,
    ) -> None:
        # Drop the "instrument" index level so frames are indexed by datetime only.
        def _drop_stock_id(df: pd.DataFrame) -> pd.DataFrame:
            df = df.reset_index()
            if "instrument" in df.columns:
                df = df.drop(columns=["instrument"])
            return df.set_index(["datetime"])

        # One pickle per stock, under "backtest" or "feature" depending on usage.
        path = os.path.join(data_dir, "backtest" if backtest else "feature", f"{stock_id}.pkl")
        # Whole-day window covering the requested date.
        start_time, end_time = date.replace(hour=0, minute=0, second=0), date.replace(hour=23, minute=59, second=59)
        # NOTE(review): pickle.load on an untrusted file is unsafe — this assumes
        # data_dir is a trusted local directory; confirm upstream.
        with open(path, "rb") as fstream:
            dataset = pickle.load(fstream)
        data = dataset.handler.fetch(pd.IndexSlice[stock_id, start_time:end_time], level=None)
        if index_only:
            # Empty column selection keeps only the index structure.
            self.today = _drop_stock_id(data[[]])
            self.yesterday = _drop_stock_id(data[[]])
        else:
            self.today = _drop_stock_id(data[feature_columns_today])
            self.yesterday = _drop_stock_id(data[feature_columns_yesterday])

    def __repr__(self) -> str:
        with pd.option_context("memory_usage", False, "display.max_info_columns", 1, "display.large_repr", "info"):
            return f"{self.__class__.__name__}({self.today}, {self.yesterday})"
# NOTE(review): the `cache=` / `key=` lines below were orphaned keyword
# arguments — the `@cachetools.cached(` decorator header was evidently lost
# during extraction (as written they parsed as stray module-level tuple
# assignments). Restore the decorator so repeated loads of the same
# (stock, date, backtest, index_only) key hit the LRU cache.
@cachetools.cached(  # type: ignore
    cache=cachetools.LRUCache(100),  # 100 * 50K = 5MB
    key=lambda data_dir, stock_id, date, feature_columns_today, feature_columns_yesterday, backtest, index_only: (
        stock_id,
        date,
        backtest,
        index_only,
    ),
)
def load_handler_intraday_processed_data(
    data_dir: Path,
    stock_id: str,
    date: pd.Timestamp,
    feature_columns_today: List[str],
    feature_columns_yesterday: List[str],
    backtest: bool = False,
    index_only: bool = False,
) -> HandlerIntradayProcessedData:
    """Load (and LRU-cache) processed intraday data for one stock/date from handler files."""
    return HandlerIntradayProcessedData(
        data_dir, stock_id, date, feature_columns_today, feature_columns_yesterday, backtest, index_only
    )
19,638 | from __future__ import annotations
from functools import lru_cache
from pathlib import Path
from typing import List, Sequence, cast
import cachetools
import numpy as np
import pandas as pd
from cachetools.keys import hashkey
from qlib.backtest.decision import Order, OrderDir
from qlib.rl.data.base import BaseIntradayBacktestData, BaseIntradayProcessedData, ProcessedDataProvider
from qlib.typehint import Literal
def _infer_processed_data_column_names(shape: int) -> List[str]:
if shape == 16:
return [
"$open",
"$high",
"$low",
"$close",
"$vwap",
"$bid",
"$ask",
"$volume",
"$bidV",
"$bidV1",
"$bidV3",
"$bidV5",
"$askV",
"$askV1",
"$askV3",
"$askV5",
]
if shape == 6:
return ["$high", "$low", "$open", "$close", "$vwap", "$volume"]
elif shape == 5:
return ["$high", "$low", "$open", "$close", "$volume"]
raise ValueError(f"Unrecognized data shape: {shape}") | null |
19,639 | from __future__ import annotations
from functools import lru_cache
from pathlib import Path
from typing import List, Sequence, cast
import cachetools
import numpy as np
import pandas as pd
from cachetools.keys import hashkey
from qlib.backtest.decision import Order, OrderDir
from qlib.rl.data.base import BaseIntradayBacktestData, BaseIntradayProcessedData, ProcessedDataProvider
from qlib.typehint import Literal
# NOTE(review): the body of `_find_pickle` appears to have been stripped during
# extraction — only the signature remains (a bare `def` with no suite is a
# syntax error). TODO: restore the original pickle-file lookup logic.
def _find_pickle(filename_without_suffix: Path) -> Path:
def _read_pickle(filename_without_suffix: Path) -> pd.DataFrame:
    """Read a pickled DataFrame via ``_find_pickle`` and coerce date columns to datetime.

    The index is flattened, any "date"/"datetime" columns are parsed with
    ``pd.to_datetime``, then the original index levels are restored.
    """
    frame = pd.read_pickle(_find_pickle(filename_without_suffix))
    level_names = frame.index.names
    frame = frame.reset_index()
    for col in ("date", "datetime"):
        if col in frame:
            frame[col] = pd.to_datetime(frame[col])
    return frame.set_index(level_names)
19,640 | from __future__ import annotations
from functools import lru_cache
from pathlib import Path
from typing import List, Sequence, cast
import cachetools
import numpy as np
import pandas as pd
from cachetools.keys import hashkey
from qlib.backtest.decision import Order, OrderDir
from qlib.rl.data.base import BaseIntradayBacktestData, BaseIntradayProcessedData, ProcessedDataProvider
from qlib.typehint import Literal
# How the deal price is chosen; see SimpleIntradayBacktestData.get_deal_price.
DealPriceType = Literal["bid_or_ask", "bid_or_ask_fill", "close"]


class SimpleIntradayBacktestData(BaseIntradayBacktestData):
    """Backtest data for simple simulator."""

    def __init__(
        self,
        data_dir: Path | str,
        stock_id: str,
        date: pd.Timestamp,
        deal_price: DealPriceType = "close",
        order_dir: int | None = None,
    ) -> None:
        super().__init__()

        root = data_dir if isinstance(data_dir, Path) else Path(data_dir)
        frame = _read_pickle(root / stock_id)
        # Narrow to the single (stock, date) cross-section.
        frame = frame.loc[pd.IndexSlice[stock_id, :, date]]
        # NOTE: dropping the stock/date levels explicitly is no longer needed
        # for pandas >= 1.4.

        self.data: pd.DataFrame = frame
        self.deal_price_type: DealPriceType = deal_price
        self.order_dir = order_dir

    def __repr__(self) -> str:
        with pd.option_context("memory_usage", False, "display.max_info_columns", 1, "display.large_repr", "info"):
            return f"{self.__class__.__name__}({self.data})"

    def __len__(self) -> int:
        return len(self.data)

    def get_deal_price(self) -> pd.Series:
        """Return a pandas series that can be indexed with time.
        See :attribute:`DealPriceType` for details."""
        if self.deal_price_type == "close":
            return self.data["$close0"]
        if self.deal_price_type in ("bid_or_ask", "bid_or_ask_fill"):
            if self.order_dir is None:
                raise ValueError("Order direction cannot be none when deal_price_type is not close.")
            # Sellers hit the bid; buyers lift the ask.
            col = "$bid0" if self.order_dir == OrderDir.SELL else "$ask0"
            price = self.data[col]
            if self.deal_price_type == "bid_or_ask_fill":
                # A zero quote means that book side is empty; fall back to the other side.
                other = "$ask0" if self.order_dir == OrderDir.SELL else "$bid0"
                price = price.replace(0, np.nan).fillna(self.data[other])
            return price
        raise ValueError(f"Unsupported deal_price_type: {self.deal_price_type}")

    def get_volume(self) -> pd.Series:
        """Return a volume series that can be indexed with time."""
        return self.data["$volume0"]

    def get_time_index(self) -> pd.DatetimeIndex:
        return cast(pd.DatetimeIndex, self.data.index)
def load_simple_intraday_backtest_data(
    data_dir: Path,
    stock_id: str,
    date: pd.Timestamp,
    deal_price: DealPriceType = "close",
    order_dir: int | None = None,
) -> SimpleIntradayBacktestData:
    """Thin constructor wrapper kept for API symmetry with the other loaders."""
    return SimpleIntradayBacktestData(
        data_dir=data_dir,
        stock_id=stock_id,
        date=date,
        deal_price=deal_price,
        order_dir=order_dir,
    )
19,641 | from __future__ import annotations
from functools import lru_cache
from pathlib import Path
from typing import List, Sequence, cast
import cachetools
import numpy as np
import pandas as pd
from cachetools.keys import hashkey
from qlib.backtest.decision import Order, OrderDir
from qlib.rl.data.base import BaseIntradayBacktestData, BaseIntradayProcessedData, ProcessedDataProvider
from qlib.typehint import Literal
# NOTE(review): the bodies of `__init__` and `__repr__` below, and the nested
# `class BaseIntradayProcessedData:` stub, appear to have been stripped during
# extraction — bare `def`/`class` headers with no suite are syntax errors.
# TODO: restore the original implementations before use; do not ship as-is.
class PickleIntradayProcessedData(BaseIntradayProcessedData):
    def __init__(
        self,
        data_dir: Path | str,
        stock_id: str,
        date: pd.Timestamp,
        feature_dim: int,
        time_index: pd.Index,
    ) -> None:
    def __repr__(self) -> str:
class BaseIntradayProcessedData:
def load_pickle_intraday_processed_data(
    data_dir: Path,
    stock_id: str,
    date: pd.Timestamp,
    feature_dim: int,
    time_index: pd.Index,
) -> BaseIntradayProcessedData:
    """Construct :class:`PickleIntradayProcessedData` for one stock/date."""
    return PickleIntradayProcessedData(data_dir, stock_id, date, feature_dim, time_index)
19,642 | from __future__ import annotations
from functools import lru_cache
from pathlib import Path
from typing import List, Sequence, cast
import cachetools
import numpy as np
import pandas as pd
from cachetools.keys import hashkey
from qlib.backtest.decision import Order, OrderDir
from qlib.rl.data.base import BaseIntradayBacktestData, BaseIntradayProcessedData, ProcessedDataProvider
from qlib.typehint import Literal
# NOTE(review): `IntEnum` is not imported in this chunk — presumably
# `from enum import IntEnum` exists upstream; confirm.
class OrderDir(IntEnum):
    # Order direction
    SELL = 0
    BUY = 1
# NOTE(review): this class reads like a dataclass (bare annotated fields with
# defaults, plus `__post_init__`), and `sign`/`date` are used as attributes
# elsewhere in the class (e.g. `self.amount * self.sign`, `self.date`), so the
# `@dataclass` and `@property` decorators — and `@staticmethod` on `parse_dir`,
# which takes no `self` — were likely stripped in extraction. Confirm against
# the original module before relying on this copy.
class Order:
    """
    stock_id : str
    amount : float
    start_time : pd.Timestamp
        closed start time for order trading
    end_time : pd.Timestamp
        closed end time for order trading
    direction : int
        Order.SELL for sell; Order.BUY for buy
    factor : float
            presents the weight factor assigned in Exchange()
    """

    # 1) time invariant values
    # - they are set by users and is time-invariant.
    stock_id: str
    amount: float  # `amount` is a non-negative and adjusted value
    direction: OrderDir

    # 2) time variant values:
    # - Users may want to set these values when using lower level APIs
    # - If users don't, TradeDecisionWO will help users to set them
    # The interval of the order which belongs to (NOTE: this is not the expected order dealing range time)
    start_time: pd.Timestamp
    end_time: pd.Timestamp

    # 3) results
    # - users should not care about these values
    # - they are set by the backtest system after finishing the results.
    # What the value should be about in all kinds of cases
    # - not tradable: the deal_amount == 0 , factor is None
    # - the stock is suspended and the entire order fails. No cost for this order
    # - dealt or partially dealt: deal_amount >= 0 and factor is not None
    deal_amount: float = 0.0  # `deal_amount` is a non-negative value
    factor: Optional[float] = None

    # TODO:
    # a status field to indicate the dealing result of the order

    # FIXME:
    # for compatible now.
    # Please remove them in the future
    SELL: ClassVar[OrderDir] = OrderDir.SELL
    BUY: ClassVar[OrderDir] = OrderDir.BUY

    def __post_init__(self) -> None:
        # Validate the direction and reset result fields: a fresh order has
        # not been dealt yet.
        if self.direction not in {Order.SELL, Order.BUY}:
            raise NotImplementedError("direction not supported, `Order.SELL` for sell, `Order.BUY` for buy")
        self.deal_amount = 0.0
        self.factor = None

    def amount_delta(self) -> float:
        """
        return the delta of amount.
        - Positive value indicates buying `amount` of share
        - Negative value indicates selling `amount` of share
        """
        return self.amount * self.sign

    def deal_amount_delta(self) -> float:
        """
        return the delta of deal_amount.
        - Positive value indicates buying `deal_amount` of share
        - Negative value indicates selling `deal_amount` of share
        """
        return self.deal_amount * self.sign

    def sign(self) -> int:
        """
        return the sign of trading
        - `+1` indicates buying
        - `-1` value indicates selling
        """
        # IntEnum arithmetic: BUY(1) -> +1, SELL(0) -> -1.
        return self.direction * 2 - 1

    def parse_dir(direction: Union[str, int, np.integer, OrderDir, np.ndarray]) -> Union[OrderDir, np.ndarray]:
        # Normalize any reasonable representation of a direction to OrderDir
        # (elementwise for ndarrays).
        if isinstance(direction, OrderDir):
            return direction
        elif isinstance(direction, (int, float, np.integer, np.floating)):
            return Order.BUY if direction > 0 else Order.SELL
        elif isinstance(direction, str):
            dl = direction.lower().strip()
            if dl == "sell":
                return OrderDir.SELL
            elif dl == "buy":
                return OrderDir.BUY
            else:
                raise NotImplementedError(f"This type of input is not supported")
        elif isinstance(direction, np.ndarray):
            direction_array = direction.copy()
            direction_array[direction_array > 0] = Order.BUY
            direction_array[direction_array <= 0] = Order.SELL
            return direction_array
        else:
            raise NotImplementedError(f"This type of input is not supported")

    def key_by_day(self) -> tuple:
        """A hashable & unique key to identify this order, under the granularity in day."""
        return self.stock_id, self.date, self.direction

    def key(self) -> tuple:
        """A hashable & unique key to identify this order."""
        return self.stock_id, self.start_time, self.end_time, self.direction

    def date(self) -> pd.Timestamp:
        """Date of the order."""
        return pd.Timestamp(self.start_time.replace(hour=0, minute=0, second=0))
The provided code snippet includes necessary dependencies for implementing the `load_orders` function. Write a Python function `def load_orders( order_path: Path, start_time: pd.Timestamp = None, end_time: pd.Timestamp = None, ) -> Sequence[Order]` to solve the following problem:
Load orders, and set start time and end time for the orders.
Here is the function:
def load_orders(
    order_path: Path,
    start_time: pd.Timestamp = None,
    end_time: pd.Timestamp = None,
) -> Sequence[Order]:
    """Load orders, and set start time and end time for the orders."""
    start_time = start_time or pd.Timestamp("0:00:00")
    end_time = end_time or pd.Timestamp("23:59:59")

    # Either a single pickle file, or a directory of pickles to concatenate.
    if order_path.is_file():
        order_df = pd.read_pickle(order_path)
    else:
        order_df = pd.concat([pd.read_pickle(f) for f in order_path.iterdir()])

    order_df = order_df.reset_index()
    if "date" in order_df.columns:
        # Legacy-style orders have "date" instead of "datetime"
        order_df = order_df.rename(columns={"date": "datetime"})
    # Sometimes "date" are str rather than Timestamp
    order_df["datetime"] = pd.to_datetime(order_df["datetime"])

    orders: List[Order] = []
    for _, row in order_df.iterrows():
        # filter out orders with amount == 0
        if row["amount"] <= 0:
            continue
        orders.append(
            Order(
                row["instrument"],
                row["amount"],
                OrderDir(int(row["order_type"])),
                row["datetime"].replace(hour=start_time.hour, minute=start_time.minute, second=start_time.second),
                row["datetime"].replace(hour=end_time.hour, minute=end_time.minute, second=end_time.second),
            ),
        )
    return orders
19,643 | from __future__ import annotations
from pathlib import Path
import qlib
from qlib.constant import REG_CN
from qlib.contrib.ops.high_freq import BFillNan, Cut, Date, DayCumsum, DayLast, FFillNan, IsInf, IsNull, Select
REG_CN = "cn"
class DayCumsum(ElemOperator):
    """DayCumsum Operator during start time and end time.

    Parameters
    ----------
    feature : Expression
        feature instance
    start : str
        the start time of backtest in one day.
        !!!NOTE: "9:30" means the time period of (9:30, 9:31) is in transaction.
    end : str
        the end time of backtest in one day.
        !!!NOTE: "14:59" means the time period of (14:59, 15:00) is in transaction,
        but (15:00, 15:01) is not.
        So start="9:30" and end="14:59" means trading all day.

    Returns
    ----------
    feature:
        a series of that each value equals the cumsum value during start time and end time.
        Otherwise, the value is zero.
    """

    def __init__(self, feature, start: str = "9:30", end: str = "14:59", data_granularity: int = 1):
        self.feature = feature
        self.start = datetime.strptime(start, "%H:%M")
        self.end = datetime.strptime(end, "%H:%M")
        # Session boundary times (9:30-11:30, 13:00-15:00 — CN market layout).
        self.morning_open = datetime.strptime("9:30", "%H:%M")
        self.morning_close = datetime.strptime("11:30", "%H:%M")
        self.noon_open = datetime.strptime("13:00", "%H:%M")
        self.noon_close = datetime.strptime("15:00", "%H:%M")
        # Minutes aggregated into one bar.
        self.data_granularity = data_granularity
        # Bar indices of the window; `time_to_day_index` is defined elsewhere
        # in the module — presumably maps a clock time to a minute-of-day index.
        self.start_id = time_to_day_index(self.start) // self.data_granularity
        self.end_id = time_to_day_index(self.end) // self.data_granularity
        # A 240-minute trading day must divide evenly into bars.
        assert 240 % self.data_granularity == 0

    def period_cusum(self, df):
        # Cumulative sum restricted to the [start_id, end_id] bar window:
        # bars before the window are zeroed before summing, bars after it are
        # zeroed after summing.
        df = df.copy()
        assert len(df) == 240 // self.data_granularity
        df.iloc[0 : self.start_id] = 0
        df = df.cumsum()
        df.iloc[self.end_id + 1 : 240 // self.data_granularity] = 0
        return df

    def _load_internal(self, instrument, start_index, end_index, freq):
        # Apply the windowed cumsum independently for each calendar day.
        _calendar = get_calendar_day(freq=freq)
        series = self.feature.load(instrument, start_index, end_index, freq)
        return series.groupby(_calendar[series.index]).transform(self.period_cusum)
class DayLast(ElemOperator):
    """DayLast Operator

    Broadcasts each day's last value to every intra-day sample of that day.

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a series of that each value equals the last value of its day
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        day_of = get_calendar_day(freq=freq)
        values = self.feature.load(instrument, start_index, end_index, freq)
        return values.groupby(day_of[values.index]).transform("last")
class FFillNan(ElemOperator):
    """FFillNan Operator

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a forward fill nan feature
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        series = self.feature.load(instrument, start_index, end_index, freq)
        # `fillna(method="ffill")` is deprecated since pandas 2.0 and removed
        # in 3.0; `ffill()` is the long-standing supported equivalent.
        return series.ffill()
class BFillNan(ElemOperator):
    """BFillNan Operator

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a backward fill nan feature
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        series = self.feature.load(instrument, start_index, end_index, freq)
        # `fillna(method="bfill")` is deprecated since pandas 2.0 and removed
        # in 3.0; `bfill()` is the long-standing supported equivalent.
        return series.bfill()
class Date(ElemOperator):
    """Date Operator

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a series of that each value is the date corresponding to feature.index
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        day_of = get_calendar_day(freq=freq)
        values = self.feature.load(instrument, start_index, end_index, freq)
        return pd.Series(day_of[values.index], index=values.index)
class Select(PairOperator):
    """Select Operator

    Parameters
    ----------
    feature_left : Expression
        feature instance, select condition
    feature_right : Expression
        feature instance, select value

    Returns
    ----------
    feature:
        value(feature_right) that meets the condition(feature_left)
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        condition = self.feature_left.load(instrument, start_index, end_index, freq)
        values = self.feature_right.load(instrument, start_index, end_index, freq)
        # Boolean-mask the value series by the condition series.
        return values.loc[condition]
class IsNull(ElemOperator):
    """IsNull Operator

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        A series indicating whether the feature is nan
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        loaded = self.feature.load(instrument, start_index, end_index, freq)
        return loaded.isnull()
class IsInf(ElemOperator):
    """IsInf Operator

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        A series indicating whether the feature is inf
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        loaded = self.feature.load(instrument, start_index, end_index, freq)
        return np.isinf(loaded)
class Cut(ElemOperator):
    """Cut Operator

    Deletes elements from both ends of the raw (unsliced) feature series.

    Parameters
    ----------
    feature : Expression
        feature instance
    left : int
        left > 0, delete the first `left` elements of feature (default is None, which means 0)
    right : int
        right < 0, delete the last -`right` elements of feature (default is None, which means 0)

    Returns
    ----------
    feature:
        A series with the first `left` and last -`right` elements deleted from the feature.
        Note: It is deleted from the raw data, not the sliced data
    """

    def __init__(self, feature, left=None, right=None):
        self.left = left
        self.right = right
        # Fix: the old message ("Cut operator l shoud > 0 and r should < 0")
        # had a typo and referred to parameters `l`/`r` that do not exist.
        if (self.left is not None and self.left <= 0) or (self.right is not None and self.right >= 0):
            raise ValueError("Cut operator left should be > 0 and right should be < 0")
        super(Cut, self).__init__(feature)

    def _load_internal(self, instrument, start_index, end_index, freq):
        series = self.feature.load(instrument, start_index, end_index, freq)
        return series.iloc[self.left : self.right]

    def get_extended_window_size(self):
        # Extend the parent's required window so the cut elements are still available.
        ll = 0 if self.left is None else self.left
        rr = 0 if self.right is None else abs(self.right)
        lft_etd, rght_etd = self.feature.get_extended_window_size()
        return lft_etd + ll, rght_etd + rr
The provided code snippet includes necessary dependencies for implementing the `init_qlib` function. Write a Python function `def init_qlib(qlib_config: dict) -> None` to solve the following problem:
Initialize necessary resource to launch the workflow, including data direction, feature columns, etc.. Parameters ---------- qlib_config: Qlib configuration. Example:: { "provider_uri_day": DATA_ROOT_DIR / "qlib_1d", "provider_uri_1min": DATA_ROOT_DIR / "qlib_1min", "feature_root_dir": DATA_ROOT_DIR / "qlib_handler_stock", "feature_columns_today": [ "$open", "$high", "$low", "$close", "$vwap", "$bid", "$ask", "$volume", "$bidV", "$bidV1", "$bidV3", "$bidV5", "$askV", "$askV1", "$askV3", "$askV5", ], "feature_columns_yesterday": [ "$open_1", "$high_1", "$low_1", "$close_1", "$vwap_1", "$bid_1", "$ask_1", "$volume_1", "$bidV_1", "$bidV1_1", "$bidV3_1", "$bidV5_1", "$askV_1", "$askV1_1", "$askV3_1", "$askV5_1", ], }
Here is the function:
def init_qlib(qlib_config: dict) -> None:
    """Initialize necessary resource to launch the workflow, including data direction, feature columns, etc..

    Parameters
    ----------
    qlib_config:
        Qlib configuration.

        Example::

            {
                "provider_uri_day": DATA_ROOT_DIR / "qlib_1d",
                "provider_uri_1min": DATA_ROOT_DIR / "qlib_1min",
                "feature_root_dir": DATA_ROOT_DIR / "qlib_handler_stock",
                "feature_columns_today": [
                    "$open", "$high", "$low", "$close", "$vwap", "$bid", "$ask", "$volume",
                    "$bidV", "$bidV1", "$bidV3", "$bidV5", "$askV", "$askV1", "$askV3", "$askV5",
                ],
                "feature_columns_yesterday": [
                    "$open_1", "$high_1", "$low_1", "$close_1", "$vwap_1", "$bid_1", "$ask_1", "$volume_1",
                    "$bidV_1", "$bidV1_1", "$bidV3_1", "$bidV5_1", "$askV_1", "$askV1_1", "$askV3_1", "$askV5_1",
                ],
            }

    NOTE: only the ``provider_uri_*`` keys are consumed here; the ``feature_*``
    keys shown in the example are read by other components.
    """

    # Normalize str -> Path so both input styles are accepted.
    def _convert_to_path(path: str | Path) -> Path:
        return path if isinstance(path, Path) else Path(path)

    # Collect whichever data granularities the config provides.
    provider_uri_map = {}
    for granularity in ["1min", "5min", "day"]:
        if f"provider_uri_{granularity}" in qlib_config:
            provider_uri_map[f"{granularity}"] = _convert_to_path(qlib_config[f"provider_uri_{granularity}"]).as_posix()
    # Register the custom high-frequency operators and route calendar/feature
    # storage to the per-granularity data directories.
    qlib.init(
        region=REG_CN,
        auto_mount=False,
        custom_ops=[DayLast, FFillNan, BFillNan, Date, Select, IsNull, IsInf, Cut, DayCumsum],
        expression_cache=None,
        calendar_provider={
            "class": "LocalCalendarProvider",
            "module_path": "qlib.data.data",
            "kwargs": {
                "backend": {
                    "class": "FileCalendarStorage",
                    "module_path": "qlib.data.storage.file_storage",
                    "kwargs": {"provider_uri_map": provider_uri_map},
                },
            },
        },
        feature_provider={
            "class": "LocalFeatureProvider",
            "module_path": "qlib.data.data",
            "kwargs": {
                "backend": {
                    "class": "FileFeatureStorage",
                    "module_path": "qlib.data.storage.file_storage",
                    "kwargs": {"provider_uri_map": provider_uri_map},
                },
            },
        },
        provider_uri=provider_uri_map,
        kernels=1,  # NOTE(review): single kernel — presumably to avoid nested parallelism; confirm
        redis_port=-1,  # NOTE(review): looks like this disables the redis cache server — confirm
        clear_mem_cache=False,  # init_qlib will be called for multiple times. Keep the cache for improving performance
    )
19,644 | from __future__ import annotations
from typing import Any, cast
import numpy as np
import pandas as pd
from qlib.backtest.decision import OrderDir
from qlib.backtest.executor import BaseExecutor, NestedExecutor, SimulatorExecutor
from qlib.constant import float_or_ndarray
def dataframe_append(df: pd.DataFrame, other: Any) -> pd.DataFrame:
    """Append ``other`` to ``df``, indexed by its "datetime" column.

    ``other`` may be anything ``pd.DataFrame`` accepts (e.g. a list of dicts).
    Replacement for the deprecated ``DataFrame.append``.
    """
    addition = pd.DataFrame(other).set_index("datetime")
    addition.index.name = "datetime"
    return pd.concat([df, addition], axis=0)
19,645 | from __future__ import annotations
from typing import Any, cast
import numpy as np
import pandas as pd
from qlib.backtest.decision import OrderDir
from qlib.backtest.executor import BaseExecutor, NestedExecutor, SimulatorExecutor
from qlib.constant import float_or_ndarray
# NOTE(review): `IntEnum` and `TypeVar` are not imported in this chunk —
# presumably imported upstream in the original module; confirm.
class OrderDir(IntEnum):
    # Order direction
    SELL = 0
    BUY = 1


# TypeVar constrained to float or ndarray: functions using it return the same
# kind of value they were given.
float_or_ndarray = TypeVar("float_or_ndarray", float, np.ndarray)
def price_advantage(
    exec_price: float_or_ndarray,
    baseline_price: float,
    direction: OrderDir | int,
) -> float_or_ndarray:
    """Price advantage of ``exec_price`` over ``baseline_price`` in basis points.

    Buying below / selling above the baseline is positive. A zero baseline
    signals broken data and yields zeros; NaNs in the result are mapped to 0.
    A size-1 result is returned as a plain float.
    """
    if baseline_price == 0:  # something is wrong with data. Should be nan here
        return 0.0 if isinstance(exec_price, float) else np.zeros_like(exec_price)

    if direction == OrderDir.BUY:
        bp = (1 - exec_price / baseline_price) * 10000
    elif direction == OrderDir.SELL:
        bp = (exec_price / baseline_price - 1) * 10000
    else:
        raise ValueError(f"Unexpected order direction: {direction}")

    clean: np.ndarray = np.nan_to_num(bp, nan=0.0)
    if clean.size == 1:
        return clean.item()
    return cast(float_or_ndarray, clean)
19,646 | from __future__ import annotations
from typing import Any, cast
import numpy as np
import pandas as pd
from qlib.backtest.decision import OrderDir
from qlib.backtest.executor import BaseExecutor, NestedExecutor, SimulatorExecutor
from qlib.constant import float_or_ndarray
class BaseExecutor:
    """Base executor for trading.

    Drives a trade calendar step by step: each call to :meth:`collect_data`
    executes one trade decision, records the results into
    ``self.trade_account`` and moves the calendar one step forward.
    """

    def __init__(
        self,
        time_per_step: str,
        start_time: Union[str, pd.Timestamp] = None,
        end_time: Union[str, pd.Timestamp] = None,
        indicator_config: dict = {},
        generate_portfolio_metrics: bool = False,
        verbose: bool = False,
        track_data: bool = False,
        trade_exchange: Exchange | None = None,
        common_infra: CommonInfrastructure | None = None,
        settle_type: str = BasePosition.ST_NO,
        **kwargs: Any,
    ) -> None:
        """
        Parameters
        ----------
        time_per_step : str
            trade time per trading step, used for generate the trade calendar
        show_indicator: bool, optional
            whether to show indicators, :
            - 'pa', the price advantage
            - 'pos', the positive rate
            - 'ffr', the fulfill rate
        indicator_config: dict, optional
            config for calculating trade indicator, including the following fields:
            - 'show_indicator': whether to show indicators, optional, default by False. The indicators includes
                - 'pa', the price advantage
                - 'pos', the positive rate
                - 'ffr', the fulfill rate
            - 'pa_config': config for calculating price advantage(pa), optional
                - 'base_price': the based price than which the trading price is advanced, Optional, default by 'twap'
                    - If 'base_price' is 'twap', the based price is the time weighted average price
                    - If 'base_price' is 'vwap', the based price is the volume weighted average price
                - 'weight_method': weighted method when calculating total trading pa by different orders' pa in each
                  step, optional, default by 'mean'
                    - If 'weight_method' is 'mean', calculating mean value of different orders' pa
                    - If 'weight_method' is 'amount_weighted', calculating amount weighted average value of different
                      orders' pa
                    - If 'weight_method' is 'value_weighted', calculating value weighted average value of different
                      orders' pa
            - 'ffr_config': config for calculating fulfill rate(ffr), optional
                - 'weight_method': weighted method when calculating total trading ffr by different orders' ffr in each
                  step, optional, default by 'mean'
                    - If 'weight_method' is 'mean', calculating mean value of different orders' ffr
                    - If 'weight_method' is 'amount_weighted', calculating amount weighted average value of different
                      orders' ffr
                    - If 'weight_method' is 'value_weighted', calculating value weighted average value of different
                      orders' ffr
            Example:
                {
                    'show_indicator': True,
                    'pa_config': {
                        "agg": "twap",  # "vwap"
                        "price": "$close",  # default to use deal price of the exchange
                    },
                    'ffr_config': {
                        'weight_method': 'value_weighted',
                    }
                }
        generate_portfolio_metrics : bool, optional
            whether to generate portfolio_metrics, by default False
        verbose : bool, optional
            whether to print trading info, by default False
        track_data : bool, optional
            whether to generate trade_decision, will be used when training rl agent
            - If `self.track_data` is true, when making data for training, the input `trade_decision` of `execute` will
              be generated by `collect_data`
            - Else, `trade_decision` will not be generated
        trade_exchange : Exchange
            exchange that provides market info, used to generate portfolio_metrics
            - If generate_portfolio_metrics is None, trade_exchange will be ignored
            - Else If `trade_exchange` is None, self.trade_exchange will be set with common_infra
        common_infra : CommonInfrastructure, optional:
            common infrastructure for backtesting, may including:
            - trade_account : Account, optional
                trade account for trading
            - trade_exchange : Exchange, optional
                exchange that provides market info
        settle_type : str
            Please refer to the docs of BasePosition.settle_start
        """
        self.time_per_step = time_per_step
        # NOTE(review): `indicator_config` has a shared mutable default ({}); it is only
        # stored and read here, but callers should not mutate it in place.
        self.indicator_config = indicator_config
        self.generate_portfolio_metrics = generate_portfolio_metrics
        self.verbose = verbose
        self.track_data = track_data
        self._trade_exchange = trade_exchange
        self.level_infra = LevelInfrastructure()
        self.level_infra.reset_infra(common_infra=common_infra, executor=self)
        self._settle_type = settle_type
        self.reset(start_time=start_time, end_time=end_time, common_infra=common_infra)
        if common_infra is None:
            get_module_logger("BaseExecutor").warning(f"`common_infra` is not set for {self}")
        # record deal order amount in one day
        self.dealt_order_amount: Dict[str, float] = defaultdict(float)
        # the calendar day whose intraday dealt amounts are currently tracked
        self.deal_day = None

    def reset_common_infra(self, common_infra: CommonInfrastructure, copy_trade_account: bool = False) -> None:
        """
        reset infrastructure for trading
        - reset trade_account
        """
        if not hasattr(self, "common_infra"):
            self.common_infra = common_infra
        else:
            self.common_infra.update(common_infra)
        self.level_infra.reset_infra(common_infra=self.common_infra)
        if common_infra.has("trade_account"):
            # NOTE: there is a trick in the code.
            # shallow copy is used instead of deepcopy.
            # 1. So positions are shared
            # 2. Others are not shared, so each level has it own metrics (portfolio and trading metrics)
            self.trade_account: Account = (
                copy.copy(common_infra.get("trade_account"))
                if copy_trade_account
                else common_infra.get("trade_account")
            )
            self.trade_account.reset(freq=self.time_per_step, port_metr_enabled=self.generate_portfolio_metrics)

    # NOTE(review): this method and `trade_calendar` below are accessed as plain
    # attributes elsewhere (e.g. `self.trade_calendar.finished()` in `finished`),
    # so they are almost certainly decorated with `@property` upstream; the
    # decorators appear to have been lost in extraction -- confirm against the
    # original qlib source before relying on this block.
    def trade_exchange(self) -> Exchange:
        """get trade exchange in a prioritized order"""
        return getattr(self, "_trade_exchange", None) or self.common_infra.get("trade_exchange")

    def trade_calendar(self) -> TradeCalendarManager:
        """
        Though trade calendar can be accessed from multiple sources, but managing in a centralized way will make the
        code easier
        """
        return self.level_infra.get("trade_calendar")

    def reset(self, common_infra: CommonInfrastructure | None = None, **kwargs: Any) -> None:
        """
        - reset `start_time` and `end_time`, used in trade calendar
        - reset `common_infra`, used to reset `trade_account`, `trade_exchange`, .etc
        """
        if "start_time" in kwargs or "end_time" in kwargs:
            start_time = kwargs.get("start_time")
            end_time = kwargs.get("end_time")
            self.level_infra.reset_cal(freq=self.time_per_step, start_time=start_time, end_time=end_time)
        if common_infra is not None:
            self.reset_common_infra(common_infra)

    def get_level_infra(self) -> LevelInfrastructure:
        """Return the per-level infrastructure (trade calendar, sub-level links, etc.)."""
        return self.level_infra

    def finished(self) -> bool:
        """Whether the trade calendar has reached its last step."""
        return self.trade_calendar.finished()

    def execute(self, trade_decision: BaseTradeDecision, level: int = 0) -> List[object]:
        """execute the trade decision and return the executed result

        NOTE: this function is never used directly in the framework. Should we delete it?

        Parameters
        ----------
        trade_decision : BaseTradeDecision

        level : int
            the level of current executor

        Returns
        ----------
        execute_result : List[object]
            the executed result for trade decision
        """
        return_value: dict = {}
        # drain the generator; the result is communicated back via `return_value`
        for _decision in self.collect_data(trade_decision, return_value=return_value, level=level):
            pass
        return cast(list, return_value.get("execute_result"))

    def _collect_data(
        self,
        trade_decision: BaseTradeDecision,
        level: int = 0,
    ) -> Union[Generator[Any, Any, Tuple[List[object], dict]], Tuple[List[object], dict]]:
        """
        Please refer to the doc of collect_data
        The only difference between `_collect_data` and `collect_data` is that some common steps are moved into
        collect_data

        Subclasses implement this; the base class provides no body.

        Parameters
        ----------
        Please refer to the doc of collect_data

        Returns
        -------
        Tuple[List[object], dict]:
            (<the executed result for trade decision>, <the extra kwargs for `self.trade_account.update_bar_end`>)
        """

    def collect_data(
        self,
        trade_decision: BaseTradeDecision,
        return_value: dict | None = None,
        level: int = 0,
    ) -> Generator[Any, Any, List[object]]:
        """Generator for collecting the trade decision data for rl training

        This function will make a step forward

        Parameters
        ----------
        trade_decision : BaseTradeDecision

        level : int
            the level of current executor. 0 indicates the top level

        return_value : dict
            the mem address to return the value
            e.g.  {"return_value": <the executed result>}

        Returns
        ----------
        execute_result : List[object]
            the executed result for trade decision.
            ** NOTE!!!! **:
            1) This is necessary,  The return value of generator will be used in NestedExecutor
            2) Please note the executed results are not merged.

        Yields
        -------
        object
            trade decision
        """
        if self.track_data:
            yield trade_decision
        atomic = not issubclass(self.__class__, NestedExecutor)  # issubclass(A, A) is True
        if atomic and trade_decision.get_range_limit(default_value=None) is not None:
            raise ValueError("atomic executor doesn't support specify `range_limit`")
        if self._settle_type != BasePosition.ST_NO:
            self.trade_account.current_position.settle_start(self._settle_type)
        obj = self._collect_data(trade_decision=trade_decision, level=level)
        if isinstance(obj, GeneratorType):
            yield_res = yield from obj
            assert isinstance(yield_res, tuple) and len(yield_res) == 2
            res, kwargs = yield_res
        else:
            # Some concrete executor don't have inner decisions
            res, kwargs = obj
        trade_start_time, trade_end_time = self.trade_calendar.get_step_time()
        # Account will not be changed in this function
        self.trade_account.update_bar_end(
            trade_start_time,
            trade_end_time,
            self.trade_exchange,
            atomic=atomic,
            outer_trade_decision=trade_decision,
            indicator_config=self.indicator_config,
            **kwargs,
        )
        self.trade_calendar.step()
        if self._settle_type != BasePosition.ST_NO:
            self.trade_account.current_position.settle_commit()
        if return_value is not None:
            return_value.update({"execute_result": res})
        return res

    def get_all_executors(self) -> List[BaseExecutor]:
        """get all executors"""
        return [self]
class NestedExecutor(BaseExecutor):
    """
    Nested Executor with inner strategy and executor
    - At each time `execute` is called, it will call the inner strategy and executor to execute the `trade_decision`
      in a higher frequency env.
    """

    def __init__(
        self,
        time_per_step: str,
        inner_executor: Union[BaseExecutor, dict],
        inner_strategy: Union[BaseStrategy, dict],
        start_time: Union[str, pd.Timestamp] = None,
        end_time: Union[str, pd.Timestamp] = None,
        indicator_config: dict = {},
        generate_portfolio_metrics: bool = False,
        verbose: bool = False,
        track_data: bool = False,
        skip_empty_decision: bool = True,
        align_range_limit: bool = True,
        common_infra: CommonInfrastructure | None = None,
        **kwargs: Any,
    ) -> None:
        """
        Parameters
        ----------
        inner_executor : BaseExecutor
            trading env in each trading bar.
        inner_strategy : BaseStrategy
            trading strategy in each trading bar
        skip_empty_decision: bool
            Will the executor skip call inner loop when the decision is empty.
            It should be False in following cases
            - The decisions may be updated by steps
            - The inner executor may not follow the decisions from the outer strategy
        align_range_limit: bool
            force to align the trade_range decision
            It is only for nested executor, because range_limit is given by outer strategy
        """
        # inner executor/strategy may be passed as config dicts; instantiate them here
        self.inner_executor: BaseExecutor = init_instance_by_config(
            inner_executor,
            common_infra=common_infra,
            accept_types=BaseExecutor,
        )
        self.inner_strategy: BaseStrategy = init_instance_by_config(
            inner_strategy,
            common_infra=common_infra,
            accept_types=BaseStrategy,
        )
        self._skip_empty_decision = skip_empty_decision
        self._align_range_limit = align_range_limit
        super(NestedExecutor, self).__init__(
            time_per_step=time_per_step,
            start_time=start_time,
            end_time=end_time,
            indicator_config=indicator_config,
            generate_portfolio_metrics=generate_portfolio_metrics,
            verbose=verbose,
            track_data=track_data,
            common_infra=common_infra,
            **kwargs,
        )

    def reset_common_infra(self, common_infra: CommonInfrastructure, copy_trade_account: bool = False) -> None:
        """
        reset infrastructure for trading
        - reset inner_strategy and inner_executor common infra
        """
        # NOTE: please refer to the docs of BaseExecutor.reset_common_infra for the meaning of `copy_trade_account`

        # The first level follow the `copy_trade_account` from the upper level
        super(NestedExecutor, self).reset_common_infra(common_infra, copy_trade_account=copy_trade_account)

        # The lower level have to copy the trade_account
        self.inner_executor.reset_common_infra(common_infra, copy_trade_account=True)
        self.inner_strategy.reset_common_infra(common_infra)

    def _init_sub_trading(self, trade_decision: BaseTradeDecision) -> None:
        """Reset the inner executor/strategy to the current outer step's time window."""
        trade_start_time, trade_end_time = self.trade_calendar.get_step_time()
        self.inner_executor.reset(start_time=trade_start_time, end_time=trade_end_time)
        sub_level_infra = self.inner_executor.get_level_infra()
        self.level_infra.set_sub_level_infra(sub_level_infra)
        self.inner_strategy.reset(level_infra=sub_level_infra, outer_trade_decision=trade_decision)

    def _update_trade_decision(self, trade_decision: BaseTradeDecision) -> BaseTradeDecision:
        """Give both the outer strategy and the inner strategy a chance to revise the decision."""
        # outer strategy have chance to update decision each iterator
        updated_trade_decision = trade_decision.update(self.inner_executor.trade_calendar)
        if updated_trade_decision is not None:  # TODO: always is None for now?
            trade_decision = updated_trade_decision
            # NEW UPDATE
            # create a hook for inner strategy to update outer decision
            trade_decision = self.inner_strategy.alter_outer_trade_decision(trade_decision)
        return trade_decision

    def _collect_data(
        self,
        trade_decision: BaseTradeDecision,
        level: int = 0,
    ) -> Generator[Any, Any, Tuple[List[object], dict]]:
        """Drive the inner strategy/executor loop for one outer step and gather their results."""
        execute_result = []
        inner_order_indicators = []
        decision_list = []
        # NOTE:
        # - this is necessary to calculating the steps in sub level
        # - more detailed information will be set into trade decision
        self._init_sub_trading(trade_decision)
        _inner_execute_result = None
        while not self.inner_executor.finished():
            trade_decision = self._update_trade_decision(trade_decision)
            if trade_decision.empty() and self._skip_empty_decision:
                # give one chance for outer strategy to update the strategy
                # - For updating some information in the sub executor (the strategy have no knowledge of the inner
                #   executor when generating the decision)
                break
            sub_cal: TradeCalendarManager = self.inner_executor.trade_calendar
            # NOTE: make sure get_start_end_idx is after `self._update_trade_decision`
            start_idx, end_idx = get_start_end_idx(sub_cal, trade_decision)
            if not self._align_range_limit or start_idx <= sub_cal.get_trade_step() <= end_idx:
                # if force align the range limit, skip the steps outside the decision range limit
                res = self.inner_strategy.generate_trade_decision(_inner_execute_result)
                # NOTE: !!!!!
                # the two lines below is for a special case in RL
                # To solve the conflicts below
                # - Normally, user will create a strategy and embed it into Qlib's executor and simulator interaction
                #   loop For a _nested qlib example_, (Qlib Strategy) <=> (Qlib Executor[(inner Qlib Strategy) <=>
                #   (inner Qlib Executor)])
                # - However, RL-based framework has it's own script to run the loop
                #   For an _RL learning example_, (RL Policy) <=> (RL Env[(inner Qlib Executor)])
                # To make it possible to run _nested qlib example_ and _RL learning example_ together, the solution
                # below is proposed
                # - The entry script follow the example of _RL learning example_ to be compatible with all kinds of
                #   RL Framework
                # - Each step of (RL Env) will make (inner Qlib Executor) one step forward
                # - (inner Qlib Strategy) is a proxy strategy, it will give the program control right to (RL Env)
                #   by `yield from` and wait for the action from the policy
                # So the two lines below is the implementation of yielding control rights
                if isinstance(res, GeneratorType):
                    res = yield from res
                _inner_trade_decision: BaseTradeDecision = res
                trade_decision.mod_inner_decision(_inner_trade_decision)  # propagate part of decision information
                # NOTE sub_cal.get_step_time() must be called before collect_data in case of step shifting
                decision_list.append((_inner_trade_decision, *sub_cal.get_step_time()))
                # NOTE: Trade Calendar will step forward in the follow line
                _inner_execute_result = yield from self.inner_executor.collect_data(
                    trade_decision=_inner_trade_decision,
                    level=level + 1,
                )
                assert isinstance(_inner_execute_result, list)
                self.post_inner_exe_step(_inner_execute_result)
                execute_result.extend(_inner_execute_result)
                inner_order_indicators.append(
                    self.inner_executor.trade_account.get_trade_indicator().get_order_indicator(raw=True),
                )
            else:
                # do nothing and just step forward
                sub_cal.step()
        # Let inner strategy know that the outer level execution is done.
        self.inner_strategy.post_upper_level_exe_step()
        return execute_result, {"inner_order_indicators": inner_order_indicators, "decision_list": decision_list}

    def post_inner_exe_step(self, inner_exe_res: List[object]) -> None:
        """
        A hook for doing sth after each step of inner strategy

        Parameters
        ----------
        inner_exe_res :
            the execution result of inner task
        """
        self.inner_strategy.post_exe_step(inner_exe_res)

    def get_all_executors(self) -> List[BaseExecutor]:
        """get all executors, including self and inner_executor.get_all_executors()"""
        return [self, *self.inner_executor.get_all_executors()]
class SimulatorExecutor(BaseExecutor):
    """Executor that simulate the true market"""

    # TODO: TT_SERIAL & TT_PARAL will be replaced by feature fix_pos now.
    # Please remove them in the future.

    # available trade_types
    TT_SERIAL = "serial"
    # The orders will be executed serially in a sequence
    # In each trading step, it is possible that users sell instruments first and use the money to buy new instruments
    TT_PARAL = "parallel"
    # The orders will be executed in parallel
    # In each trading step, if users try to sell instruments first and buy new instruments with money, failure will
    # occur

    def __init__(
        self,
        time_per_step: str,
        start_time: Union[str, pd.Timestamp] = None,
        end_time: Union[str, pd.Timestamp] = None,
        indicator_config: dict = {},
        generate_portfolio_metrics: bool = False,
        verbose: bool = False,
        track_data: bool = False,
        common_infra: CommonInfrastructure | None = None,
        trade_type: str = TT_SERIAL,
        **kwargs: Any,
    ) -> None:
        """
        Parameters
        ----------
        trade_type: str
            please refer to the doc of `TT_SERIAL` & `TT_PARAL`
        """
        super(SimulatorExecutor, self).__init__(
            time_per_step=time_per_step,
            start_time=start_time,
            end_time=end_time,
            indicator_config=indicator_config,
            generate_portfolio_metrics=generate_portfolio_metrics,
            verbose=verbose,
            track_data=track_data,
            common_infra=common_infra,
            **kwargs,
        )
        self.trade_type = trade_type

    def _get_order_iterator(self, trade_decision: BaseTradeDecision) -> List[Order]:
        """
        Parameters
        ----------
        trade_decision : BaseTradeDecision
            the trade decision given by the strategy

        Returns
        -------
        List[Order]:
            get a list orders according to `self.trade_type`
        """
        # NOTE(review): `_retrieve_orders_from_decision` is a module-level helper defined
        # elsewhere in this file -- presumably it extracts the Order list from the decision.
        orders = _retrieve_orders_from_decision(trade_decision)
        if self.trade_type == self.TT_SERIAL:
            # Orders are executed one by one in the order given by the decision.
            # (comment fix: the original comment said "parallel" here, which described TT_PARAL)
            order_it = orders
        elif self.trade_type == self.TT_PARAL:
            # NOTE: !!!!!!!
            # Assumption: there will not be orders in different trading direction in a single step of a strategy !!!!
            # The parallel trading failure will be caused only by the conflicts of money
            # Therefore, make the buying go first will make sure the conflicts happen.
            # It equals to parallel trading after sorting the order by direction
            order_it = sorted(orders, key=lambda order: -order.direction)
        else:
            raise NotImplementedError(f"This type of input is not supported")
        return order_it

    def _collect_data(self, trade_decision: BaseTradeDecision, level: int = 0) -> Tuple[List[object], dict]:
        """Execute every order of the decision through the exchange; returns the per-order results."""
        trade_start_time, _ = self.trade_calendar.get_step_time()
        execute_result: list = []
        for order in self._get_order_iterator(trade_decision):
            # Each time we move into a new date, clear `self.dealt_order_amount` since it only maintains intraday
            # information.
            now_deal_day = self.trade_calendar.get_step_time()[0].floor(freq="D")
            if self.deal_day is None or now_deal_day > self.deal_day:
                self.dealt_order_amount = defaultdict(float)
                self.deal_day = now_deal_day

            # execute the order.
            # NOTE: The trade_account will be changed in this function
            trade_val, trade_cost, trade_price = self.trade_exchange.deal_order(
                order,
                trade_account=self.trade_account,
                dealt_order_amount=self.dealt_order_amount,
            )
            execute_result.append((order, trade_val, trade_cost, trade_price))
            self.dealt_order_amount[order.stock_id] += order.deal_amount
            if self.verbose:
                print(
                    "[I {:%Y-%m-%d %H:%M:%S}]: {} {}, price {:.2f}, amount {}, deal_amount {}, factor {}, "
                    "value {:.2f}, cash {:.2f}.".format(
                        trade_start_time,
                        "sell" if order.direction == Order.SELL else "buy",
                        order.stock_id,
                        trade_price,
                        order.amount,
                        order.deal_amount,
                        order.factor,
                        trade_val,
                        self.trade_account.get_cash(),
                    ),
                )
        return execute_result, {"trade_info": execute_result}
def get_simulator_executor(executor: BaseExecutor) -> SimulatorExecutor:
    """Drill down through nested executors and return the innermost SimulatorExecutor."""
    current = executor
    while isinstance(current, NestedExecutor):
        current = current.inner_executor
    assert isinstance(current, SimulatorExecutor)
    return current
19,647 | from __future__ import annotations
import math
from typing import Any, List, Optional, cast
import numpy as np
import pandas as pd
from gym import spaces
from qlib.constant import EPS
from qlib.rl.data.base import ProcessedDataProvider
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution.state import SAOEState
from qlib.typehint import TypedDict
from qlib.utils import init_instance_by_config
The provided code snippet includes necessary dependencies for implementing the `canonicalize` function. Write a Python function `def canonicalize(value: int | float | np.ndarray | pd.DataFrame | dict) -> np.ndarray | dict` to solve the following problem:
To 32-bit numeric types. Recursively.
Here is the function:
def canonicalize(value: int | float | np.ndarray | pd.DataFrame | dict) -> np.ndarray | dict:
    """To 32-bit numeric types. Recursively.

    Floats (and float arrays) become float32 arrays; ints/bools (and int
    arrays) become int32 arrays; dicts are converted value by value.
    Anything else is returned unchanged.
    """
    if isinstance(value, pd.DataFrame):
        # NOTE(review): frames are converted to ndarray but keep their original
        # dtype (not forced to 32-bit) -- confirm this asymmetry is intended.
        return value.to_numpy()
    float_like = isinstance(value, (float, np.floating)) or (
        isinstance(value, np.ndarray) and value.dtype.kind == "f"
    )
    if float_like:
        return np.array(value, dtype=np.float32)
    # bool is checked here (after floats) because it is an int subclass
    int_like = isinstance(value, (int, bool, np.integer)) or (
        isinstance(value, np.ndarray) and value.dtype.kind == "i"
    )
    if int_like:
        return np.array(value, dtype=np.int32)
    if isinstance(value, dict):
        return {key: canonicalize(sub) for key, sub in value.items()}
    return value
19,648 | from __future__ import annotations
import math
from typing import Any, List, Optional, cast
import numpy as np
import pandas as pd
from gym import spaces
from qlib.constant import EPS
from qlib.rl.data.base import ProcessedDataProvider
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution.state import SAOEState
from qlib.typehint import TypedDict
from qlib.utils import init_instance_by_config
def _to_int32(val):
return np.array(int(val), dtype=np.int32) | null |
19,649 | from __future__ import annotations
import math
from typing import Any, List, Optional, cast
import numpy as np
import pandas as pd
from gym import spaces
from qlib.constant import EPS
from qlib.rl.data.base import ProcessedDataProvider
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution.state import SAOEState
from qlib.typehint import TypedDict
from qlib.utils import init_instance_by_config
def _to_float32(val):
return np.array(val, dtype=np.float32) | null |
19,650 | from __future__ import annotations
from typing import Any, cast, List, Optional
import numpy as np
import pandas as pd
from pathlib import Path
from qlib.backtest.decision import Order, OrderDir
from qlib.constant import EPS, EPS_T, float_or_ndarray
from qlib.rl.data.base import BaseIntradayBacktestData
from qlib.rl.data.native import DataframeIntradayBacktestData, load_handler_intraday_processed_data
from qlib.rl.data.pickle_styled import load_simple_intraday_backtest_data
from qlib.rl.simulator import Simulator
from qlib.rl.utils import LogLevel
from .state import SAOEMetrics, SAOEState
class OrderDir(IntEnum):
    """Direction of an order: SELL (0) or BUY (1).

    Fix: the class body was empty in the extracted source (a SyntaxError);
    the members are restored to match the sibling definition of
    ``OrderDir`` earlier in this file (SELL = 0, BUY = 1).
    """

    SELL = 0
    BUY = 1


# Constrained TypeVar so `price_advantage` returns the same numeric kind it
# was given (plain float in -> float out, ndarray in -> ndarray out).
float_or_ndarray = TypeVar("float_or_ndarray", float, np.ndarray)
def price_advantage(
    exec_price: float_or_ndarray,
    baseline_price: float,
    direction: OrderDir | int,
) -> float_or_ndarray:
    """Price advantage of the execution against a baseline, in basis points.

    A buy benefits from executing below the baseline, a sell from executing
    above it.  A zero ``baseline_price`` signals corrupted data, so a zero
    advantage is reported instead of dividing by zero; NaNs in the result are
    likewise replaced by zero.  Size-1 results come back as a plain float.
    """
    if baseline_price == 0:  # something is wrong with data. Should be nan here
        return 0.0 if isinstance(exec_price, float) else np.zeros_like(exec_price)
    if direction == OrderDir.BUY:
        advantage = (1 - exec_price / baseline_price) * 10000
    elif direction == OrderDir.SELL:
        advantage = (exec_price / baseline_price - 1) * 10000
    else:
        raise ValueError(f"Unexpected order direction: {direction}")
    cleaned: np.ndarray = np.nan_to_num(advantage, nan=0.0)
    return cleaned.item() if cleaned.size == 1 else cast(float_or_ndarray, cleaned)
19,651 | from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, Generator, Iterable, Optional, OrderedDict, Tuple, cast
import gym
import numpy as np
import torch
import torch.nn as nn
from gym.spaces import Discrete
from tianshou.data import Batch, ReplayBuffer, to_torch
from tianshou.policy import BasePolicy, PPOPolicy, DQNPolicy
from qlib.rl.trainer.trainer import Trainer
def auto_device(module: nn.Module) -> torch.device:
    """Infer the device a module lives on from its first parameter.

    Falls back to CPU for parameterless modules.
    """
    first_param = next(iter(module.parameters()), None)
    if first_param is None:
        return torch.device("cpu")  # fallback to cpu
    return first_param.device
19,652 | from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, Generator, Iterable, Optional, OrderedDict, Tuple, cast
import gym
import numpy as np
import torch
import torch.nn as nn
from gym.spaces import Discrete
from tianshou.data import Batch, ReplayBuffer, to_torch
from tianshou.policy import BasePolicy, PPOPolicy, DQNPolicy
from qlib.rl.trainer.trainer import Trainer
def set_weight(policy: nn.Module, loaded_weight: OrderedDict) -> None:
    """Load a state dict into ``policy``, retrying with tianshou's key prefix.

    Weights saved before tianshou wrapped actor+critic into a single module
    lack the ``_actor_critic.`` key prefix; on a load failure the keys are
    duplicated under that prefix and the load is retried.
    See https://github.com/thu-ml/tianshou/issues/468.
    """
    try:
        policy.load_state_dict(loaded_weight)
    except RuntimeError:
        # try again by loading the converted weight
        for key in list(loaded_weight):
            loaded_weight["_actor_critic." + key] = loaded_weight[key]
        policy.load_state_dict(loaded_weight)
19,653 | from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, Generator, Iterable, Optional, OrderedDict, Tuple, cast
import gym
import numpy as np
import torch
import torch.nn as nn
from gym.spaces import Discrete
from tianshou.data import Batch, ReplayBuffer, to_torch
from tianshou.policy import BasePolicy, PPOPolicy, DQNPolicy
from qlib.rl.trainer.trainer import Trainer
def chain_dedup(*iterables: Iterable) -> Generator[Any, None, None]:
    """Yield the items of the given iterables in order, skipping duplicates.

    Only the first occurrence of each item is yielded; items must be hashable.
    """
    emitted = set()
    for source in iterables:
        for item in source:
            if item in emitted:
                continue
            emitted.add(item)
            yield item
19,654 | from __future__ import annotations
import collections
from types import GeneratorType
from typing import Any, Callable, cast, Dict, Generator, List, Optional, Tuple, Union
import warnings
import numpy as np
import pandas as pd
import torch
from tianshou.data import Batch
from tianshou.policy import BasePolicy
from qlib.backtest import CommonInfrastructure, Order
from qlib.backtest.decision import BaseTradeDecision, TradeDecisionWithDetails, TradeDecisionWO, TradeRange
from qlib.backtest.exchange import Exchange
from qlib.backtest.executor import BaseExecutor
from qlib.backtest.utils import LevelInfrastructure, get_start_end_idx
from qlib.constant import EPS, ONE_MIN, REG_CN
from qlib.rl.data.native import IntradayBacktestData, load_backtest_data
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution.state import SAOEMetrics, SAOEState
from qlib.rl.order_execution.utils import dataframe_append, price_advantage
from qlib.strategy.base import RLStrategy
from qlib.utils import init_instance_by_config
from qlib.utils.index_data import IndexData
from qlib.utils.time import get_day_min_idx_range
ONE_MIN = pd.Timedelta("1min")
def _get_all_timestamps(
start: pd.Timestamp,
end: pd.Timestamp,
granularity: pd.Timedelta = ONE_MIN,
include_end: bool = True,
) -> pd.DatetimeIndex:
ret = []
while start <= end:
ret.append(start)
start += granularity
if ret[-1] > end:
ret.pop()
if ret[-1] == end and not include_end:
ret.pop()
return pd.DatetimeIndex(ret) | null |
19,655 | from __future__ import annotations
import collections
from types import GeneratorType
from typing import Any, Callable, cast, Dict, Generator, List, Optional, Tuple, Union
import warnings
import numpy as np
import pandas as pd
import torch
from tianshou.data import Batch
from tianshou.policy import BasePolicy
from qlib.backtest import CommonInfrastructure, Order
from qlib.backtest.decision import BaseTradeDecision, TradeDecisionWithDetails, TradeDecisionWO, TradeRange
from qlib.backtest.exchange import Exchange
from qlib.backtest.executor import BaseExecutor
from qlib.backtest.utils import LevelInfrastructure, get_start_end_idx
from qlib.constant import EPS, ONE_MIN, REG_CN
from qlib.rl.data.native import IntradayBacktestData, load_backtest_data
from qlib.rl.interpreter import ActionInterpreter, StateInterpreter
from qlib.rl.order_execution.state import SAOEMetrics, SAOEState
from qlib.rl.order_execution.utils import dataframe_append, price_advantage
from qlib.strategy.base import RLStrategy
from qlib.utils import init_instance_by_config
from qlib.utils.index_data import IndexData
from qlib.utils.time import get_day_min_idx_range
The provided code snippet includes necessary dependencies for implementing the `fill_missing_data` function. Write a Python function `def fill_missing_data( original_data: np.ndarray, fill_method: Callable = np.nanmedian, ) -> np.ndarray` to solve the following problem:
Fill missing data. Parameters ---------- original_data Original data without missing values. fill_method Method used to fill the missing data. Returns ------- The filled data.
Here is the function:
def fill_missing_data(
    original_data: np.ndarray,
    fill_method: Callable = np.nanmedian,
) -> np.ndarray:
    """Fill missing (NaN) entries of an array.

    Fix: the original docstring described ``original_data`` as
    "Original data without missing values", contradicting the function's
    purpose; the behavior is unchanged.

    Parameters
    ----------
    original_data
        Data that may contain NaN entries.
    fill_method
        NaN-aware aggregation (e.g. ``np.nanmedian``) applied to
        ``original_data``; its scalar result replaces every NaN.

    Returns
    -------
    The filled data.
    """
    return np.nan_to_num(original_data, nan=fill_method(original_data))
19,656 | from __future__ import annotations
import copy
import warnings
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Type, Union, cast
import gym
import numpy as np
from tianshou.env import BaseVectorEnv, DummyVectorEnv, ShmemVectorEnv, SubprocVectorEnv
from qlib.typehint import Literal
from .log import LogWriter
def fill_invalid(obj: int | float | bool | T) -> T:
    """Build an 'invalid' marker shaped like ``obj``.

    Float arrays are filled with NaN, integer arrays with their dtype's max
    value; dict/list/tuple are converted member by member. Scalars are first
    promoted to 0-d numpy arrays. Raises ValueError for unsupported types.
    """
    if isinstance(obj, (int, float, bool)):
        return fill_invalid(np.array(obj))
    if hasattr(obj, "dtype"):
        if isinstance(obj, np.ndarray):
            if np.issubdtype(obj.dtype, np.floating):
                return np.full_like(obj, np.nan)
            return np.full_like(obj, np.iinfo(obj.dtype).max)
        # dealing with corner cases that numpy number is not supported by tianshou's sharray
        return fill_invalid(np.array(obj))
    if isinstance(obj, dict):
        return {key: fill_invalid(val) for key, val in obj.items()}
    if isinstance(obj, list):
        return [fill_invalid(val) for val in obj]
    if isinstance(obj, tuple):
        return tuple(fill_invalid(val) for val in obj)
    raise ValueError(f"Unsupported value to fill with invalid: {obj}")
The provided code snippet includes necessary dependencies for implementing the `generate_nan_observation` function. Write a Python function `def generate_nan_observation(obs_space: gym.Space) -> Any` to solve the following problem:
The NaN observation that indicates the environment receives no seed. We assume that obs is complex and there must be something like float. Otherwise this logic doesn't work.
Here is the function:
def generate_nan_observation(obs_space: gym.Space) -> Any:
    """The NaN observation that indicates the environment receives no seed.

    We assume that obs is complex and there must be something like float.
    Otherwise this logic doesn't work.
    """
    template = obs_space.sample()
    return fill_invalid(template)
19,657 | from __future__ import annotations
import copy
import warnings
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Type, Union, cast
import gym
import numpy as np
from tianshou.env import BaseVectorEnv, DummyVectorEnv, ShmemVectorEnv, SubprocVectorEnv
from qlib.typehint import Literal
from .log import LogWriter
def is_invalid(arr: int | float | bool | T) -> bool:
    """Tell whether ``arr`` looks like an "invalid" marker.

    Float arrays count as invalid when they are all-NaN; other dtypes when
    every element equals the dtype's max value. Containers are checked
    recursively; anything unrecognized is conservatively treated as invalid.
    """
    if isinstance(arr, np.ndarray):
        if np.issubdtype(arr.dtype, np.floating):
            return np.isnan(arr).all()
        sentinel = np.iinfo(arr.dtype).max
        return cast(bool, cast(np.ndarray, sentinel == arr).all())
    if isinstance(arr, dict):
        return all(is_invalid(value) for value in arr.values())
    if isinstance(arr, (list, tuple)):
        return all(is_invalid(item) for item in arr)
    if isinstance(arr, (int, float, bool, np.number)):
        # Scalars are routed through a numpy array first.
        return is_invalid(np.array(arr))
    return True
The provided code snippet includes necessary dependencies for implementing the `check_nan_observation` function. Write a Python function `def check_nan_observation(obs: Any) -> bool` to solve the following problem:
Check whether obs is generated by :func:`generate_nan_observation`.
Here is the function:
def check_nan_observation(obs: Any) -> bool:
    """Return True iff ``obs`` is the marker produced by :func:`generate_nan_observation`."""
    invalid = is_invalid(obs)
    return invalid
from __future__ import annotations
import copy
import warnings
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Type, Union, cast
import gym
import numpy as np
from tianshou.env import BaseVectorEnv, DummyVectorEnv, ShmemVectorEnv, SubprocVectorEnv
from qlib.typehint import Literal
from .log import LogWriter
# Parallelism flavor accepted by ``vectorize_env``; each value maps to one of
# the Finite{Dummy,Subproc,Shmem}VectorEnv classes below.
FiniteEnvType = Literal["dummy", "subproc", "shmem"]
class FiniteVectorEnv(BaseVectorEnv):
    """To allow the paralleled env workers consume a single DataQueue until it's exhausted.
    See `tianshou issue #322 <https://github.com/thu-ml/tianshou/issues/322>`_.
    The requirement is to make every possible seed (stored in :class:`qlib.rl.utils.DataQueue` in our case)
    consumed by exactly one environment. This is not possible by tianshou's native VectorEnv and Collector,
    because tianshou is unaware of this "exactly one" constraint, and might launch extra workers.
    Consider a corner case, where concurrency is 2, but there is only one seed in DataQueue.
    The reset of two workers must be both called according to the logic in collect.
    The returned results of two workers are collected, regardless of what they are.
    The problem is, one of the reset result must be invalid, or repeated,
    because there's only one need in queue, and collector isn't aware of such situation.
    Luckily, we can hack the vector env, and make a protocol between single env and vector env.
    The single environment (should be :class:`qlib.rl.utils.EnvWrapper` in our case) is responsible for
    reading from queue, and generate a special observation when the queue is exhausted. The special obs
    is called "nan observation", because simply using none causes problems in shared-memory vector env.
    :class:`FiniteVectorEnv` then read the observations from all workers, and select those non-nan
    observation. It also maintains an ``_alive_env_ids`` to track which workers should never be
    called again. When also the environments are exhausted, it will raise StopIteration exception.
    The usage of this vector env in collector are two parts:
    1. If the data queue is finite (usually when inference), collector should collect "infinity" number of
    episodes, until the vector env exhausts by itself.
    2. If the data queue is infinite (usually in training), collector can set number of episodes / steps.
    In this case, data would be randomly ordered, and some repetitions wouldn't matter.
    One extra function of this vector env is that it has a logger that explicitly collects logs
    from child workers. See :class:`qlib.rl.utils.LogWriter`.
    """

    # Log writers that receive per-env reset/step callbacks.
    _logger: list[LogWriter]

    def __init__(
        self, logger: LogWriter | list[LogWriter] | None, env_fns: list[Callable[..., gym.Env]], **kwargs: Any
    ) -> None:
        super().__init__(env_fns, **kwargs)
        # Normalize ``logger`` into a (possibly empty) list.
        if isinstance(logger, list):
            self._logger = logger
        elif isinstance(logger, LogWriter):
            self._logger = [logger]
        else:
            self._logger = []
        self._alive_env_ids: Set[int] = set()
        self._reset_alive_envs()
        self._default_obs = self._default_info = self._default_rew = None
        self._zombie = False
        self._collector_guarded: bool = False

    def _reset_alive_envs(self) -> None:
        """Mark every worker alive when starting fresh (or after running out)."""
        if not self._alive_env_ids:
            # starting or running out
            self._alive_env_ids = set(range(self.env_num))

    # to workaround with tianshou's buffer and batch:
    # remember the first valid obs/info/rew so exhausted workers can return
    # a structurally-compatible placeholder.
    def _set_default_obs(self, obs: Any) -> None:
        if obs is not None and self._default_obs is None:
            self._default_obs = copy.deepcopy(obs)

    def _set_default_info(self, info: Any) -> None:
        if info is not None and self._default_info is None:
            self._default_info = copy.deepcopy(info)

    def _set_default_rew(self, rew: Any) -> None:
        if rew is not None and self._default_rew is None:
            self._default_rew = copy.deepcopy(rew)

    def _get_default_obs(self) -> Any:
        return copy.deepcopy(self._default_obs)

    def _get_default_info(self) -> Any:
        return copy.deepcopy(self._default_info)

    def _get_default_rew(self) -> Any:
        return copy.deepcopy(self._default_rew)

    # END

    # FIX: restored ``@staticmethod`` -- the method has no ``self`` parameter but
    # is invoked as ``self._postproc_env_obs(o)``, which would otherwise bind the
    # instance to ``obs`` and raise TypeError.
    @staticmethod
    def _postproc_env_obs(obs: Any) -> Optional[Any]:
        # reserved for shmem vector env to restore empty observation
        if obs is None or check_nan_observation(obs):
            return None
        return obs

    # FIX: restored ``@contextmanager`` -- the body yields and the documented
    # usage is ``with finite_env.collector_guard():``, which requires the
    # decorator to turn this generator into a context manager.
    @contextmanager
    def collector_guard(self) -> Generator[FiniteVectorEnv, None, None]:
        """Guard the collector. Recommended to guard every collect.
        This guard is for two purposes.
        1. Catch and ignore the StopIteration exception, which is the stopping signal
        thrown by FiniteEnv to let tianshou know that ``collector.collect()`` should exit.
        2. Notify the loggers that the collect is ready / done what it's ready / done.
        Examples
        --------
        >>> with finite_env.collector_guard():
        ...     collector.collect(n_episode=INF)
        """
        self._collector_guarded = True
        for logger in self._logger:
            logger.on_env_all_ready()
        try:
            yield self
        except StopIteration:
            pass
        finally:
            self._collector_guarded = False
            # At last trigger the loggers
            for logger in self._logger:
                logger.on_env_all_done()

    def reset(
        self,
        id: int | List[int] | np.ndarray | None = None,
    ) -> np.ndarray:
        assert not self._zombie
        # Check whether it's guarded by collector_guard()
        if not self._collector_guarded:
            warnings.warn(
                "Collector is not guarded by FiniteEnv. "
                "This may cause unexpected problems, like unexpected StopIteration exception, "
                "or missing logs.",
                RuntimeWarning,
            )
        wrapped_id = self._wrap_id(id)
        self._reset_alive_envs()
        # ask super to reset alive envs and remap to current index
        request_id = [i for i in wrapped_id if i in self._alive_env_ids]
        obs = [None] * len(wrapped_id)
        id2idx = {i: k for k, i in enumerate(wrapped_id)}
        if request_id:
            for i, o in zip(request_id, super().reset(request_id)):
                obs[id2idx[i]] = self._postproc_env_obs(o)
        # A nan observation means the worker's queue is exhausted: retire it.
        for i, o in zip(wrapped_id, obs):
            if o is None and i in self._alive_env_ids:
                self._alive_env_ids.remove(i)
        # logging
        for i, o in zip(wrapped_id, obs):
            if i in self._alive_env_ids:
                for logger in self._logger:
                    # NOTE(review): the whole ``obs`` list is passed instead of
                    # ``o``; harmless today because on_env_reset ignores its
                    # second argument, but worth confirming upstream.
                    logger.on_env_reset(i, obs)
        # fill empty observation with default(fake) observation
        for o in obs:
            self._set_default_obs(o)
        for i, o in enumerate(obs):
            if o is None:
                obs[i] = self._get_default_obs()
        if not self._alive_env_ids:
            # comment this line so that the env becomes indispensable
            # self.reset()
            self._zombie = True
            raise StopIteration
        return np.stack(obs)

    def step(
        self,
        action: np.ndarray,
        id: int | List[int] | np.ndarray | None = None,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        assert not self._zombie
        wrapped_id = self._wrap_id(id)
        id2idx = {i: k for k, i in enumerate(wrapped_id)}
        request_id = list(filter(lambda i: i in self._alive_env_ids, wrapped_id))
        result = [[None, None, False, None] for _ in range(len(wrapped_id))]
        # ask super to step alive envs and remap to current index
        if request_id:
            valid_act = np.stack([action[id2idx[i]] for i in request_id])
            for i, r in zip(request_id, zip(*super().step(valid_act, request_id))):
                result[id2idx[i]] = list(r)
                result[id2idx[i]][0] = self._postproc_env_obs(result[id2idx[i]][0])
        # logging
        for i, r in zip(wrapped_id, result):
            if i in self._alive_env_ids:
                for logger in self._logger:
                    logger.on_env_step(i, *r)
        # fill empty observation/info with default(fake)
        for _, r, ___, i in result:
            self._set_default_info(i)
            self._set_default_rew(r)
        for i, r in enumerate(result):
            if r[0] is None:
                result[i][0] = self._get_default_obs()
            if r[1] is None:
                result[i][1] = self._get_default_rew()
            if r[3] is None:
                result[i][3] = self._get_default_info()
        ret = list(map(np.stack, zip(*result)))
        return cast(Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], ret)
class FiniteDummyVectorEnv(FiniteVectorEnv, DummyVectorEnv):
    """Finite-queue variant of tianshou's in-process ``DummyVectorEnv``."""

    pass
class FiniteSubprocVectorEnv(FiniteVectorEnv, SubprocVectorEnv):
    """Finite-queue variant of tianshou's subprocess-based ``SubprocVectorEnv``."""

    pass
class FiniteShmemVectorEnv(FiniteVectorEnv, ShmemVectorEnv):
    """Finite-queue variant of tianshou's shared-memory ``ShmemVectorEnv``."""

    pass
class LogWriter(Generic[ObsType, ActType]):
    """Base class for log writers, triggered at every reset and step by finite env.
    What to do with a specific log depends on the implementation of subclassing :class:`LogWriter`.
    The general principle is that, it should handle logs above its loglevel (inclusive),
    and discard logs that are not acceptable. For instance, console loggers obviously can't handle an image.
    """

    episode_count: int
    """Counter of episodes."""

    step_count: int
    """Counter of steps."""

    global_step: int
    """Counter of steps. Won"t be cleared in ``clear``."""

    global_episode: int
    """Counter of episodes. Won"t be cleared in ``clear``."""

    active_env_ids: Set[int]
    """Active environment ids in vector env."""

    episode_lengths: Dict[int, int]
    """Map from environment id to episode length."""

    episode_rewards: Dict[int, List[float]]
    """Map from environment id to episode total reward."""

    episode_logs: Dict[int, list]
    """Map from environment id to episode logs."""

    def __init__(self, loglevel: int | LogLevel = LogLevel.PERIODIC) -> None:
        self.loglevel = loglevel
        self.global_step = 0
        self.global_episode = 0
        # Information, logs of one episode is stored here.
        # This assumes that episode is not too long to fit into the memory.
        self.episode_lengths = dict()
        self.episode_rewards = dict()
        self.episode_logs = dict()
        self.clear()

    def clear(self):
        """Clear all the metrics for a fresh start.
        To make the logger instance reusable.
        """
        self.episode_count = self.step_count = 0
        self.active_env_ids = set()

    def state_dict(self) -> dict:
        """Save the states of the logger to a dict."""
        return {
            "episode_count": self.episode_count,
            "step_count": self.step_count,
            "global_step": self.global_step,
            "global_episode": self.global_episode,
            "active_env_ids": self.active_env_ids,
            "episode_lengths": self.episode_lengths,
            "episode_rewards": self.episode_rewards,
            "episode_logs": self.episode_logs,
        }

    def load_state_dict(self, state_dict: dict) -> None:
        """Load the states of current logger from a dict."""
        self.episode_count = state_dict["episode_count"]
        self.step_count = state_dict["step_count"]
        self.global_step = state_dict["global_step"]
        self.global_episode = state_dict["global_episode"]
        # These are runtime infos.
        # Though they are loaded, I don't think it really helps.
        self.active_env_ids = state_dict["active_env_ids"]
        self.episode_lengths = state_dict["episode_lengths"]
        self.episode_rewards = state_dict["episode_rewards"]
        self.episode_logs = state_dict["episode_logs"]

    # FIX: restored ``@staticmethod`` -- the method takes no ``self`` parameter,
    # so calling it on an instance would otherwise bind the instance to ``array``.
    @staticmethod
    def aggregation(array: Sequence[Any], name: str | None = None) -> Any:
        """Aggregation function from step-wise to episode-wise.
        If it's a sequence of float, take the mean.
        Otherwise, take the first element.
        If a name is specified and,
        - if it's ``reward``, the reduction will be sum.
        """
        assert len(array) > 0, "The aggregated array must be not empty."
        if all(isinstance(v, float) for v in array):
            if name == "reward":
                return np.sum(array)
            return np.mean(array)
        else:
            return array[0]

    def log_episode(self, length: int, rewards: List[float], contents: List[Dict[str, Any]]) -> None:
        """This is triggered at the end of each trajectory.
        Parameters
        ----------
        length
            Length of this trajectory.
        rewards
            A list of rewards at each step of this episode.
        contents
            Logged contents for every step.
        """

    def log_step(self, reward: float, contents: Dict[str, Any]) -> None:
        """This is triggered at each step.
        Parameters
        ----------
        reward
            Reward for this step.
        contents
            Logged contents for this step.
        """

    def on_env_step(self, env_id: int, obs: ObsType, rew: float, done: bool, info: InfoDict) -> None:
        """Callback for finite env, on each step."""
        # Update counter
        self.global_step += 1
        self.step_count += 1
        self.active_env_ids.add(env_id)
        self.episode_lengths[env_id] += 1
        # TODO: reward can be a list of list for MARL
        self.episode_rewards[env_id].append(rew)
        values: Dict[str, Any] = {}
        for key, (loglevel, value) in info["log"].items():
            if loglevel >= self.loglevel:  # FIXME: this is actually incorrect (see last FIXME)
                values[key] = value
        self.episode_logs[env_id].append(values)
        self.log_step(rew, values)
        if done:
            # Update counter
            self.global_episode += 1
            self.episode_count += 1
            self.log_episode(self.episode_lengths[env_id], self.episode_rewards[env_id], self.episode_logs[env_id])

    def on_env_reset(self, env_id: int, _: ObsType) -> None:
        """Callback for finite env.
        Reset episode statistics. Nothing task-specific is logged here because of
        `a limitation of tianshou <https://github.com/thu-ml/tianshou/issues/605>`__.
        """
        self.episode_lengths[env_id] = 0
        self.episode_rewards[env_id] = []
        self.episode_logs[env_id] = []

    def on_env_all_ready(self) -> None:
        """When all environments are ready to run.
        Usually, loggers should be reset here.
        """
        self.clear()

    def on_env_all_done(self) -> None:
        """All done. Time for cleanup."""
The provided code snippet includes necessary dependencies for implementing the `vectorize_env` function. Write a Python function `def vectorize_env( env_factory: Callable[..., gym.Env], env_type: FiniteEnvType, concurrency: int, logger: LogWriter | List[LogWriter], ) -> FiniteVectorEnv` to solve the following problem:
Helper function to create a vector env. Can be used to replace usual VectorEnv. For example, once you wrote: :: DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)]) Now you can replace it with: :: finite_env_factory(lambda: gym.make(task), "dummy", env_num, my_logger) By doing such replacement, you have two additional features enabled (compared to normal VectorEnv): 1. The vector env will check for NaN observation and kill the worker when its found. See :class:`FiniteVectorEnv` for why we need this. 2. A logger to explicit collect logs from environment workers. Parameters ---------- env_factory Callable to instantiate one single ``gym.Env``. All concurrent workers will have the same ``env_factory``. env_type dummy or subproc or shmem. Corresponding to `parallelism in tianshou <https://tianshou.readthedocs.io/en/master/api/tianshou.env.html#vectorenv>`_. concurrency Concurrent environment workers. logger Log writers. Warnings -------- Please do not use lambda expression here for ``env_factory`` as it may create incorrectly-shared instances. Don't do: :: vectorize_env(lambda: EnvWrapper(...), ...) Please do: :: def env_factory(): ... vectorize_env(env_factory, ...)
Here is the function:
def vectorize_env(
    env_factory: Callable[..., gym.Env],
    env_type: FiniteEnvType,
    concurrency: int,
    logger: LogWriter | List[LogWriter],
) -> FiniteVectorEnv:
    """Helper to build a finite vector env; a drop-in replacement for the usual VectorEnv.

    Where you previously wrote ::

        DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)])

    you can now write ::

        finite_env_factory(lambda: gym.make(task), "dummy", env_num, my_logger)

    Compared to a plain VectorEnv this adds two features:

    1. NaN observations are detected and the emitting worker is retired
       (see :class:`FiniteVectorEnv` for the rationale).
    2. A logger explicitly collects logs from the environment workers.

    Parameters
    ----------
    env_factory
        Callable instantiating one single ``gym.Env``; shared by all workers.
    env_type
        ``"dummy"``, ``"subproc"`` or ``"shmem"`` -- see
        `parallelism in tianshou <https://tianshou.readthedocs.io/en/master/api/tianshou.env.html#vectorenv>`_.
    concurrency
        Number of concurrent environment workers.
    logger
        Log writer(s).

    Warnings
    --------
    Do not pass a lambda expression as ``env_factory`` -- it may create
    incorrectly-shared instances. Define a named factory function instead.
    """
    cls_by_type: Dict[str, Type[FiniteVectorEnv]] = {
        "dummy": FiniteDummyVectorEnv,
        "subproc": FiniteSubprocVectorEnv,
        "shmem": FiniteShmemVectorEnv,
    }
    chosen_cls = cls_by_type[env_type]
    workers = [env_factory for _ in range(concurrency)]
    return chosen_cls(logger, workers)
from __future__ import division
from __future__ import print_function
import re
import abc
import copy
import queue
import bisect
import numpy as np
import pandas as pd
from typing import List, Union, Optional
from joblib import delayed
from .cache import H
from ..config import C
from .inst_processor import InstProcessor
from ..log import get_module_logger
from .cache import DiskDatasetCache
from ..utils import (
Wrapper,
init_instance_by_config,
register_wrapper,
get_module_by_module_path,
parse_field,
hash_args,
normalize_cache_fields,
code_to_fname,
time_to_slc_point,
read_period_data,
get_period_list,
)
from ..utils.paral import ParallelExt
from .ops import Operators
import sys
# Global provider placeholders: the concrete providers are injected into these
# ``Wrapper`` objects by ``register_all_wrappers`` at initialization time.
Cal: CalendarProviderWrapper = Wrapper()  # trading calendar provider
Inst: InstrumentProviderWrapper = Wrapper()  # instrument (stock pool) provider
FeatureD: FeatureProviderWrapper = Wrapper()  # raw feature provider
PITD: PITProviderWrapper = Wrapper()  # point-in-time data provider
ExpressionD: ExpressionProviderWrapper = Wrapper()  # expression engine provider
DatasetD: DatasetProviderWrapper = Wrapper()  # dataset provider
D: BaseProviderWrapper = Wrapper()  # the top-level data API facade
# NOTE(review): this rebinds the imported ``get_module_logger`` name to a
# manager instance -- presumably intentional, but confirm against qlib.log.
get_module_logger = _QLibLoggerManager()
def register_wrapper(wrapper, cls_or_obj, module_path=None):
    """Register an instance into a global ``Wrapper`` placeholder.

    Parameters
    ----------
    wrapper :
        The target ``Wrapper`` to register into.
    cls_or_obj :
        A class, a class name (resolved within ``module_path``), or an
        already-constructed instance.
    module_path :
        Module used to resolve ``cls_or_obj`` when it is given as a string.
    """
    target = cls_or_obj
    if isinstance(target, str):
        target = getattr(get_module_by_module_path(module_path), target)
    # Classes are instantiated with no arguments; instances are used as-is.
    instance = target() if isinstance(target, type) else target
    wrapper.register(instance)
The provided code snippet includes necessary dependencies for implementing the `register_all_wrappers` function. Write a Python function `def register_all_wrappers(C)` to solve the following problem:
register_all_wrappers
Here is the function:
def register_all_wrappers(C):
    """Instantiate every data provider described by config ``C`` and register
    it into the corresponding global wrapper (Cal, Inst, FeatureD, PITD,
    ExpressionD, DatasetD, D). Optional providers and caches are only wired
    up when present in ``C``.
    """
    logger = get_module_logger("data")
    module = get_module_by_module_path("qlib.data")

    _calendar_provider = init_instance_by_config(C.calendar_provider, module)
    if getattr(C, "calendar_cache", None) is not None:
        # FIX: keyword was ``provide=`` -- corrected to ``provider=`` for
        # consistency with the expression/dataset cache wiring below.
        _calendar_provider = init_instance_by_config(C.calendar_cache, module, provider=_calendar_provider)
    register_wrapper(Cal, _calendar_provider, "qlib.data")
    logger.debug(f"registering Cal {C.calendar_provider}-{C.calendar_cache}")

    _instrument_provider = init_instance_by_config(C.instrument_provider, module)
    register_wrapper(Inst, _instrument_provider, "qlib.data")
    logger.debug(f"registering Inst {C.instrument_provider}")

    if getattr(C, "feature_provider", None) is not None:
        feature_provider = init_instance_by_config(C.feature_provider, module)
        register_wrapper(FeatureD, feature_provider, "qlib.data")
        logger.debug(f"registering FeatureD {C.feature_provider}")

    if getattr(C, "pit_provider", None) is not None:
        pit_provider = init_instance_by_config(C.pit_provider, module)
        register_wrapper(PITD, pit_provider, "qlib.data")
        logger.debug(f"registering PITD {C.pit_provider}")

    if getattr(C, "expression_provider", None) is not None:
        # This provider is unnecessary in client provider
        _eprovider = init_instance_by_config(C.expression_provider, module)
        if getattr(C, "expression_cache", None) is not None:
            _eprovider = init_instance_by_config(C.expression_cache, module, provider=_eprovider)
        register_wrapper(ExpressionD, _eprovider, "qlib.data")
        logger.debug(f"registering ExpressionD {C.expression_provider}-{C.expression_cache}")

    _dprovider = init_instance_by_config(C.dataset_provider, module)
    if getattr(C, "dataset_cache", None) is not None:
        _dprovider = init_instance_by_config(C.dataset_cache, module, provider=_dprovider)
    register_wrapper(DatasetD, _dprovider, "qlib.data")
    logger.debug(f"registering DatasetD {C.dataset_provider}-{C.dataset_cache}")

    register_wrapper(D, C.provider, "qlib.data")
    logger.debug(f"registering D {C.provider}")
from __future__ import annotations
import pandas as pd
from typing import Union, List, TYPE_CHECKING
from qlib.utils import init_instance_by_config
def get_level_index(df: pd.DataFrame, level: Union[str, int]) -> int:
    """
    Get the position of ``level`` in the (multi-)index of ``df``.

    Parameters
    ----------
    df : pd.DataFrame
        data
    level : Union[str, int]
        index level, given by name or by position

    Returns
    -------
    int:
        The level position in the multiple index.

    Raises
    ------
    NotImplementedError
        If ``level`` is neither a str nor an int.
    """
    # FIX: the original signature read ``level=Union[str, int]``, which made the
    # typing object the *default value* instead of a type annotation.
    if isinstance(level, str):
        try:
            return df.index.names.index(level)
        except (AttributeError, ValueError):
            # NOTE: If level index is not given in the data, the default level index will be ('datetime', 'instrument')
            return ("datetime", "instrument").index(level)
    elif isinstance(level, int):
        return level
    else:
        raise NotImplementedError("This type of input is not supported")
The provided code snippet includes necessary dependencies for implementing the `fetch_df_by_index` function. Write a Python function `def fetch_df_by_index( df: pd.DataFrame, selector: Union[pd.Timestamp, slice, str, list, pd.Index], level: Union[str, int], fetch_orig=True, ) -> pd.DataFrame` to solve the following problem:
fetch data from `data` with `selector` and `level` selector are assumed to be well processed. `fetch_df_by_index` is only responsible for get the right level Parameters ---------- selector : Union[pd.Timestamp, slice, str, list] selector level : Union[int, str] the level to use the selector Returns ------- Data of the given index.
Here is the function:
def fetch_df_by_index(
    df: pd.DataFrame,
    selector: Union[pd.Timestamp, slice, str, list, pd.Index],
    level: Union[str, int],
    fetch_orig=True,
) -> pd.DataFrame:
    """
    Fetch data from ``df`` with ``selector`` applied on the given index ``level``.

    The selector is assumed to be well processed; this function is only
    responsible for applying it on the right level.

    Parameters
    ----------
    selector : Union[pd.Timestamp, slice, str, list]
        selector
    level : Union[int, str]
        the level to use the selector
    fetch_orig : bool
        when True and the selection is trivial, return ``df`` itself
        instead of a copy.

    Returns
    -------
    Data of the given index.
    """
    # level = None -> use selector directly
    if level is None or isinstance(selector, pd.MultiIndex):
        return df.loc(axis=0)[selector]
    # Place the selector on the requested level, padding the other level
    # with a take-everything slice.
    idx_slc = (selector, slice(None, None))
    if get_level_index(df, level) == 1:
        idx_slc = idx_slc[1], idx_slc[0]
    if not fetch_orig:
        return df.loc[pd.IndexSlice[idx_slc],]  # noqa: E231
    # fetch_orig: only slice when at least one component is non-trivial,
    # otherwise hand back the original frame untouched.
    if any(slc != slice(None, None) for slc in idx_slc):
        return df.loc[pd.IndexSlice[idx_slc],]  # noqa: E231
    return df
from __future__ import annotations
import pandas as pd
from typing import Union, List, TYPE_CHECKING
from qlib.utils import init_instance_by_config
class DataHandler(Serializable):
    """Signature outline of the data handler; implementations are elided in this excerpt."""

    def __init__(
        self,
        instruments=None,
        start_time=None,
        end_time=None,
        data_loader: Union[dict, str, DataLoader] = None,
        init_data=True,
        fetch_orig=True,
    ):
        """Signature stub -- implementation not shown in this excerpt."""

    def config(self, **kwargs):
        """Signature stub -- implementation not shown in this excerpt."""

    def setup_data(self, enable_cache: bool = False):
        """Signature stub -- implementation not shown in this excerpt."""

    def fetch(
        self,
        selector: Union[pd.Timestamp, slice, str, pd.Index] = slice(None, None),
        level: Union[str, int] = "datetime",
        col_set: Union[str, List[str]] = CS_ALL,
        squeeze: bool = False,
        proc_func: Callable = None,
    ) -> pd.DataFrame:
        """Signature stub -- implementation not shown in this excerpt."""

    def _fetch_data(
        self,
        data_storage,
        selector: Union[pd.Timestamp, slice, str, pd.Index] = slice(None, None),
        level: Union[str, int] = "datetime",
        col_set: Union[str, List[str]] = CS_ALL,
        squeeze: bool = False,
        proc_func: Callable = None,
    ):
        """Signature stub -- implementation not shown in this excerpt."""

    def get_cols(self, col_set=CS_ALL) -> list:
        """Signature stub -- implementation not shown in this excerpt."""

    def get_range_selector(self, cur_date: Union[pd.Timestamp, str], periods: int) -> slice:
        """Signature stub -- implementation not shown in this excerpt."""

    def get_range_iterator(
        self, periods: int, min_periods: Optional[int] = None, **kwargs
    ) -> Iterator[Tuple[pd.Timestamp, pd.DataFrame]]:
        """Signature stub -- implementation not shown in this excerpt."""
def fetch_df_by_col(df: pd.DataFrame, col_set: Union[str, List[str]]) -> pd.DataFrame:
    """Select a column set from ``df``.

    ``df`` is returned untouched for single-level columns or ``CS_RAW``; the
    outer column level is dropped for ``CS_ALL``; otherwise ``col_set`` is
    sliced on axis 1.
    """
    from .handler import DataHandler  # pylint: disable=C0415

    has_multi_cols = isinstance(df.columns, pd.MultiIndex)
    if not has_multi_cols or col_set == DataHandler.CS_RAW:
        return df
    if col_set == DataHandler.CS_ALL:
        return df.droplevel(axis=1, level=0)
    return df.loc(axis=1)[col_set]
from __future__ import annotations
import pandas as pd
from typing import Union, List, TYPE_CHECKING
from qlib.utils import init_instance_by_config
def get_level_index(df: pd.DataFrame, level: Union[str, int]) -> int:
    """
    Get the position of ``level`` in the (multi-)index of ``df``.

    Parameters
    ----------
    df : pd.DataFrame
        data
    level : Union[str, int]
        index level, given by name or by position

    Returns
    -------
    int:
        The level position in the multiple index.

    Raises
    ------
    NotImplementedError
        If ``level`` is neither a str nor an int.
    """
    # FIX: the original signature read ``level=Union[str, int]``, which made the
    # typing object the *default value* instead of a type annotation.
    if isinstance(level, str):
        try:
            return df.index.names.index(level)
        except (AttributeError, ValueError):
            # NOTE: If level index is not given in the data, the default level index will be ('datetime', 'instrument')
            return ("datetime", "instrument").index(level)
    elif isinstance(level, int):
        return level
    else:
        raise NotImplementedError("This type of input is not supported")
The provided code snippet includes necessary dependencies for implementing the `convert_index_format` function. Write a Python function `def convert_index_format(df: Union[pd.DataFrame, pd.Series], level: str = "datetime") -> Union[pd.DataFrame, pd.Series]` to solve the following problem:
Convert the format of df.MultiIndex according to the following rules: - If `level` is the first level of df.MultiIndex, do nothing - If `level` is the second level of df.MultiIndex, swap the level of index. NOTE: the number of levels of df.MultiIndex should be 2 Parameters ---------- df : Union[pd.DataFrame, pd.Series] raw DataFrame/Series level : str, optional the level that will be converted to the first one, by default "datetime" Returns ------- Union[pd.DataFrame, pd.Series] converted DataFrame/Series
Here is the function:
def convert_index_format(df: Union[pd.DataFrame, pd.Series], level: str = "datetime") -> Union[pd.DataFrame, pd.Series]:
    """
    Normalize a two-level MultiIndex so that ``level`` is the first level.

    - If ``level`` is already the first level, ``df`` is returned unchanged.
    - If it is the second level, the index levels are swapped and re-sorted.

    NOTE: the number of levels of ``df.MultiIndex`` should be 2.

    Parameters
    ----------
    df : Union[pd.DataFrame, pd.Series]
        raw DataFrame/Series
    level : str, optional
        the level that will be converted to the first one, by default "datetime"

    Returns
    -------
    Union[pd.DataFrame, pd.Series]
        converted DataFrame/Series
    """
    if get_level_index(df, level=level) == 1:
        return df.swaplevel().sort_index()
    return df
from __future__ import annotations
import pandas as pd
from typing import Union, List, TYPE_CHECKING
from qlib.utils import init_instance_by_config
class DataHandler(Serializable):
"""
The steps to using a handler
1. initialized data handler (call by `init`).
2. use the data.
The data handler try to maintain a handler with 2 level.
`datetime` & `instruments`.
Any order of the index level can be supported (The order will be implied in the data).
The order <`datetime`, `instruments`> will be used when the dataframe index name is missed.
Example of the data:
The multi-index of the columns is optional.
.. code-block:: text
feature label
$close $volume Ref($close, 1) Mean($close, 3) $high-$low LABEL0
datetime instrument
2010-01-04 SH600000 81.807068 17145150.0 83.737389 83.016739 2.741058 0.0032
SH600004 13.313329 11800983.0 13.313329 13.317701 0.183632 0.0042
SH600005 37.796539 12231662.0 38.258602 37.919757 0.970325 0.0289
Tips for improving the performance of datahandler
- Fetching data with `col_set=CS_RAW` will return the raw data and may avoid pandas from copying the data when calling `loc`
"""
_data: pd.DataFrame # underlying data.
def __init__(
self,
instruments=None,
start_time=None,
end_time=None,
data_loader: Union[dict, str, DataLoader] = None,
init_data=True,
fetch_orig=True,
):
"""
Parameters
----------
instruments :
The stock list to retrieve.
start_time :
start_time of the original data.
end_time :
end_time of the original data.
data_loader : Union[dict, str, DataLoader]
data loader to load the data.
init_data :
initialize the original data in the constructor.
fetch_orig : bool
Return the original data instead of copy if possible.
"""
# Setup data loader
assert data_loader is not None # to make start_time end_time could have None default value
# what data source to load data
self.data_loader = init_instance_by_config(
data_loader,
None if (isinstance(data_loader, dict) and "module_path" in data_loader) else data_loader_module,
accept_types=DataLoader,
)
# what data to be loaded from data source
# For IDE auto-completion.
self.instruments = instruments
self.start_time = start_time
self.end_time = end_time
self.fetch_orig = fetch_orig
if init_data:
with TimeInspector.logt("Init data"):
self.setup_data()
super().__init__()
def config(self, **kwargs):
"""
configuration of data.
# what data to be loaded from data source
This method will be used when loading pickled handler from dataset.
The data will be initialized with different time range.
"""
attr_list = {"instruments", "start_time", "end_time"}
for k, v in kwargs.items():
if k in attr_list:
setattr(self, k, v)
for attr in attr_list:
if attr in kwargs:
kwargs.pop(attr)
super().config(**kwargs)
def setup_data(self, enable_cache: bool = False):
"""
Set Up the data in case of running initialization for multiple time
It is responsible for maintaining following variable
1) self._data
Parameters
----------
enable_cache : bool
default value is false:
- if `enable_cache` == True:
the processed data will be saved on disk, and handler will load the cached data from the disk directly
when we call `init` next time
"""
# Setup data.
# _data may be with multiple column index level. The outer level indicates the feature set name
with TimeInspector.logt("Loading data"):
# make sure the fetch method is based on an index-sorted pd.DataFrame
self._data = lazy_sort_index(self.data_loader.load(self.instruments, self.start_time, self.end_time))
# TODO: cache
CS_ALL = "__all" # return all columns with single-level index column
CS_RAW = "__raw" # return raw data with multi-level index column
def fetch(
self,
selector: Union[pd.Timestamp, slice, str, pd.Index] = slice(None, None),
level: Union[str, int] = "datetime",
col_set: Union[str, List[str]] = CS_ALL,
squeeze: bool = False,
proc_func: Callable = None,
) -> pd.DataFrame:
"""
fetch data from underlying data source
Design motivation:
- providing a unified interface for underlying data.
- Potential to make the interface more friendly.
- User can improve performance when fetching data in this extra layer
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index
It can be categories as following
- fetch single index
- fetch a range of index
- a slice range
- pd.Index for specific indexes
Following conflicts may occur
- Does ["20200101", "20210101"] mean selecting this slice or these two days?
- slice have higher priorities
level : Union[str, int]
which index level to select the data
col_set : Union[str, List[str]]
- if isinstance(col_set, str):
select a set of meaningful, pd.Index columns.(e.g. features, columns)
- if col_set == CS_RAW:
the raw dataset will be returned.
- if isinstance(col_set, List[str]):
select several sets of meaningful columns, the returned data has multiple levels
proc_func: Callable
- Give a hook for processing data before fetching
- An example to explain the necessity of the hook:
- A Dataset learned some processors to process data which is related to data segmentation
- It will apply them every time when preparing data.
- The learned processor require the dataframe remains the same format when fitting and applying
- However the data format will change according to the parameters.
- So the processors should be applied to the underlayer data.
squeeze : bool
whether squeeze columns and index
Returns
-------
pd.DataFrame.
"""
return self._fetch_data(
data_storage=self._data,
selector=selector,
level=level,
col_set=col_set,
squeeze=squeeze,
proc_func=proc_func,
)
def _fetch_data(
self,
data_storage,
selector: Union[pd.Timestamp, slice, str, pd.Index] = slice(None, None),
level: Union[str, int] = "datetime",
col_set: Union[str, List[str]] = CS_ALL,
squeeze: bool = False,
proc_func: Callable = None,
):
# This method is extracted for sharing in subclasses
from .storage import BaseHandlerStorage # pylint: disable=C0415
# Following conflicts may occur
# - Does [20200101", "20210101"] mean selecting this slice or these two days?
# To solve this issue
# - slice have higher priorities (except when level is none)
if isinstance(selector, (tuple, list)) and level is not None:
# when level is None, the argument will be passed in directly
# we don't have to convert it into slice
try:
selector = slice(*selector)
except ValueError:
get_module_logger("DataHandlerLP").info(f"Fail to converting to query to slice. It will used directly")
if isinstance(data_storage, pd.DataFrame):
data_df = data_storage
if proc_func is not None:
# FIXME: fetching by time first will be more friendly to `proc_func`
# Copy in case of `proc_func` changing the data inplace....
data_df = proc_func(fetch_df_by_index(data_df, selector, level, fetch_orig=self.fetch_orig).copy())
data_df = fetch_df_by_col(data_df, col_set)
else:
# Fetch column first will be more friendly to SepDataFrame
data_df = fetch_df_by_col(data_df, col_set)
data_df = fetch_df_by_index(data_df, selector, level, fetch_orig=self.fetch_orig)
elif isinstance(data_storage, BaseHandlerStorage):
if not data_storage.is_proc_func_supported():
if proc_func is not None:
raise ValueError(f"proc_func is not supported by the storage {type(data_storage)}")
data_df = data_storage.fetch(
selector=selector, level=level, col_set=col_set, fetch_orig=self.fetch_orig
)
else:
data_df = data_storage.fetch(
selector=selector, level=level, col_set=col_set, fetch_orig=self.fetch_orig, proc_func=proc_func
)
else:
raise TypeError(f"data_storage should be pd.DataFrame|HashingStockStorage, not {type(data_storage)}")
if squeeze:
# squeeze columns
data_df = data_df.squeeze()
# squeeze index
if isinstance(selector, (str, pd.Timestamp)):
data_df = data_df.reset_index(level=level, drop=True)
return data_df
def get_cols(self, col_set=CS_ALL) -> list:
"""
get the column names
Parameters
----------
col_set : str
select a set of meaningful columns.(e.g. features, columns)
Returns
-------
list:
list of column names
"""
df = self._data.head()
df = fetch_df_by_col(df, col_set)
return df.columns.to_list()
def get_range_selector(self, cur_date: Union[pd.Timestamp, str], periods: int) -> slice:
"""
get range selector by number of periods
Args:
cur_date (pd.Timestamp or str): current date
periods (int): number of periods
"""
trading_dates = self._data.index.unique(level="datetime")
cur_loc = trading_dates.get_loc(cur_date)
pre_loc = cur_loc - periods + 1
if pre_loc < 0:
warnings.warn("`periods` is too large. the first date will be returned.")
pre_loc = 0
ref_date = trading_dates[pre_loc]
return slice(ref_date, cur_date)
    def get_range_iterator(
        self, periods: int, min_periods: Optional[int] = None, **kwargs
    ) -> Iterator[Tuple[pd.Timestamp, pd.DataFrame]]:
        """
        get an iterator of sliced data with given periods
        Args:
            periods (int): number of periods.
            min_periods (int): minimum periods for sliced dataframe.
            kwargs (dict): will be passed to `self.fetch`.
        """
        trading_dates = self._data.index.unique(level="datetime")
        if min_periods is None:
            min_periods = periods
        # NOTE(review): iteration starts at index `min_periods`, i.e. the first
        # `min_periods` dates are skipped entirely — confirm the intended boundary.
        for cur_date in trading_dates[min_periods:]:
            selector = self.get_range_selector(cur_date, periods)
            yield cur_date, self.fetch(selector, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `init_task_handler` function. Write a Python function `def init_task_handler(task: dict) -> DataHandler` to solve the following problem:
initialize the handler part of the task **inplace** Parameters ---------- task : dict the task to be handled Returns ------- Union[DataHandler, None]: returns the initialized data handler instance
Here is the function:
def init_task_handler(task: dict) -> DataHandler:
    """
    initialize the handler part of the task **inplace**

    Parameters
    ----------
    task : dict
        the task to be handled; ``task["dataset"]["kwargs"]["handler"]`` is
        replaced with the initialized handler instance.

    Returns
    -------
    DataHandler:
        the initialized handler instance.

    Raises
    ------
    ValueError
        if the task does not contain a handler configuration.
    """
    # avoid recursive import
    from .handler import DataHandler  # pylint: disable=C0415

    h_conf = task["dataset"]["kwargs"].get("handler")
    if h_conf is None:
        raise ValueError("The task does not contain a handler part.")
    handler = init_instance_by_config(h_conf, accept_types=DataHandler)
    task["dataset"]["kwargs"]["handler"] = handler
    return handler
19,664 | import abc
from typing import Union, Text, Optional
import numpy as np
import pandas as pd
from qlib.utils.data import robust_zscore, zscore
from ...constant import EPS
from .utils import fetch_df_by_index
from ...utils.serial import Serializable
from ...utils.paral import datetime_groupby_apply
from qlib.data.inst_processor import InstProcessor
from qlib.data import D
The provided code snippet includes necessary dependencies for implementing the `get_group_columns` function. Write a Python function `def get_group_columns(df: pd.DataFrame, group: Union[Text, None])` to solve the following problem:
get a group of columns from multi-index columns DataFrame Parameters ---------- df : pd.DataFrame with multi of columns. group : str the name of the feature group, i.e. the first level value of the group index.
Here is the function:
def get_group_columns(df: pd.DataFrame, group: Union[Text, None]):
    """
    Get a group of columns from a DataFrame with multi-level columns.

    Parameters
    ----------
    df : pd.DataFrame
        with multi of columns.
    group : str or None
        the name of the feature group, i.e. the first level value of the
        group index; ``None`` selects every column.
    """
    if group is not None:
        # get_loc on the first column level yields the positions of the group.
        return df.columns[df.columns.get_loc(group)]
    return df.columns
19,665 | from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from typing import Union, List, Type
from scipy.stats import percentileofscore
from .base import Expression, ExpressionOps, Feature, PFeature
from ..log import get_module_logger
from ..utils import get_callable_kwargs
OpsList = [
ChangeInstrument,
Rolling,
Ref,
Max,
Min,
Sum,
Mean,
Std,
Var,
Skew,
Kurt,
Med,
Mad,
Slope,
Rsquare,
Resi,
Rank,
Quantile,
Count,
EMA,
WMA,
Corr,
Cov,
Delta,
Abs,
Sign,
Log,
Power,
Add,
Sub,
Mul,
Div,
Greater,
Less,
And,
Or,
Not,
Gt,
Ge,
Lt,
Le,
Eq,
Ne,
Mask,
IdxMax,
IdxMin,
If,
Feature,
PFeature,
] + [TResample]
Operators = OpsWrapper()
get_module_logger = _QLibLoggerManager()
class P(ElemOperator):
    """
    Collapse a point-in-time (PIT) expression into a normal daily feature:
    for every calendar index in [start_index, end_index] the value is the
    latest observable value of the wrapped expression as of that day.
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        _calendar = Cal.calendar(freq=freq)
        resample_data = np.empty(end_index - start_index + 1, dtype="float32")

        for cur_index in range(start_index, end_index + 1):
            cur_time = _calendar[cur_index]
            # To load expression accurately, more historical data are required
            start_ws, end_ws = self.feature.get_extended_window_size()
            if end_ws > 0:
                raise ValueError(
                    "PIT database does not support referring to future period (e.g. expressions like `Ref('$$roewa_q', -1)` are not supported"
                )

            # The calculated value will always the last element, so the end_offset is zero.
            try:
                s = self._load_feature(instrument, -start_ws, 0, cur_time)
                resample_data[cur_index - start_index] = s.iloc[-1] if len(s) > 0 else np.nan
            except FileNotFoundError:
                # Missing period data: warn and return an empty series instead of failing.
                get_module_logger("base").warning(f"WARN: period data not found for {str(self)}")
                return pd.Series(dtype="float32", name=str(self))

        resample_series = pd.Series(
            resample_data, index=pd.RangeIndex(start_index, end_index + 1), dtype="float32", name=str(self)
        )
        return resample_series

    def _load_feature(self, instrument, start_index, end_index, cur_time):
        # Hook overridden by subclasses (e.g. `PRef`) to change how the
        # underlying feature is loaded.
        return self.feature.load(instrument, start_index, end_index, cur_time)

    def get_longest_back_rolling(self):
        # The period data will collapse as a normal feature. So no extending and looking back
        return 0

    def get_extended_window_size(self):
        # The period data will collapse as a normal feature. So no extending and looking back
        return 0, 0
class PRef(P):
    """`P` pinned to an explicit reporting period: the PIT value of a fixed `period`."""

    def __init__(self, feature, period):
        super().__init__(feature)
        self.period = period

    def __str__(self):
        return f"{super().__str__()}[{self.period}]"

    def _load_feature(self, instrument, start_index, end_index, cur_time):
        # Forward the fixed reporting period to the underlying feature loader.
        return self.feature.load(instrument, start_index, end_index, cur_time, self.period)
The provided code snippet includes necessary dependencies for implementing the `register_all_ops` function. Write a Python function `def register_all_ops(C)` to solve the following problem:
register all operator
Here is the function:
def register_all_ops(C):
    """
    Register all built-in operators (plus the PIT operators `P`/`PRef`) into
    the global `Operators` registry, followed by any user-defined operators
    found in `C.custom_ops`.
    """
    logger = get_module_logger("ops")

    from qlib.data.pit import P, PRef  # pylint: disable=C0415

    # Reset first so repeated initialization does not accumulate duplicates.
    Operators.reset()
    Operators.register(OpsList + [P, PRef])

    if getattr(C, "custom_ops", None) is not None:
        Operators.register(C.custom_ops)
        logger.debug("register custom operator {}".format(C.custom_ops))
19,666 | import logging
import sys
import os
from pathlib import Path
import qlib
import fire
import ruamel.yaml as yaml
from qlib.config import C
from qlib.model.trainer import task_train
from qlib.utils.data import update_config
from qlib.log import get_module_logger
from qlib.utils import set_log_with_config
def workflow(config_path, experiment_name="workflow", uri_folder="mlruns"):
    """
    This is a Qlib CLI entrance.
    User can run the whole Quant research workflow defined by a configure file
    - the code is located here ``qlib/workflow/cli.py``

    User can specify a base_config file in your workflow.yml file by adding "BASE_CONFIG_PATH".
    Qlib will load the configuration in BASE_CONFIG_PATH first, and the user only needs to update the custom fields
    in their own workflow.yml file.

    For examples:

        qlib_init:
            provider_uri: "~/.qlib/qlib_data/cn_data"
            region: cn
        BASE_CONFIG_PATH: "workflow_config_lightgbm_Alpha158_csi500.yaml"
        market: csi300
    """
    with open(config_path) as fp:
        config = yaml.safe_load(fp)

    base_config_path = config.get("BASE_CONFIG_PATH", None)
    if base_config_path:
        # NOTE(review): `logger` is assumed to be a module-level logger defined
        # elsewhere in this file — confirm.
        logger.info(f"Use BASE_CONFIG_PATH: {base_config_path}")
        base_config_path = Path(base_config_path)
        # it will find config file in absolute path and relative path
        if base_config_path.exists():
            path = base_config_path
        else:
            logger.info(
                f"Can't find BASE_CONFIG_PATH base on: {Path.cwd()}, "
                f"try using relative path to config path: {Path(config_path).absolute()}"
            )
            relative_path = Path(config_path).absolute().parent.joinpath(base_config_path)
            if relative_path.exists():
                path = relative_path
            else:
                raise FileNotFoundError(f"Can't find the BASE_CONFIG file: {base_config_path}")
        with open(path) as fp:
            base_config = yaml.safe_load(fp)
        logger.info(f"Load BASE_CONFIG_PATH succeed: {path.resolve()}")
        # the user's own config overrides the base config
        config = update_config(base_config, config)

    # config the `sys` section
    sys_config(config, config_path)

    if "exp_manager" in config.get("qlib_init"):
        qlib.init(**config.get("qlib_init"))
    else:
        # no explicit experiment manager: default to a file-backed one under <cwd>/<uri_folder>
        exp_manager = C["exp_manager"]
        exp_manager["kwargs"]["uri"] = "file:" + str(Path(os.getcwd()).resolve() / uri_folder)
        qlib.init(**config.get("qlib_init"), exp_manager=exp_manager)

    if "experiment_name" in config:
        experiment_name = config["experiment_name"]
    recorder = task_train(config.get("task"), experiment_name=experiment_name)

    # persist the full (merged) config alongside the run's artifacts
    recorder.save_objects(config=config)
def run():
    # CLI entry point: delegate argument parsing to python-fire.
    fire.Fire(workflow)
19,667 | import atexit
import logging
import sys
import traceback
from ..log import get_module_logger
from . import R
from .recorder import Recorder
def experiment_exception_hook(exc_type, value, tb):
    """
    End an experiment with status to be "FAILED". This exception tries to catch those uncaught exception
    and end the experiment automatically.

    Parameters
    ----------
    exc_type
        Exception type
    value
        Exception's value
    tb
        Exception's traceback
    """
    # NOTE(review): `logger` is assumed to be a module-level logger — confirm.
    logger.error(f"An exception has been raised[{exc_type.__name__}: {value}].")

    # Same as original format
    traceback.print_tb(tb)
    print(f"{exc_type.__name__}: {value}")

    # Mark the active experiment run as FAILED before the interpreter exits.
    R.end_exp(recorder_status=Recorder.STATUS_FA)
import sys
if sys.version_info >= (3, 9):
from typing import Annotated
QlibRecorderWrapper = Annotated[QlibRecorder, RecorderWrapper]
else:
QlibRecorderWrapper = QlibRecorder
class Recorder:
    """
    This is the `Recorder` class for logging the experiments. The API is designed similar to mlflow.
    (The link: https://mlflow.org/docs/latest/python_api/mlflow.html)
    The status of the recorder can be SCHEDULED, RUNNING, FINISHED, FAILED.
    """

    # status type
    STATUS_S = "SCHEDULED"
    STATUS_R = "RUNNING"
    STATUS_FI = "FINISHED"
    STATUS_FA = "FAILED"

    def __init__(self, experiment_id, name):
        self.id = None  # assigned by the backend once the run is created
        self.name = name
        self.experiment_id = experiment_id
        self.start_time = None
        self.end_time = None
        self.status = Recorder.STATUS_S

    def __repr__(self):
        return "{name}(info={info})".format(name=self.__class__.__name__, info=self.info)

    def __str__(self):
        return str(self.info)

    def __hash__(self) -> int:
        return hash(self.info["id"])

    @property
    def info(self):
        # FIX: `info` is consumed as an attribute (subscripted in `__hash__`,
        # interpolated in `__repr__`), so it must be a property, not a method.
        output = dict()
        output["class"] = "Recorder"
        output["id"] = self.id
        output["name"] = self.name
        output["experiment_id"] = self.experiment_id
        output["start_time"] = self.start_time
        output["end_time"] = self.end_time
        output["status"] = self.status
        return output

    def set_recorder_name(self, rname):
        self.recorder_name = rname

    def save_objects(self, local_path=None, artifact_path=None, **kwargs):
        """
        Save objects such as prediction file or model checkpoints to the artifact URI. User
        can save object through keywords arguments (name:value).
        Please refer to the docs of qlib.workflow:R.save_objects
        Parameters
        ----------
        local_path : str
            if provided, them save the file or directory to the artifact URI.
        artifact_path=None : str
            the relative path for the artifact to be stored in the URI.
        """
        raise NotImplementedError("Please implement the `save_objects` method.")

    def load_object(self, name):
        """
        Load objects such as prediction file or model checkpoints.
        Parameters
        ----------
        name : str
            name of the file to be loaded.
        Returns
        -------
        The saved object.
        """
        raise NotImplementedError("Please implement the `load_object` method.")

    def start_run(self):
        """
        Start running or resuming the Recorder. The return value can be used as a context manager within a `with` block;
        otherwise, you must call end_run() to terminate the current run. (See `ActiveRun` class in mlflow)
        Returns
        -------
        An active running object (e.g. mlflow.ActiveRun object).
        """
        raise NotImplementedError("Please implement the `start_run` method.")

    def end_run(self):
        """
        End an active Recorder.
        """
        raise NotImplementedError("Please implement the `end_run` method.")

    def log_params(self, **kwargs):
        """
        Log a batch of params for the current run.
        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as parameters.
        """
        raise NotImplementedError("Please implement the `log_params` method.")

    def log_metrics(self, step=None, **kwargs):
        """
        Log multiple metrics for the current run.
        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as metrics.
        """
        raise NotImplementedError("Please implement the `log_metrics` method.")

    def log_artifact(self, local_path: str, artifact_path: Optional[str] = None):
        """
        Log a local file or directory as an artifact of the currently active run.
        Parameters
        ----------
        local_path : str
            Path to the file to write.
        artifact_path : Optional[str]
            If provided, the directory in ``artifact_uri`` to write to.
        """
        # FIX: message previously named `log_metrics` by mistake.
        raise NotImplementedError("Please implement the `log_artifact` method.")

    def set_tags(self, **kwargs):
        """
        Log a batch of tags for the current run.
        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as tags.
        """
        raise NotImplementedError("Please implement the `set_tags` method.")

    def delete_tags(self, *keys):
        """
        Delete some tags from a run.
        Parameters
        ----------
        keys : series of strs of the keys
            all the name of the tag to be deleted.
        """
        raise NotImplementedError("Please implement the `delete_tags` method.")

    def list_artifacts(self, artifact_path: str = None):
        """
        List all the artifacts of a recorder.
        Parameters
        ----------
        artifact_path : str
            the relative path for the artifact to be stored in the URI.
        Returns
        -------
        A list of artifacts information (name, path, etc.) that being stored.
        """
        raise NotImplementedError("Please implement the `list_artifacts` method.")

    def download_artifact(self, path: str, dst_path: Optional[str] = None) -> str:
        """
        Download an artifact file or directory from a run to a local directory if applicable,
        and return a local path for it.
        Parameters
        ----------
        path : str
            Relative source path to the desired artifact.
        dst_path : Optional[str]
            Absolute path of the local filesystem destination directory to which to
            download the specified artifacts. This directory must already exist.
            If unspecified, the artifacts will either be downloaded to a new
            uniquely-named directory on the local filesystem.
        Returns
        -------
        str
            Local path of desired artifact.
        """
        # FIX: message previously named `list_artifacts` by mistake.
        raise NotImplementedError("Please implement the `download_artifact` method.")

    def list_metrics(self):
        """
        List all the metrics of a recorder.
        Returns
        -------
        A dictionary of metrics that being stored.
        """
        raise NotImplementedError("Please implement the `list_metrics` method.")

    def list_params(self):
        """
        List all the params of a recorder.
        Returns
        -------
        A dictionary of params that being stored.
        """
        raise NotImplementedError("Please implement the `list_params` method.")

    def list_tags(self):
        """
        List all the tags of a recorder.
        Returns
        -------
        A dictionary of tags that being stored.
        """
        raise NotImplementedError("Please implement the `list_tags` method.")
The provided code snippet includes necessary dependencies for implementing the `experiment_exit_handler` function. Write a Python function `def experiment_exit_handler()` to solve the following problem:
Method for handling the experiment when any unusual program ending occurs. The `atexit` handler should be put in the last, since, as long as the program ends, it will be called. Thus, if any exception or user interruption occurs beforehand, we should handle them first. Once `R` is ended, another call of `R.end_exp` will not take effect. Limitations: - If pdb is used in your program, excepthook will not be triggered when it ends. The status will be finished
Here is the function:
def experiment_exit_handler():
    """
    Method for handling the experiment when any unusual program ending occurs.
    The `atexit` handler should be put in the last, since, as long as the program ends, it will be called.
    Thus, if any exception or user interruption occurs beforehand, we should handle them first. Once `R` is
    ended, another call of `R.end_exp` will not take effect.

    Limitations:
    - If pdb is used in your program, excepthook will not be triggered when it ends. The status will be finished
    """
    sys.excepthook = experiment_exception_hook  # handle uncaught exception
    # Registered after the excepthook on purpose: the exception path must run
    # first (marking FAILED); a later `R.end_exp` call is then a no-op.
    atexit.register(R.end_exp, recorder_status=Recorder.STATUS_FI)  # will not take effect if experiment ends
19,668 | import bisect
from copy import deepcopy
import pandas as pd
from qlib.data import D
from qlib.utils import hash_args
from qlib.utils.mod import init_instance_by_config
from qlib.workflow import R
from qlib.config import C
from qlib.log import get_module_logger
from pymongo import MongoClient
from pymongo.database import Database
from typing import Union
from pathlib import Path
C = QlibConfig(_default_config)
get_module_logger = _QLibLoggerManager()
The provided code snippet includes necessary dependencies for implementing the `get_mongodb` function. Write a Python function `def get_mongodb() -> Database` to solve the following problem:
Get database in MongoDB, which means you need to declare the address and the name of a database at first. For example: Using qlib.init(): .. code-block:: python mongo_conf = { "task_url": task_url, # your MongoDB url "task_db_name": task_db_name, # database name } qlib.init(..., mongo=mongo_conf) After qlib.init(): .. code-block:: python C["mongo"] = { "task_url" : "mongodb://localhost:27017/", "task_db_name" : "rolling_db" } Returns: Database: the Database instance
Here is the function:
def get_mongodb() -> Database:
    """
    Get database in MongoDB, which means you need to declare the address and the name of a database at first.

    For example:

        Using qlib.init():

            mongo_conf = {
                "task_url": task_url,  # your MongoDB url
                "task_db_name": task_db_name,  # database name
            }
            qlib.init(..., mongo=mongo_conf)

        After qlib.init():

            C["mongo"] = {
                "task_url" : "mongodb://localhost:27017/",
                "task_db_name" : "rolling_db"
            }

    Returns:
        Database: the Database instance
    """
    logger = get_module_logger("task")
    try:
        cfg = C["mongo"]
    except KeyError:
        logger.error("Please configure `C['mongo']` before using TaskManager")
        raise
    logger.info(f"mongo config:{cfg}")
    return MongoClient(cfg["task_url"]).get_database(name=cfg["task_db_name"])
19,669 | import bisect
from copy import deepcopy
import pandas as pd
from qlib.data import D
from qlib.utils import hash_args
from qlib.utils.mod import init_instance_by_config
from qlib.workflow import R
from qlib.config import C
from qlib.log import get_module_logger
from pymongo import MongoClient
from pymongo.database import Database
from typing import Union
from pathlib import Path
R: QlibRecorderWrapper = RecorderWrapper()
The provided code snippet includes necessary dependencies for implementing the `list_recorders` function. Write a Python function `def list_recorders(experiment, rec_filter_func=None)` to solve the following problem:
List all recorders which can pass the filter in an experiment. Args: experiment (str or Experiment): the name of an Experiment or an instance rec_filter_func (Callable, optional): return True to retain the given recorder. Defaults to None. Returns: dict: a dict {rid: recorder} after filtering.
Here is the function:
def list_recorders(experiment, rec_filter_func=None):
    """
    List all recorders in an experiment which pass the given filter.

    Args:
        experiment (str or Experiment): the name of an Experiment or an instance
        rec_filter_func (Callable, optional): return True to retain the given recorder. Defaults to None.

    Returns:
        dict: a dict {rid: recorder} after filtering.
    """
    # Resolve an experiment name to the Experiment instance when needed.
    exp = R.get_exp(experiment_name=experiment) if isinstance(experiment, str) else experiment
    keep = (lambda rec: True) if rec_filter_func is None else rec_filter_func
    return {rid: rec for rid, rec in exp.list_recorders().items() if keep(rec)}
19,670 | import bisect
from copy import deepcopy
import pandas as pd
from qlib.data import D
from qlib.utils import hash_args
from qlib.utils.mod import init_instance_by_config
from qlib.workflow import R
from qlib.config import C
from qlib.log import get_module_logger
from pymongo import MongoClient
from pymongo.database import Database
from typing import Union
from pathlib import Path
def hash_args(*args):
    """
    Return a stable MD5 hex digest of the positional arguments.

    `json.dumps` keeps dict keys sorted, so logically-equal arguments hash to
    the same value; non-JSON-serializable values fall back to their str() form.
    """
    serialized = json.dumps(args, sort_keys=True, default=str)
    return hashlib.md5(serialized.encode()).hexdigest()
def init_instance_by_config(
    config: InstConf,
    default_module=None,
    accept_types: Union[type, Tuple[type]] = (),
    try_kwargs: Dict = {},  # NOTE(review): mutable default — harmless here (never mutated), but non-idiomatic
    **kwargs,
) -> Any:
    """
    get initialized instance with config
    Parameters
    ----------
    config : InstConf
    default_module : Python module
        Optional. It should be a python module.
        NOTE: the "module_path" will be override by `module` arguments
        This function will load class from the config['module_path'] first.
        If config['module_path'] doesn't exists, it will load the class from default_module.
    accept_types: Union[type, Tuple[type]]
        Optional. If the config is a instance of specific type, return the config directly.
        This will be passed into the second parameter of isinstance.
    try_kwargs: Dict
        Try to pass in kwargs in `try_kwargs` when initialized the instance
        If error occurred, it will fail back to initialization without try_kwargs.
    Returns
    -------
    object:
        An initialized object based on the config info
    """
    # Already an instance of an accepted type: pass through unchanged.
    if isinstance(config, accept_types):
        return config

    # A string URL or Path means "unpickle this object from disk".
    if isinstance(config, (str, Path)):
        if isinstance(config, str):
            # path like 'file:///<path to pickle file>/obj.pkl'
            pr = urlparse(config)
            if pr.scheme == "file":
                # urlparse splits "file://a/b" into netloc "a" + path "/b"; rejoin them
                pr_path = os.path.join(pr.netloc, pr.path) if bool(pr.path) else pr.netloc
                with open(os.path.normpath(pr_path), "rb") as f:
                    return pickle.load(f)
        else:
            with config.open("rb") as f:
                return pickle.load(f)

    # Otherwise treat the config as a (class, kwargs) description.
    klass, cls_kwargs = get_callable_kwargs(config, default_module=default_module)

    try:
        return klass(**cls_kwargs, **try_kwargs, **kwargs)
    except (TypeError,):
        # TypeError for handling errors like
        # 1: `XXX() got multiple values for keyword argument 'YYY'`
        # 2: `XXX() got an unexpected keyword argument 'YYY'
        return klass(**cls_kwargs, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `replace_task_handler_with_cache` function. Write a Python function `def replace_task_handler_with_cache(task: dict, cache_dir: Union[str, Path] = ".") -> dict` to solve the following problem:
Replace the handler in task with a cache handler. It will automatically cache the file and save it in cache_dir. >>> import qlib >>> qlib.auto_init() >>> import datetime >>> # it is simplified task >>> task = {"dataset": {"kwargs":{'handler': {'class': 'Alpha158', 'module_path': 'qlib.contrib.data.handler', 'kwargs': {'start_time': datetime.date(2008, 1, 1), 'end_time': datetime.date(2020, 8, 1), 'fit_start_time': datetime.date(2008, 1, 1), 'fit_end_time': datetime.date(2014, 12, 31), 'instruments': 'CSI300'}}}}} >>> new_task = replace_task_handler_with_cache(task) >>> print(new_task) {'dataset': {'kwargs': {'handler': 'file...Alpha158.3584f5f8b4.pkl'}}}
Here is the function:
def replace_task_handler_with_cache(task: dict, cache_dir: Union[str, Path] = ".") -> dict:
    """
    Replace the handler in task with a cache handler.
    It will automatically cache the file and save it in cache_dir.

    >>> import qlib
    >>> qlib.auto_init()
    >>> import datetime
    >>> # it is simplified task
    >>> task = {"dataset": {"kwargs":{'handler': {'class': 'Alpha158', 'module_path': 'qlib.contrib.data.handler', 'kwargs': {'start_time': datetime.date(2008, 1, 1), 'end_time': datetime.date(2020, 8, 1), 'fit_start_time': datetime.date(2008, 1, 1), 'fit_end_time': datetime.date(2014, 12, 31), 'instruments': 'CSI300'}}}}}
    >>> new_task = replace_task_handler_with_cache(task)
    >>> print(new_task)
    {'dataset': {'kwargs': {'handler': 'file...Alpha158.3584f5f8b4.pkl'}}}
    """
    cache_dir = Path(cache_dir)
    task = deepcopy(task)  # never mutate the caller's task
    handler = task["dataset"]["kwargs"]["handler"]
    if isinstance(handler, dict):
        # `digest` (renamed from `hash`, which shadowed the builtin) keys the cache file.
        digest = hash_args(handler)
        h_path = cache_dir / f"{handler['class']}.{digest[:10]}.pkl"
        if not h_path.exists():
            h = init_instance_by_config(handler)
            h.to_pickle(h_path, dump_all=True)
        task["dataset"]["kwargs"]["handler"] = f"file://{h_path}"
    return task
19,671 | import abc
import copy
import pandas as pd
from typing import Dict, List, Union, Callable
from qlib.utils import transform_end_date
from .utils import TimeAdjuster
class TaskGen(metaclass=abc.ABCMeta):
    """
    The base class for generating different tasks
    Example 1:
        input: a specific task template and rolling steps
        output: rolling version of the tasks
    Example 2:
        input: a specific task template and losses list
        output: a set of tasks with different losses
    """

    def generate(self, task: dict) -> List[dict]:
        """
        Generate different tasks based on a task template

        Parameters
        ----------
        task: dict
            a task template

        Returns
        -------
        typing.List[dict]:
            A list of tasks
        """
        # NOTE(review): presumably meant to be abstract (the class uses
        # abc.ABCMeta but `@abc.abstractmethod` is missing); as written it
        # silently returns None — confirm.

    def __call__(self, *args, **kwargs):
        """
        This is just a syntactic sugar for generate
        """
        return self.generate(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `task_generator` function. Write a Python function `def task_generator(tasks, generators) -> list` to solve the following problem:
Use a list of TaskGen and a list of task templates to generate different tasks. For examples: There are 3 task templates a,b,c and 2 TaskGen A,B. A will generates 2 tasks from a template and B will generates 3 tasks from a template. task_generator([a, b, c], [A, B]) will finally generate 3*2*3 = 18 tasks. Parameters ---------- tasks : List[dict] or dict a list of task templates or a single task generators : List[TaskGen] or TaskGen a list of TaskGen or a single TaskGen Returns ------- list a list of tasks
Here is the function:
def task_generator(tasks, generators) -> list:
    """
    Expand task templates with a sequence of TaskGen instances.

    Each generator is applied to every task produced so far, so the number of
    results is multiplicative.  For example: with 3 templates a, b, c and two
    generators A (2 tasks per template) and B (3 tasks per template),
    task_generator([a, b, c], [A, B]) yields 3 * 2 * 3 = 18 tasks.

    Parameters
    ----------
    tasks : List[dict] or dict
        a list of task templates or a single task
    generators : List[TaskGen] or TaskGen
        a list of TaskGen or a single TaskGen

    Returns
    -------
    list
        a list of tasks
    """
    # Normalize both arguments to lists so the expansion loop below is uniform.
    if isinstance(tasks, dict):
        tasks = [tasks]
    if isinstance(generators, TaskGen):
        generators = [generators]

    # Apply each generator to every task produced by the previous round.
    for generator in generators:
        tasks = [new_task for template in tasks for new_task in generator.generate(template)]
    return tasks
19,672 | import abc
import copy
import pandas as pd
from typing import Dict, List, Union, Callable
from qlib.utils import transform_end_date
from .utils import TimeAdjuster
The provided code snippet includes necessary dependencies for implementing the `handler_mod` function. Write a Python function `def handler_mod(task: dict, rolling_gen)` to solve the following problem:
Helps to modify the handler end time when using RollingGen. It tries to handle the following case - Handler's data end_time is earlier than dataset's test_data's segments. - To handle this, handler's data's end_time is extended. If the handler's end_time is None, then it is not necessary to change its end time. Args: task (dict): a task template rg (RollingGen): an instance of RollingGen
Here is the function:
def handler_mod(task: dict, rolling_gen):
    """
    Helps to modify the handler end time when using RollingGen.

    It tries to handle the following case:

    - The handler's data end_time is earlier than the end of the dataset's
      test segment.
    - To handle this, the handler's end_time is extended to cover it.

    If the handler's end_time is None, it is not necessary to change it.

    Args:
        task (dict): a task template (modified in place)
        rolling_gen (RollingGen): an instance of RollingGen
    """
    try:
        dataset_kwargs = task["dataset"]["kwargs"]
        handler_kwargs = dataset_kwargs["handler"]["kwargs"]
        test_end = dataset_kwargs["segments"][rolling_gen.test_key][1]
        # A negative interval means the handler stops before the test segment
        # ends, so more data must be loaded.
        if rolling_gen.ta.cal_interval(handler_kwargs["end_time"], test_end) < 0:
            handler_kwargs["end_time"] = copy.deepcopy(test_end)
    except KeyError:
        # Maybe dataset does not have a handler; then do nothing.
        pass
    except TypeError:
        # The handler may be a string (e.g. a dumped file like file:///<file>/),
        # in which case `"handler.pkl"["kwargs"]` raises TypeError.
        pass
19,673 | import abc
import copy
import pandas as pd
from typing import Dict, List, Union, Callable
from qlib.utils import transform_end_date
from .utils import TimeAdjuster
class TimeAdjuster:
    """
    Find appropriate date and adjust date.

    Wraps qlib's trading calendar (``D.calendar``) and exposes index-based
    arithmetic on trading days: aligning arbitrary timestamps to trading
    dates, measuring intervals, truncating and shifting segments.
    """

    def __init__(self, future=True, end_time=None):
        # NOTE(review): `D` is qlib's data provider; it is imported at module
        # level (not visible in this chunk) — confirm against the file header.
        self._future = future
        self.cals = D.calendar(future=future, end_time=end_time)

    def set_end_time(self, end_time=None):
        """
        Set end time. None for use calendar's end time.

        Args:
            end_time
        """
        # Reload the calendar with the new boundary; `future` is kept from __init__.
        self.cals = D.calendar(future=self._future, end_time=end_time)

    def get(self, idx: int):
        """
        Get datetime by index.

        Parameters
        ----------
        idx : int
            index of the calendar

        Returns
        -------
        pd.Timestamp or None
            None when `idx` is None (unbounded) or beyond the calendar's end.
        """
        if idx is None or idx >= len(self.cals):
            return None
        return self.cals[idx]

    def max(self) -> pd.Timestamp:
        """
        Return the max calendar datetime
        """
        return max(self.cals)

    def align_idx(self, time_point, tp_type="start") -> int:
        """
        Align the index of time_point in the calendar.

        Parameters
        ----------
        time_point
        tp_type : str
            "start": first trading day >= time_point;
            "end": last trading day <= time_point.

        Returns
        -------
        index : int
        """
        if time_point is None:
            # `None` indicates unbounded index/boarder
            return None
        time_point = pd.Timestamp(time_point)
        # NOTE(review): relies on `bisect` being imported at module level
        # (not visible in this chunk) — confirm against the file header.
        if tp_type == "start":
            idx = bisect.bisect_left(self.cals, time_point)
        elif tp_type == "end":
            idx = bisect.bisect_right(self.cals, time_point) - 1
        else:
            raise NotImplementedError(f"This type of input is not supported")
        return idx

    def cal_interval(self, time_point_A, time_point_B) -> int:
        """
        Calculate the trading day interval (time_point_A - time_point_B)

        Args:
            time_point_A : time_point_A
            time_point_B : time_point_B (is the past of time_point_A)

        Returns:
            int: the interval between A and B
        """
        return self.align_idx(time_point_A) - self.align_idx(time_point_B)

    def align_time(self, time_point, tp_type="start") -> pd.Timestamp:
        """
        Align time_point to trade date of calendar

        Args:
            time_point
                Time point
            tp_type : str
                time point type (`"start"`, `"end"`)

        Returns:
            pd.Timestamp
        """
        if time_point is None:
            return None
        return self.cals[self.align_idx(time_point, tp_type=tp_type)]

    def align_seg(self, segment: Union[dict, tuple]) -> Union[dict, tuple]:
        """
        Align the given date to the trade date

        for example:

            .. code-block:: python

                input: {'train': ('2008-01-01', '2014-12-31'), 'valid': ('2015-01-01', '2016-12-31'), 'test': ('2017-01-01', '2020-08-01')}

                output: {'train': (Timestamp('2008-01-02 00:00:00'), Timestamp('2014-12-31 00:00:00')),
                        'valid': (Timestamp('2015-01-05 00:00:00'), Timestamp('2016-12-30 00:00:00')),
                        'test': (Timestamp('2017-01-03 00:00:00'), Timestamp('2020-07-31 00:00:00'))}

        Parameters
        ----------
        segment

        Returns
        -------
        Union[dict, tuple]: the start and end trade date (pd.Timestamp) between the given start and end date.
        """
        if isinstance(segment, dict):
            # Recurse into each named segment (e.g. train/valid/test).
            return {k: self.align_seg(seg) for k, seg in segment.items()}
        elif isinstance(segment, (tuple, list)):
            # Start snaps forward, end snaps backward, so the aligned segment
            # stays inside the requested range.
            return self.align_time(segment[0], tp_type="start"), self.align_time(segment[1], tp_type="end")
        else:
            raise NotImplementedError(f"This type of input is not supported")

    def truncate(self, segment: tuple, test_start, days: int) -> tuple:
        """
        Truncate the segment based on the test_start date

        Parameters
        ----------
        segment : tuple
            time segment
        test_start
        days : int
            The trading days to be truncated
            the data in this segment may need 'days' data
            `days` are based on the `test_start`.
            For example, if the label contains the information of 2 days in the near future, the prediction horizon 1 day.
            (e.g. the prediction target is `Ref($close, -2)/Ref($close, -1) - 1`)
            the days should be 2 + 1 == 3 days.

        Returns
        ---------
        tuple: new segment
        """
        test_idx = self.align_idx(test_start)
        if isinstance(segment, tuple):
            new_seg = []
            for time_point in segment:
                # Cap each boundary at `days` trading days before test_start
                # so the truncated segment cannot leak into the test horizon.
                tp_idx = min(self.align_idx(time_point), test_idx - days)
                assert tp_idx > 0
                new_seg.append(self.get(tp_idx))
            return tuple(new_seg)
        else:
            raise NotImplementedError(f"This type of input is not supported")

    # Rolling styles: "sliding" moves both boundaries; "expanding" only the end.
    SHIFT_SD = "sliding"
    SHIFT_EX = "expanding"

    def _add_step(self, index, step):
        # None means an unbounded boundary and stays unbounded after shifting.
        if index is None:
            return None
        return index + step

    def shift(self, seg: tuple, step: int, rtype=SHIFT_SD) -> tuple:
        """
        Shift the datatime of segment

        If there are None (which indicates unbounded index) in the segment, this method will return None.

        Parameters
        ----------
        seg :
            datetime segment
        step : int
            rolling step
        rtype : str
            rolling type ("sliding" or "expanding")

        Returns
        --------
        tuple: new segment

        Raises
        ------
        KeyError:
            shift will raise error if the index(both start and end) is out of self.cal
        """
        if isinstance(seg, tuple):
            start_idx, end_idx = self.align_idx(seg[0], tp_type="start"), self.align_idx(seg[1], tp_type="end")
            if rtype == self.SHIFT_SD:
                start_idx = self._add_step(start_idx, step)
                end_idx = self._add_step(end_idx, step)
            elif rtype == self.SHIFT_EX:
                end_idx = self._add_step(end_idx, step)
            else:
                raise NotImplementedError(f"This type of input is not supported")
            # A shifted start beyond the calendar means the whole segment
            # left the valid range; a shifted end beyond it is allowed and
            # resolves to None via `get`.
            if start_idx is not None and start_idx > len(self.cals):
                raise KeyError("The segment is out of valid calendar")
            return self.get(start_idx), self.get(end_idx)
        else:
            raise NotImplementedError(f"This type of input is not supported")
The provided code snippet includes necessary dependencies for implementing the `trunc_segments` function. Write a Python function `def trunc_segments(ta: TimeAdjuster, segments: Dict[str, pd.Timestamp], days, test_key="test")` to solve the following problem:
To avoid the leakage of future information, the segments should be truncated according to the test start_time NOTE: This function will change segments **inplace**
Here is the function:
def trunc_segments(ta: TimeAdjuster, segments: Dict[str, pd.Timestamp], days, test_key="test"):
    """
    Truncate every non-test segment so it ends `days` trading days before the
    test segment starts, avoiding leakage of future information.

    NOTE:
    This function modifies ``segments`` **inplace**.
    """
    # The earliest non-None bound of the test segment is the reference point.
    test_start = min(t for t in segments[test_key] if t is not None)
    for key, seg in list(segments.items()):
        if key == test_key:
            continue
        segments[key] = ta.truncate(seg, test_start, days)
19,674 | import concurrent
import pickle
import time
from contextlib import contextmanager
from typing import Callable, List
import fire
import pymongo
from bson.binary import Binary
from bson.objectid import ObjectId
from pymongo.errors import InvalidDocument
from qlib import auto_init, get_module_logger
from tqdm.cli import tqdm
from .utils import get_mongodb
from ...config import C
class TaskManager:
    """
    TaskManager

    Here is what a task looks like when it is created by TaskManager

    .. code-block:: python

        {
            'def': pickle serialized task definition. using pickle will make it easier
            'filter': json-like data. This is for filtering the tasks.
            'status': 'waiting' | 'running' | 'done'
            'res': pickle serialized task result,
        }

    The tasks manager assumes that you will only update the tasks you fetched.
    The MongoDB fetch-one-and-update operation makes the data updating secure.

    This class can be used as a tool from commandline. Here are several examples.
    You can view the help of manage module with the following commands:
    python -m qlib.workflow.task.manage -h # show manual of manage module CLI
    python -m qlib.workflow.task.manage wait -h # show manual of the wait command of manage

    .. code-block:: shell

        python -m qlib.workflow.task.manage -t <pool_name> wait
        python -m qlib.workflow.task.manage -t <pool_name> task_stat

    .. note::

        Assumption: the data in MongoDB was encoded and the data out of MongoDB was decoded

    Here are four status which are:

        STATUS_WAITING: waiting for training
        STATUS_RUNNING: training
        STATUS_PART_DONE: finished some step and waiting for next step
        STATUS_DONE: all work done
    """

    STATUS_WAITING = "waiting"
    STATUS_RUNNING = "running"
    STATUS_DONE = "done"
    STATUS_PART_DONE = "part_done"

    # Fields whose name starts with one of these prefixes are pickled into
    # BSON Binary before hitting MongoDB and unpickled on the way out.
    ENCODE_FIELDS_PREFIX = ["def", "res"]

    def __init__(self, task_pool: str):
        """
        Init Task Manager, remember to make the statement of MongoDB url and database name firstly.
        A TaskManager instance serves a specific task pool.
        The static method of this module serves the whole MongoDB.

        Parameters
        ----------
        task_pool: str
            the name of Collection in MongoDB
        """
        self.task_pool: pymongo.collection.Collection = getattr(get_mongodb(), task_pool)
        self.logger = get_module_logger(self.__class__.__name__)
        self.logger.info(f"task_pool:{task_pool}")

    # FIX: the method takes no `self`, so it must be a staticmethod to be
    # callable on instances as well as on the class.
    @staticmethod
    def list() -> list:
        """
        List the all collection(task_pool) of the db.

        Returns:
            list
        """
        return get_mongodb().list_collection_names()

    def _encode_task(self, task):
        # Pickle the payload fields (def/res) so arbitrary Python objects can
        # be stored in MongoDB.
        for prefix in self.ENCODE_FIELDS_PREFIX:
            for k in list(task.keys()):
                if k.startswith(prefix):
                    task[k] = Binary(pickle.dumps(task[k], protocol=C.dump_protocol_version))
        return task

    def _decode_task(self, task):
        """
        _decode_task is Serialization tool.
        Mongodb needs JSON, so it needs to convert Python objects into JSON objects through pickle

        Parameters
        ----------
        task : dict
            task information

        Returns
        -------
        dict
            JSON required by mongodb
        """
        for prefix in self.ENCODE_FIELDS_PREFIX:
            for k in list(task.keys()):
                if k.startswith(prefix):
                    task[k] = pickle.loads(task[k])
        return task

    def _dict_to_str(self, flt):
        # Fallback representation when a filter document contains values
        # MongoDB cannot store directly.
        return {k: str(v) for k, v in flt.items()}

    def _decode_query(self, query):
        """
        If the query includes any `_id`, then it needs `ObjectId` to decode.
        For example, when using TrainerRM, it needs query `{"_id": {"$in": _id_list}}`. Then we need to `ObjectId` every `_id` in `_id_list`.

        Args:
            query (dict): query dict. Defaults to {}.

        Returns:
            dict: the query after decoding.
        """
        if "_id" in query:
            if isinstance(query["_id"], dict):
                for key in query["_id"]:
                    query["_id"][key] = [ObjectId(i) for i in query["_id"][key]]
            else:
                query["_id"] = ObjectId(query["_id"])
        return query

    def replace_task(self, task, new_task):
        """
        Use a new task to replace a old one

        Args:
            task: old task
            new_task: new task
        """
        new_task = self._encode_task(new_task)
        query = {"_id": ObjectId(task["_id"])}
        try:
            self.task_pool.replace_one(query, new_task)
        except InvalidDocument:
            # Retry with a stringified filter when the document is not BSON-serializable.
            task["filter"] = self._dict_to_str(task["filter"])
            self.task_pool.replace_one(query, new_task)

    def insert_task(self, task):
        """
        Insert a task.

        Args:
            task: the task waiting for insert

        Returns:
            pymongo.results.InsertOneResult
        """
        try:
            insert_result = self.task_pool.insert_one(task)
        except InvalidDocument:
            task["filter"] = self._dict_to_str(task["filter"])
            insert_result = self.task_pool.insert_one(task)
        return insert_result

    def insert_task_def(self, task_def):
        """
        Insert a task to task_pool

        Parameters
        ----------
        task_def: dict
            the task definition

        Returns
        -------
        pymongo.results.InsertOneResult
        """
        task = self._encode_task(
            {
                "def": task_def,
                "filter": task_def,  # FIXME: catch the raised error
                "status": self.STATUS_WAITING,
            }
        )
        insert_result = self.insert_task(task)
        return insert_result

    def create_task(self, task_def_l, dry_run=False, print_nt=False) -> List[str]:
        """
        If the tasks in task_def_l are new, then insert new tasks into the task_pool, and record inserted_id.
        If a task is not new, then just query its _id.

        Parameters
        ----------
        task_def_l: list
            a list of task
        dry_run: bool
            if insert those new tasks to task pool
        print_nt: bool
            if print new task

        Returns
        -------
        List[str]
            a list of the _id of task_def_l
        """
        new_tasks = []
        _id_list = []
        for t in task_def_l:
            try:
                r = self.task_pool.find_one({"filter": t})
            except InvalidDocument:
                r = self.task_pool.find_one({"filter": self._dict_to_str(t)})
            # When r is None, it indicates that t is a new task
            if r is None:
                new_tasks.append(t)
                if not dry_run:
                    insert_result = self.insert_task_def(t)
                    _id_list.append(insert_result.inserted_id)
                else:
                    _id_list.append(None)
            else:
                _id_list.append(self._decode_task(r)["_id"])
        self.logger.info(f"Total Tasks: {len(task_def_l)}, New Tasks: {len(new_tasks)}")
        if print_nt:  # print new task
            for t in new_tasks:
                print(t)
        if dry_run:
            return []
        return _id_list

    def fetch_task(self, query={}, status=STATUS_WAITING) -> dict:
        """
        Use query to fetch tasks.

        Args:
            query (dict, optional): query dict. Defaults to {}.
            status (str, optional): [description]. Defaults to STATUS_WAITING.

        Returns:
            dict: a task(document in collection) after decoding
        """
        query = query.copy()
        query = self._decode_query(query)
        query.update({"status": status})
        task = self.task_pool.find_one_and_update(
            query, {"$set": {"status": self.STATUS_RUNNING}}, sort=[("priority", pymongo.DESCENDING)]
        )
        # null will be at the top after sorting when using ASCENDING, so the larger the number higher, the higher the priority
        if task is None:
            return None
        task["status"] = self.STATUS_RUNNING
        return self._decode_task(task)

    # FIX: this generator is consumed via `with tm.safe_fetch_task(...) as task:`
    # (see `run_task`); without @contextmanager a plain generator has no
    # __enter__/__exit__ and the `with` statement would fail.
    @contextmanager
    def safe_fetch_task(self, query={}, status=STATUS_WAITING):
        """
        Fetch task from task_pool using query with contextmanager

        Parameters
        ----------
        query: dict
            the dict of query

        Returns
        -------
        dict: a task(document in collection) after decoding
        """
        task = self.fetch_task(query=query, status=status)
        try:
            yield task
        except (Exception, KeyboardInterrupt):  # KeyboardInterrupt is not a subclass of Exception
            if task is not None:
                self.logger.info("Returning task before raising error")
                self.return_task(task, status=status)  # return task as the original status
                self.logger.info("Task returned")
            raise

    def task_fetcher_iter(self, query={}):
        while True:
            with self.safe_fetch_task(query=query) as task:
                if task is None:
                    break
                yield task

    def query(self, query={}, decode=True):
        """
        Query task in collection.
        This function may raise exception `pymongo.errors.CursorNotFound: cursor id not found` if it takes too long to iterate the generator

        python -m qlib.workflow.task.manage -t <your task pool> query '{"_id": "615498be837d0053acbc5d58"}'

        Parameters
        ----------
        query: dict
            the dict of query
        decode: bool
            whether to unpickle the payload fields of each document

        Returns
        -------
        dict: a task(document in collection), decoded when `decode` is True
        """
        query = query.copy()
        query = self._decode_query(query)
        for t in self.task_pool.find(query):
            # FIX: the `decode` flag was previously ignored; honor it so
            # callers like `task_stat` can skip the unpickling cost.
            yield self._decode_task(t) if decode else t

    def re_query(self, _id) -> dict:
        """
        Use _id to query task.

        Args:
            _id (str): _id of a document

        Returns:
            dict: a task(document in collection) after decoding
        """
        t = self.task_pool.find_one({"_id": ObjectId(_id)})
        return self._decode_task(t)

    def commit_task_res(self, task, res, status=STATUS_DONE):
        """
        Commit the result to task['res'].

        Args:
            task ([type]): [description]
            res (object): the result you want to save
            status (str, optional): STATUS_WAITING, STATUS_RUNNING, STATUS_DONE, STATUS_PART_DONE. Defaults to STATUS_DONE.
        """
        # A workaround to use the class attribute.
        if status is None:
            status = TaskManager.STATUS_DONE
        self.task_pool.update_one(
            {"_id": task["_id"]},
            {"$set": {"status": status, "res": Binary(pickle.dumps(res, protocol=C.dump_protocol_version))}},
        )

    def return_task(self, task, status=STATUS_WAITING):
        """
        Return a task to status. Always using in error handling.

        Args:
            task ([type]): [description]
            status (str, optional): STATUS_WAITING, STATUS_RUNNING, STATUS_DONE, STATUS_PART_DONE. Defaults to STATUS_WAITING.
        """
        if status is None:
            status = TaskManager.STATUS_WAITING
        update_dict = {"$set": {"status": status}}
        self.task_pool.update_one({"_id": task["_id"]}, update_dict)

    def remove(self, query={}):
        """
        Remove the task using query

        Parameters
        ----------
        query: dict
            the dict of query
        """
        query = query.copy()
        query = self._decode_query(query)
        self.task_pool.delete_many(query)

    def task_stat(self, query={}) -> dict:
        """
        Count the tasks in every status.

        Args:
            query (dict, optional): the query dict. Defaults to {}.

        Returns:
            dict
        """
        query = query.copy()
        query = self._decode_query(query)
        # decode=False: only the plain `status` field is needed here.
        tasks = self.query(query=query, decode=False)
        status_stat = {}
        for t in tasks:
            status_stat[t["status"]] = status_stat.get(t["status"], 0) + 1
        return status_stat

    def reset_waiting(self, query={}):
        """
        Reset all running task into waiting status. Can be used when some running task exit unexpected.

        Args:
            query (dict, optional): the query dict. Defaults to {}.
        """
        query = query.copy()
        # default query
        if "status" not in query:
            query["status"] = self.STATUS_RUNNING
        return self.reset_status(query=query, status=self.STATUS_WAITING)

    def reset_status(self, query, status):
        query = query.copy()
        query = self._decode_query(query)
        print(self.task_pool.update_many(query, {"$set": {"status": status}}))

    def prioritize(self, task, priority: int):
        """
        Set priority for task

        Parameters
        ----------
        task : dict
            The task query from the database
        priority : int
            the target priority
        """
        update_dict = {"$set": {"priority": priority}}
        self.task_pool.update_one({"_id": task["_id"]}, update_dict)

    def _get_undone_n(self, task_stat):
        # Everything that still needs work: waiting, running, or partially done.
        return (
            task_stat.get(self.STATUS_WAITING, 0)
            + task_stat.get(self.STATUS_RUNNING, 0)
            + task_stat.get(self.STATUS_PART_DONE, 0)
        )

    def _get_total(self, task_stat):
        return sum(task_stat.values())

    def wait(self, query={}):
        """
        When multiprocessing, the main progress may fetch nothing from TaskManager because there are still some running tasks.
        So main progress should wait until all tasks are trained well by other progress or machines.

        Args:
            query (dict, optional): the query dict. Defaults to {}.
        """
        task_stat = self.task_stat(query)
        total = self._get_total(task_stat)
        last_undone_n = self._get_undone_n(task_stat)
        if last_undone_n == 0:
            return
        self.logger.warning(f"Waiting for {last_undone_n} undone tasks. Please make sure they are running.")
        with tqdm(total=total, initial=total - last_undone_n) as pbar:
            while True:
                time.sleep(10)
                undone_n = self._get_undone_n(self.task_stat(query))
                pbar.update(last_undone_n - undone_n)
                last_undone_n = undone_n
                if undone_n == 0:
                    break

    def __str__(self):
        return f"TaskManager({self.task_pool})"
The provided code snippet includes necessary dependencies for implementing the `run_task` function. Write a Python function `def run_task( task_func: Callable, task_pool: str, query: dict = {}, force_release: bool = False, before_status: str = TaskManager.STATUS_WAITING, after_status: str = TaskManager.STATUS_DONE, **kwargs, )` to solve the following problem:
r""" While the task pool is not empty (has WAITING tasks), use task_func to fetch and run tasks in task_pool After running this method, here are 4 situations (before_status -> after_status): STATUS_WAITING -> STATUS_DONE: use task["def"] as `task_func` param, it means that the task has not been started STATUS_WAITING -> STATUS_PART_DONE: use task["def"] as `task_func` param STATUS_PART_DONE -> STATUS_PART_DONE: use task["res"] as `task_func` param, it means that the task has been started but not completed STATUS_PART_DONE -> STATUS_DONE: use task["res"] as `task_func` param Parameters ---------- task_func : Callable def (task_def, \**kwargs) -> <res which will be committed> the function to run the task task_pool : str the name of the task pool (Collection in MongoDB) query: dict will use this dict to query task_pool when fetching task force_release : bool will the program force to release the resource before_status : str: the tasks in before_status will be fetched and trained. Can be STATUS_WAITING, STATUS_PART_DONE. after_status : str: the tasks after trained will become after_status. Can be STATUS_WAITING, STATUS_PART_DONE. kwargs the params for `task_func`
Here is the function:
def run_task(
    task_func: Callable,
    task_pool: str,
    query: dict = {},
    force_release: bool = False,
    before_status: str = TaskManager.STATUS_WAITING,
    after_status: str = TaskManager.STATUS_DONE,
    **kwargs,
):
    r"""
    While the task pool still has tasks in `before_status`, keep fetching them
    and running `task_func` on each, committing the result with `after_status`.

    The four possible transitions (before_status -> after_status) are:

        STATUS_WAITING -> STATUS_DONE: use task["def"] as `task_func` param; the task has not been started
        STATUS_WAITING -> STATUS_PART_DONE: use task["def"] as `task_func` param
        STATUS_PART_DONE -> STATUS_PART_DONE: use task["res"] as `task_func` param; the task has been started but not completed
        STATUS_PART_DONE -> STATUS_DONE: use task["res"] as `task_func` param

    Parameters
    ----------
    task_func : Callable
        def (task_def, \**kwargs) -> <res which will be committed>

        the function to run the task
    task_pool : str
        the name of the task pool (Collection in MongoDB)
    query: dict
        will use this dict to query task_pool when fetching task
    force_release : bool
        will the program force to release the resource
    before_status : str:
        the tasks in before_status will be fetched and trained. Can be STATUS_WAITING, STATUS_PART_DONE.
    after_status : str:
        the tasks after trained will become after_status. Can be STATUS_WAITING, STATUS_PART_DONE.
    kwargs
        the params for `task_func`

    Returns
    -------
    bool
        whether at least one task was executed
    """
    manager = TaskManager(task_pool)
    logger = get_module_logger("run_task")

    executed_any = False
    while True:
        with manager.safe_fetch_task(status=before_status, query=query) as task:
            if task is None:
                break
            logger.info(task["def"])
            # `WAITING` tasks carry their definition in task["def"];
            # `PART_DONE` tasks carry the intermediate result in task["res"].
            if before_status == TaskManager.STATUS_WAITING:
                param = task["def"]
            elif before_status == TaskManager.STATUS_PART_DONE:
                param = task["res"]
            else:
                raise ValueError("The fetched task must be `STATUS_WAITING` or `STATUS_PART_DONE`!")
            if force_release:
                # Run in a throwaway subprocess so all resources (e.g. GPU
                # memory) are reclaimed when the task finishes.
                with concurrent.futures.ProcessPoolExecutor(max_workers=1) as pool:
                    result = pool.submit(task_func, param, **kwargs).result()
            else:
                result = task_func(param, **kwargs)
            manager.commit_task_res(task, result, status=after_status)
            executed_any = True
    return executed_any
19,675 | from abc import ABCMeta, abstractmethod
from typing import Optional
import pandas as pd
from qlib import get_module_logger
from qlib.data import D
from qlib.data.dataset import Dataset, DatasetH, TSDatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.model import Model
from qlib.utils import get_date_by_shift
from qlib.workflow.recorder import Recorder
from qlib.workflow.record_temp import SignalRecord
def _replace_range(data, new_data):
dates = new_data.index.get_level_values("datetime")
data = data.sort_index()
data = data.drop(data.loc[dates.min() : dates.max()].index)
cb_data = pd.concat([data, new_data], axis=0)
cb_data = cb_data[~cb_data.index.duplicated(keep="last")].sort_index()
return cb_data | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.