File size: 6,298 Bytes
fce236e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 |
import yaml
import os
import logging
import pandas as pd
from logger import setup_logger
# Import modules
from data_loader import DataLoader
from pair_selector import PairSelector
from kalman_hedge import KalmanHedge
from signal_generator import SignalGenerator
from backtester import Backtester
from risk_engine import RiskEngine
from portfolio_optimizer import PortfolioOptimizer
# Set up root logger
# Shared module-level logger used by every step of the pipeline below;
# setup_logger (project-local) is presumed to attach handlers/formatting — TODO confirm.
logger = setup_logger("PairTradingStrategy")
def load_config(path: str) -> dict:
    """Load a YAML configuration file and return it as a dict.

    Args:
        path: Filesystem path to the YAML configuration file.

    Returns:
        Parsed configuration. An empty or comment-only YAML file parses to
        ``None``; that is normalized to ``{}`` so callers can index safely.

    Raises:
        OSError: If the file cannot be opened.
        yaml.YAMLError: If the file is not valid YAML.
    """
    # Explicit encoding avoids platform-dependent defaults (e.g. cp1252 on Windows).
    with open(path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)
    # safe_load returns None for an empty document; honor the declared dict return.
    return cfg or {}
def calc_portfolio_metrics(returns: pd.Series) -> dict:
    """Compute annualized performance metrics from a series of periodic returns.

    Annualization uses 252 periods per year, i.e. daily data is assumed —
    NOTE(review): confirm this matches the configured data interval.

    Args:
        returns: Simple (arithmetic) periodic returns; NaNs are treated as 0.

    Returns:
        Dict with keys ``annual_return`` (geometric, annualized),
        ``annual_vol`` (annualized std dev), ``sharpe`` (no risk-free rate,
        NaN when volatility is zero) and ``max_drawdown`` (<= 0).
    """
    returns = returns.fillna(0)
    # Guard: an empty series would otherwise raise ZeroDivisionError in the
    # annualization exponent below.
    if len(returns) == 0:
        nan = float("nan")
        return {
            "annual_return": nan,
            "annual_vol": nan,
            "sharpe": nan,
            "max_drawdown": nan,
        }
    # Geometric annualization of the compounded total return.
    ann_return = (1 + returns).prod() ** (252 / len(returns)) - 1
    ann_vol = returns.std() * (252 ** 0.5)
    # Sharpe ratio without a risk-free rate; undefined (NaN) for zero vol.
    sharpe = ann_return / ann_vol if ann_vol != 0 else float("nan")
    # Max drawdown: worst peak-to-trough decline of the cumulative equity curve.
    cum = (1 + returns).cumprod()
    peak = cum.cummax()
    drawdown = (cum - peak) / peak
    max_dd = drawdown.min()
    return {
        "annual_return": ann_return,
        "annual_vol": ann_vol,
        "sharpe": sharpe,
        "max_drawdown": max_dd,
    }
def main():
    """Run the end-to-end pair-trading backtest pipeline.

    Steps: load YAML config -> fetch price/volume data -> select
    cointegrated pairs -> per pair: Kalman-filter hedge ratio, signal
    generation, backtest -> aggregate per-pair returns -> keep Sharpe > 0
    pairs -> min-variance portfolio -> risk metrics -> save CSV outputs.

    Returns early (with a log message) if no pairs are selected or no pair
    has a positive Sharpe ratio.
    """
    # 1) Load configuration (config.yaml lives one directory above this file)
    config_path = os.path.join(os.path.dirname(__file__), "../config.yaml")
    cfg = load_config(config_path)
    logger.info("Configuration loaded.")
    # 2) Fetch data
    data_cfg = cfg["data"]
    dl = DataLoader(
        tickers=data_cfg["tickers"],
        start_date=data_cfg["start_date"],
        end_date=data_cfg["end_date"],
        interval=data_cfg["interval"]
    )
    prices, volume = dl.fetch_data()
    # 3) Select pairs (project-local PairSelector; presumably clustering +
    # cointegration tests over rolling windows — confirm against its module)
    ps_cfg = cfg["pair_selector"]
    pair_selector = PairSelector(
        prices=prices,
        cluster_size=ps_cfg["cluster_size"],
        coint_pval_threshold=ps_cfg["coint_pval_threshold"],
        rolling_window=ps_cfg["rolling_window"],
        rolling_step=ps_cfg["rolling_step"],
        min_valid_periods=ps_cfg["min_valid_periods"]
    )
    pairs_df = pair_selector.select_pairs()
    if pairs_df.empty:
        logger.error("No suitable pairs found. Exiting.")
        return
    logger.info(f"Number of selected pairs: {len(pairs_df)}")
    # 4) For each selected pair, run Kalman hedge, generate signals, backtest
    all_pair_returns = {}
    results_summary = []
    for idx, row in pairs_df.iterrows():
        t1 = row["ticker1"]
        t2 = row["ticker2"]
        logger.info(f"Processing pair {t1}-{t2}.")
        s1 = prices[t1]
        s2 = prices[t2]
        # 4a) Kalman hedge: t1 is the observation, t2 the control/regressor
        km_cfg = cfg["kalman"]
        kh = KalmanHedge(
            observation_series=s1,
            control_series=s2,
            initial_state_cov=km_cfg["initial_state_cov"],
            transition_cov=km_cfg["transition_cov"],
            observation_cov=km_cfg["observation_cov"],
            em_iterations=km_cfg["em_iterations"]
        )
        kalman_df = kh.run_filter()
        # 4b) Signal generation from the filtered hedge output
        sig_cfg = cfg["signal"]
        sg = SignalGenerator(
            price1=s1,
            price2=s2,
            kalman_df=kalman_df,
            config=sig_cfg
        )
        trade_df = sg.generate(costs=cfg["costs"], volume=volume[[t1, t2]])
        # 4c) Backtest the generated trades
        bt = Backtester(
            trade_df=trade_df,
            costs=cfg["costs"],
            volume=volume[[t1, t2]],
            ticker1=t1,
            ticker2=t2,
        )
        bt_results = bt.run()
        metrics = bt.performance_metrics(bt_results)
        logger.info(f"Pair {t1}-{t2} metrics: {metrics}")
        # Store returns series for portfolio optimization
        all_pair_returns[f"{t1}/{t2}"] = bt_results["strategy_return"]
        # Summarize per-pair metrics for the CSV report
        results_summary.append({
            "pair": f"{t1}/{t2}",
            **metrics,
            "half_life": row["half_life"]
        })
    # 5) Aggregate pair returns into a DataFrame; drop only rows where
    # every pair's return is NaN (how="all")
    pair_returns_df = (
        pd.DataFrame(all_pair_returns)
        .dropna(how="all")
    )
    # 6) Save pair_summary.csv into ../output (created if missing)
    summary_df = pd.DataFrame(results_summary)
    output_dir = os.path.join(os.path.dirname(__file__), "../output")
    os.makedirs(output_dir, exist_ok=True)
    summary_path = os.path.join(output_dir, "pair_summary.csv")
    summary_df.to_csv(summary_path, index=False)
    logger.info(f"Saved pair summary to {summary_path}.")
    # ====== Keep only the pairs with Sharpe > 0 ======
    selected_pairs = summary_df[summary_df["sharpe"] > 0]["pair"].tolist()
    if not selected_pairs:
        logger.warning("No pairs with Sharpe > 0 were found. Portfolio will not be constructed.")
        return
    pair_returns_df_selected = pair_returns_df[selected_pairs]
    logger.info(f"Selected pairs with Sharpe > 0: {selected_pairs}")
    # 7) Portfolio optimization over the Sharpe > 0 pairs only
    port_cfg = cfg["portfolio"]
    po = PortfolioOptimizer(
        pair_returns=pair_returns_df_selected,
        min_weight=port_cfg["min_weight"],
        max_weight=port_cfg["max_weight"]
    )
    weights = po.min_variance()
    # 8) Compute portfolio P&L as the weighted sum of per-pair returns
    portfolio_ret = (pair_returns_df_selected * weights).sum(axis=1)
    # NOTE(review): local name `re` shadows the stdlib `re` module within
    # this function — consider renaming (e.g. `risk`).
    re = RiskEngine(returns=portfolio_ret, config=cfg["risk"])
    var_h = re.historical_var()
    var_p = re.parametric_var()
    max_dd = re.max_drawdown()
    logger.info(f"Portfolio VaR (hist) = {var_h:.4%}, (param) = {var_p:.4%}, max DD = {max_dd:.4%}")
    # 9) Compute and log overall portfolio performance
    portfolio_metrics = calc_portfolio_metrics(portfolio_ret)
    logger.info(
        f"Portfolio annual_return={portfolio_metrics['annual_return']:.4%}, "
        f"annual_vol={portfolio_metrics['annual_vol']:.4%}, "
        f"sharpe={portfolio_metrics['sharpe']:.2f}, "
        f"max_drawdown={portfolio_metrics['max_drawdown']:.2%}"
    )
    # 10) Save portfolio_weights.csv and portfolio_metrics.csv
    weights_path = os.path.join(output_dir, "portfolio_weights.csv")
    weights.to_csv(weights_path, header=True)
    logger.info(f"Saved portfolio weights to {weights_path}.")
    pd.DataFrame([portfolio_metrics]).to_csv(
        os.path.join(output_dir, "portfolio_metrics.csv"),
        index=False
    )
    logger.info(f"Saved portfolio metrics to {os.path.join(output_dir, 'portfolio_metrics.csv')}.")
    logger.info("Backtest pipeline completed successfully.")
# Script entry point: run the full pipeline only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|