| """Unified Pipeline - Orchestrates all AlphaForge components end-to-end. |
| |
| INPUT: Market data (OHLCV), news feed, macro data, options chain |
| OUTPUT: Portfolio weights, risk metrics, PnL, dashboards |
| |
| This is the central brain: one class to rule them all.
| """ |
| import numpy as np
| import pandas as pd
| import torch
| import json
| import os
| import warnings
| from typing import Dict, List, Optional, Tuple, Any
| from datetime import datetime, timedelta
|
| warnings.filterwarnings('ignore')
|
|
| class AlphaForgePipeline: |
|     """Production-grade unified pipeline: data → alpha → risk → weights → backtest."""
|
|
| def __init__(self, config: Optional[Dict] = None): |
| self.config = config or self.default_config() |
| self._init_components() |
| self.state = {'pnl': [], 'weights': [], 'alerts': [], 'signals': {}, 'regime': 'neutral'} |
|
|
| |
| @staticmethod |
| def default_config() -> Dict: |
| return { |
| 'tickers': ['SPY','QQQ','AAPL','MSFT','GOOGL','AMZN','META','NVDA','TSLA','JPM','V','WMT','XLF','XLK','XLE'], |
| 'lookback': 60, 'horizon': 5, |
| 'rebalance_freq': 'W', |
| 'alpha': {'lstm_hidden':128,'lstm_layers':2,'trans_d_model':128,'trans_nhead':4,'xgb_depth':6,'xgb_lr':0.05,'xgb_estimators':200, |
| 'ensemble_weights':{'lstm':0.3,'transformer':0.3,'xgboost':0.4},'epochs':50,'device':'cpu'}, |
| 'sentiment': {'enabled':True,'model':'ProsusAI/finbert','weight':0.3,'window':5}, |
| 'volatility': {'garch_p':1,'garch_q':1,'garch_dist':'t','lstm_hidden':64}, |
| 'portfolio': {'max_weight':0.25,'risk_aversion':2.0,'transaction_cost':0.0003,'target_return':None}, |
| 'risk': {'var_conf':[0.95,0.99],'max_drawdown_threshold':-0.10,'scaling_factor':2.0}, |
| 'online': {'enable_drift_detection':True,'adaptation_window':21,'drift_threshold':0.3}, |
| 'advanced_features': True, |
| 'include_macro': True, |
| 'include_sentiment': True, |
| } |
|
|
| def _init_components(self): |
| """Lazy-init all model components.""" |
| self._alpha_model = None |
| self._sentiment_model = None |
| self._vol_engine = None |
| self._optimizer = None |
| self._risk_engine = None |
| self._feature_engine = None |
|
|
| |
| def fetch_market_data(self, start: str, end: str) -> Dict[str, pd.DataFrame]: |
| """Fetch and preprocess market data.""" |
| from market_data import MarketDataPipeline |
| pipeline = MarketDataPipeline(self.config['tickers'], start, end) |
| return pipeline.fetch_data() |
|
|
| def build_features(self, data: Dict[str, pd.DataFrame]) -> pd.DataFrame: |
| """Build advanced feature matrix (90+ cols).""" |
| if self.config['advanced_features']: |
| from advanced_features_part1 import MicrostructureFeatures, CrossSectionalFeatures |
| from macro_features import MacroFeatures |
| from regime_features import RegimeFeatures |
| from technical_indicators import AdvancedTechnical |
| all_features = [] |
| for ticker, df in data.items(): |
| close = np.array(df['Close']).flatten(); high = np.array(df['High']).flatten() |
| low = np.array(df['Low']).flatten(); vol = np.array(df['Volume']).flatten() |
| cs = pd.Series(close, index=df.index); hs = pd.Series(high, index=df.index) |
| ls = pd.Series(low, index=df.index); vs = pd.Series(vol, index=df.index) |
| f = pd.DataFrame(index=df.index) |
| f['ticker'] = ticker; f['close'] = close |
| for col_df in [ |
| MicrostructureFeatures.compute_all(cs,hs,ls,vs), |
| RegimeFeatures.volatility_regime(cs.pct_change().fillna(0)), |
| RegimeFeatures.liquidity_regime(vs,cs), |
| RegimeFeatures.trend_regime(cs), |
| AdvancedTechnical.ichimoku(cs,hs,ls), |
| AdvancedTechnical.supertrend(cs,hs,ls), |
| AdvancedTechnical.keltner_channels(cs,hs,ls), |
| AdvancedTechnical.volume_profile(cs,vs,hs,ls), |
| ]: |
| for c in col_df.columns: f[c] = col_df[c].values |
| all_features.append(f) |
| features_df = pd.concat(all_features, axis=0) |
| if self.config['include_macro']: |
| macro = MacroFeatures._synthetic_macro(str(features_df.index[0])[:10], str(features_df.index[-1])[:10]) |
| for c in macro.columns: features_df[f'macro_{c}'] = macro[c].reindex(features_df.index).ffill() |
| |
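|             # Cross-sectional cleanup: z-score every feature per ticker over a rolling
|             # 42-day window (zero stds are mapped to 1 to avoid division by zero).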
| nc = [c for c in features_df.columns if c not in ['ticker','close']] |
| for ticker in features_df['ticker'].unique(): |
| m = features_df['ticker'] == ticker |
| for col in nc: |
| s = features_df.loc[m, col]; rm = s.rolling(42).mean(); rs = s.rolling(42).std().replace(0,1) |
| features_df.loc[m, col] = (s - rm) / rs |
| return features_df.replace([np.inf, -np.inf], 0).fillna(0) |
| else: |
| from market_data import MarketDataPipeline |
| return MarketDataPipeline(self.config['tickers'], '', '').create_feature_matrix() |
|
|
| |
| def train_alpha(self, X: np.ndarray, y: np.ndarray, X_val=None, y_val=None) -> Dict: |
| """Train the alpha model ensemble.""" |
| from alpha_model import AlphaEnsemble |
| ac = self.config['alpha'] |
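|         # X is expected as (n_samples, seq_len, n_features); its shape drives the model sizing below.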
| self._alpha_model = AlphaEnsemble( |
| input_size=X.shape[2], seq_len=X.shape[1], |
| lstm_hidden=ac['lstm_hidden'], lstm_layers=ac['lstm_layers'], |
| trans_d_model=ac['trans_d_model'], trans_nhead=ac['trans_nhead'], |
| xgb_depth=ac['xgb_depth'], xgb_lr=ac['xgb_lr'], xgb_estimators=ac['xgb_estimators'], |
| weights=ac['ensemble_weights'], device=ac['device'] |
| ) |
| return self._alpha_model.fit(X, y, X_val, y_val, epochs=ac['epochs']) |
|
|
| def predict_alpha(self, X: np.ndarray) -> np.ndarray: |
| """Generate alpha predictions.""" |
| if self._alpha_model is None: |
| raise RuntimeError("Alpha model not trained") |
| return self._alpha_model.predict(X) |
|
|
| |
| def optimize_portfolio(self, mu: np.ndarray, Sigma: np.ndarray, |
| current_weights: Optional[np.ndarray] = None) -> Dict: |
| """Optimize portfolio weights.""" |
| from portfolio_optimizer import PortfolioOptimizer |
| pc = self.config['portfolio'] |
| opt = PortfolioOptimizer( |
| max_weight=pc['max_weight'], risk_aversion=pc['risk_aversion'], |
| transaction_cost=pc['transaction_cost'] |
| ) |
| return opt.optimize_max_sharpe(mu, Sigma, current_weights) |
|
|
| |
| def compute_risk_metrics(self, returns: np.ndarray, weights: np.ndarray, |
| returns_df: pd.DataFrame) -> Dict: |
| """Compute comprehensive risk metrics.""" |
| from risk_engine import RiskEngine |
| rc = self.config['risk'] |
| risk = RiskEngine(confidence_levels=rc['var_conf']) |
|         port_ret = returns_df.dot(weights) if returns_df.shape[1] == len(weights) else np.dot(returns, weights)
|         port_ret = np.asarray(port_ret)
|         return {
|             **risk.compute_all_var(port_ret),
|             **risk.compute_tail_risk(port_ret),
| 'portfolio_var': risk.portfolio_var(weights, returns_df, 'parametric', 0.95) |
| } |
|
|
| |
| def run(self, start: str, end: str, mode: str = 'backtest') -> Dict[str, Any]: |
| """Run full pipeline end-to-end.""" |
|         print(f"AlphaForge Pipeline: {start} → {end}")
|
|
| |
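|         # 1) Data: pull raw OHLCV per ticker, then engineer the cross-sectional feature matrix.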
| data = self.fetch_market_data(start, end) |
| features = self.build_features(data) |
|
|
| |
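|         # 2) Sequences: build (lookback, horizon) windows and split chronologically;
|         #    the first 70% trains, the last 15% tests, and the middle 15% is left out as a buffer.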
| from market_data import MarketDataPipeline |
| pipeline = MarketDataPipeline(self.config['tickers'], start, end) |
| X, y, tickers, dates = pipeline.create_sequences(features, self.config['lookback'], self.config['horizon']) |
| n = len(X) |
| X_train, y_train = X[:int(n*0.7)], y[:int(n*0.7)] |
| X_test, y_test = X[int(n*0.85):], y[int(n*0.85):] |
|
|
| |
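|         # 3) Alpha: train the LSTM/Transformer/XGBoost ensemble and score it out-of-sample via IC.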
| self.train_alpha(X_train, y_train) |
| alpha_pred = self.predict_alpha(X_test) |
| from backtest_engine import compute_information_coefficient |
| ic = compute_information_coefficient(pd.Series(alpha_pred), pd.Series(y_test), by_date=False) |
|
|
| |
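|         # 4) Volatility: fit the GARCH-based engine and assemble per-ticker log-return series
|         #    for covariance estimation at each rebalance date.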
| from volatility_model import VolatilityEngine |
| vc = self.config['volatility'] |
| vol_engine = VolatilityEngine(garch_p=vc['garch_p'], garch_q=vc['garch_q'], garch_dist=vc['garch_dist']) |
| returns_dict = {} |
| for t in self.config['tickers']: |
| if t in data: |
| c = np.array(data[t]['Close']).flatten() |
| returns_dict[t] = pd.Series(np.log(c[1:]/c[:-1]), index=data[t].index[1:]) |
| returns_df = pd.DataFrame(returns_dict).fillna(0) |
|
|
| |
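|         # 5) Portfolio: every 5th test date (~weekly on daily data), map predictions to expected
|         #    returns, estimate the covariance, and solve for max-Sharpe weights.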
| pred_df = pd.DataFrame({'date': dates[int(n*0.85):], 'ticker': tickers[int(n*0.85):], |
| 'predicted_return': alpha_pred, 'actual_return': y_test}) |
| test_dates = sorted(pd.to_datetime(pred_df['date'].unique())) |
| weights_history = [] |
| for rd in test_dates[::5]: |
| dp = pred_df[pred_df['date'] == rd] |
| if len(dp) < 3: continue |
| mu = dp.set_index('ticker')['predicted_return'].reindex(self.config['tickers']).fillna(0).values |
| try: |
| cov = vol_engine.build_covariance_matrix(returns_df, rd) |
| cov = cov.reindex(index=self.config['tickers'], columns=self.config['tickers']).fillna(0).values |
|             except Exception:
|                 cov = np.eye(len(self.config['tickers'])) * 0.04  # fallback: diagonal covariance when estimation fails
| result = self.optimize_portfolio(mu, cov) |
| weights_history.append(pd.Series(result['weights'], index=self.config['tickers'], name=rd)) |
|
|
| if not weights_history: |
| return {'error': 'No valid rebalance dates'} |
|
|
| weights_df = pd.DataFrame(weights_history) |
|
|
| |
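|         # 6) Backtest: replay the weight history through the backtest engine.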
| from backtest_engine import BacktestEngine, RegimeDetector |
| bt = BacktestEngine(initial_capital=1_000_000) |
| bt_returns = returns_df.reindex(weights_df.index).fillna(0) |
| metrics = bt.run_backtest(bt_returns, weights_df, rebalance_dates=weights_df.index) |
|
|
| |
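|         # 7) Risk: VaR, tail-risk, and portfolio-variance metrics on the latest weights.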
| risk = self.compute_risk_metrics(np.array(bt.returns_history), weights_df.iloc[-1].values, |
| bt_returns) |
|
|
| |
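|         # 8) Regime: label market regimes from SPY returns when SPY is in the universe.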
|         regime_stats = None
|         if 'SPY' in returns_df.columns:
|             rdet = RegimeDetector()
|             spy_r = returns_df['SPY'].reindex(weights_df.index).fillna(0)
|             rdet.detect_regimes(spy_r)
|             regime_stats = rdet.get_regime_stats(spy_r)
|
|
| return { |
| 'metrics': metrics, |
| 'ic': ic, |
| 'risk': risk, |
|             'regime_stats': regime_stats.to_dict() if regime_stats is not None else None,
| 'weights': weights_df.tail(10).to_dict(), |
| 'n_signals': len(alpha_pred), |
| 'feature_count': X.shape[2], |
| } |
|
|
| class HyperparameterSweeper: |
| """Grid search over alpha model hyperparameters.""" |
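|
|     # Example grid (hypothetical values); keys must match the params.get(...) names in run():
|     #     HyperparameterSweeper({'lstm_hidden': [64, 128], 'xgb_depth': [4, 6], 'epochs': [20]})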
|
|
| def __init__(self, config_grid: Dict[str, List]): |
| self.grid = config_grid |
| self.results = [] |
|
|
| def run(self, X: np.ndarray, y: np.ndarray, n_splits: int = 3) -> pd.DataFrame: |
| from itertools import product |
| keys = list(self.grid.keys()) |
| combos = list(product(*self.grid.values())) |
|         print(f"Sweeping {len(combos)} hyperparameter combinations...")
|
|
| for i, combo in enumerate(combos): |
| params = dict(zip(keys, combo)) |
| print(f" [{i+1}/{len(combos)}] {params}") |
|
|
| |
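|             # Expanding-window walk-forward CV: fold k trains on the first (k+1)*fold_size
|             # samples and validates on the next fold_size, so validation data is always
|             # strictly later than the training data.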
| from alpha_model import AlphaEnsemble |
| n = len(X) |
| fold_size = n // (n_splits + 1) |
| ics = [] |
|
|
| for fold in range(n_splits): |
| train_end = (fold + 1) * fold_size |
| val_end = train_end + fold_size |
| X_f, y_f = X[:train_end], y[:train_end] |
| X_v, y_v = X[train_end:val_end], y[train_end:val_end] |
| if len(X_v) < 10: continue |
| model = AlphaEnsemble( |
| input_size=X.shape[2], seq_len=X.shape[1], |
| lstm_hidden=params.get('lstm_hidden',128), |
| lstm_layers=params.get('lstm_layers',2), |
| trans_d_model=params.get('trans_d_model',128), |
| xgb_depth=params.get('xgb_depth',6), |
| xgb_lr=params.get('xgb_lr',0.05), |
| xgb_estimators=params.get('xgb_estimators',200), |
| device=params.get('device','cpu') |
| ) |
| model.fit(X_f, y_f, X_v, y_v, epochs=params.get('epochs',30)) |
| from backtest_engine import compute_information_coefficient |
| pred = model.predict(X_v) |
| ic = compute_information_coefficient(pd.Series(pred), pd.Series(y_v), by_date=False) |
| ics.append(ic['mean_ic']) |
|
|
|             mean_ic = float(np.mean(ics)) if ics else float('nan')
|             std_ic = float(np.std(ics)) if ics else float('nan')
|             result = {**params, 'mean_ic': mean_ic, 'std_ic': std_ic, 'fold_ics': ics}
| self.results.append(result) |
|
|
| df = pd.DataFrame(self.results).sort_values('mean_ic', ascending=False) |
|         print(f"\nBest IC: {df['mean_ic'].iloc[0]:.4f} with params: {dict(df.iloc[0][list(keys)])}")
| return df |
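|
|
| # ---------------------------------------------------------------------------
| # Minimal usage sketch (illustrative, not a test): assumes the sibling modules
| # imported above (market_data, alpha_model, volatility_model, backtest_engine,
| # portfolio_optimizer, risk_engine) are importable and that market data is
| # available for the hypothetical date range below.
| if __name__ == '__main__':
|     cfg = AlphaForgePipeline.default_config()
|     cfg['alpha']['epochs'] = 10  # hypothetical override for a quick smoke run
|     pipe = AlphaForgePipeline(cfg)
|     results = pipe.run('2020-01-01', '2023-12-31')
|     print('IC:', results.get('ic'))
|     print('Backtest metrics:', results.get('metrics'))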