| | import sys |
| | import pandas as pd |
| | import numpy as np |
| | from sklearn.model_selection import KFold |
| | from xgboost import XGBRegressor |
| | from lightgbm import LGBMRegressor |
| | from sklearn.linear_model import ( |
| | HuberRegressor, RANSACRegressor, TheilSenRegressor, |
| | Lasso, ElasticNet, Ridge |
| | ) |
| | from sklearn.cross_decomposition import PLSRegression |
| | from sklearn.preprocessing import StandardScaler, RobustScaler |
| | from sklearn.ensemble import RandomForestRegressor |
| | from scipy.stats import pearsonr |
| | import warnings |
| | import torch |
| | import matplotlib.pyplot as plt |
| | import seaborn as sns |
| | from concurrent.futures import ThreadPoolExecutor, as_completed |
| | from itertools import combinations |
| | import time |
| | warnings.filterwarnings('ignore') |
| |
|
| | |
# Configure matplotlib to render CJK glyphs (SimHei) with a Latin fallback,
# and keep the minus sign renderable when a CJK font is active.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False
| |
|
| | |
class Config:
    """Central configuration for the feature correlation / aggregation run."""

    # Input parquet paths.
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"

    # Base feature set analyzed for correlation and IC.
    FEATURES = [
        "X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
        "X415", "X345", "X855", "X174", "X302", "X178", "X168", "X612",
        "buy_qty", "sell_qty", "volume", "X888", "X421", "X333",
        "bid_qty", "ask_qty"
    ]

    LABEL_COLUMN = "label"  # prediction target column
    N_FOLDS = 3             # CV fold count (for downstream modeling)
    RANDOM_STATE = 42       # global seed for reproducibility

    # |correlation| above which two features are merged into one group.
    CORRELATION_THRESHOLD = 0.8
    IC_WEIGHT_METHOD = "abs"         # weighting scheme for group members
    SAVE_RESULTS = True
    CREATE_VISUALIZATIONS = True
    REMOVE_ORIGINAL_FEATURES = True  # drop originals once aggregated

    # Minimum IC an aggregated feature must reach to be kept.
    IC_THRESHOLD = 0.04

    # Performance knobs for the correlation/IC computations.
    MAX_WORKERS = 4
    USE_SAMPLING = False             # subsample rows before correlating
    SAMPLE_SIZE = 10000
    USE_GPU = True
    USE_MATRIX_MULTIPLICATION = True  # vectorized path for Pearson corr
| |
|
| | |
def feature_engineering(df):
    """Attach derived liquidity/flow features and impute missing values.

    Mutates *df* by adding the engineered columns, then returns a cleaned
    frame in which +/-inf are treated as missing and every NaN is replaced
    by the column median (or 0 when the median itself is undefined).
    """
    eps = 1e-8  # guards every ratio against division by zero

    buy, sell = df['buy_qty'], df['sell_qty']
    bid, ask = df['bid_qty'], df['ask_qty']
    vol = df['volume']

    # Trade-flow features.
    df['volume_weighted_sell'] = sell * vol
    df['buy_sell_ratio'] = buy / (sell + eps)
    df['selling_pressure'] = sell / (vol + eps)
    df['effective_spread_proxy'] = np.abs(buy - sell) / (vol + eps)

    # Robust transforms and order-book imbalance features.
    df['log_volume'] = np.log1p(vol)
    df['bid_ask_imbalance'] = (bid - ask) / (bid + ask + eps)
    df['order_flow_imbalance'] = (buy - sell) / (buy + sell + eps)
    df['liquidity_ratio'] = (bid + ask) / (vol + eps)

    # Infinities (from pathological ratios) become NaN and get imputed.
    df = df.replace([np.inf, -np.inf], np.nan)

    for column in df.columns:
        if not df[column].isna().any():
            continue
        fill_value = df[column].median()
        if pd.isna(fill_value):
            fill_value = 0
        df[column] = df[column].fillna(fill_value)

    return df
| |
|
# NOTE(review): these module-level loads execute on *import* and duplicate
# the loading/feature-engineering performed under `if __name__ == "__main__"`
# below (where both frames are re-read and re-engineered). Also, test_df is
# loaded here but NOT passed through feature_engineering, unlike train_df —
# confirm whether this top-level block is still needed.
train_df = pd.read_parquet(Config.TRAIN_PATH)
test_df = pd.read_parquet(Config.TEST_PATH)

train_df = feature_engineering(train_df)
| |
|
| | |
def calculate_correlation_matrix_and_ic(train_df, features, label_col='label', correlation_threshold=0.8, max_workers=4, test_df=None):
    """
    Compute the feature correlation matrix and each feature's IC against the
    label (optimized version), first flipping the sign of negative-IC
    features so that every IC ends up positive.

    Parameters
    ----------
    train_df : pd.DataFrame
        Training frame; mutated in place when feature signs are flipped.
    features : list
        Candidate feature names (silently filtered to columns present).
    label_col : str
        Name of the label column.
    correlation_threshold : float
        |correlation| cutoff passed to the grouping step.
    max_workers : int
        Thread count used by the parallel IC/correlation helpers.
    test_df : pd.DataFrame, optional
        When provided, receives the same sign flips as train_df.

    Returns
    -------
    (corr_matrix, ic_values, feature_groups, train_df, test_df)
    """
    # Restrict the analysis to features actually present in the frame.
    available_features = [f for f in features if f in train_df.columns]
    print(f"可用特征数量: {len(available_features)}")

    # First pass: raw IC (Pearson corr with the label) per feature.
    ic_values = fast_ic_calculation(train_df, available_features, label_col, max_workers=max_workers)
    print("初始IC统计:")
    print(ic_values.describe())

    # Flip negative-IC features in place so all ICs become positive;
    # mirror the flip onto test_df to keep both frames consistent.
    neg_ic_features = ic_values[ic_values < 0].index.tolist()
    print(f"IC为负的特征数量: {len(neg_ic_features)}")
    for f in neg_ic_features:
        train_df[f] = -train_df[f]
        if test_df is not None and f in test_df.columns:
            test_df[f] = -test_df[f]

    # Second pass: recompute IC on the sign-corrected features.
    ic_values = fast_ic_calculation(train_df, available_features, label_col, max_workers=max_workers)
    print("IC取正后统计:")
    print(ic_values.describe())

    # Pairwise Pearson correlations between the (sign-corrected) features.
    corr_matrix = fast_correlation_matrix(train_df, available_features, method='pearson', max_workers=max_workers)

    # Greedily group highly correlated features, weighted by IC.
    feature_groups = aggregate_correlated_features(
        corr_matrix, ic_values, correlation_threshold
    )

    return corr_matrix, ic_values, feature_groups, train_df, test_df
| |
|
def aggregate_correlated_features(corr_matrix, ic_values, threshold=0.8):
    """Greedily bundle highly correlated features into weighted groups.

    Features are visited in descending |IC| order; each still-unclaimed
    feature absorbs every other unclaimed feature whose absolute pairwise
    correlation exceeds *threshold*. Member weights are produced by
    ``calculate_ic_weighted_weights`` using ``Config.IC_WEIGHT_METHOD``.

    Parameters
    ----------
    corr_matrix : pd.DataFrame
        Pairwise feature correlation matrix.
    ic_values : pd.Series
        IC value per feature.
    threshold : float
        Absolute-correlation cutoff for grouping.

    Returns
    -------
    list of dict
        Each dict holds 'features', 'weights', 'representative' and
        'group_ic' (mean IC of the members).
    """
    all_features = list(corr_matrix.columns)
    claimed = set()
    groups = []

    # Highest-|IC| features get first pick as group representatives.
    by_strength = ic_values.abs().sort_values(ascending=False)

    for rep in by_strength.index:
        if rep in claimed:
            continue

        # Unclaimed neighbours correlated with the representative.
        partners = [
            other for other in all_features
            if other != rep
            and other not in claimed
            and abs(corr_matrix.loc[rep, other]) > threshold
        ]

        if not partners:
            # No correlated neighbours: the feature stands alone.
            claimed.add(rep)
            groups.append({
                'features': [rep],
                'weights': [1.0],
                'representative': rep,
                'group_ic': ic_values[rep]
            })
            continue

        members = [rep] + partners
        claimed.update(members)

        member_ics = ic_values[members]
        weights = calculate_ic_weighted_weights(member_ics, Config.IC_WEIGHT_METHOD)

        groups.append({
            'features': members,
            'weights': weights,
            'representative': rep,
            'group_ic': member_ics.mean()
        })

        print(f"特征组 {len(groups)}: {rep} (IC={ic_values[rep]:.4f}) "
              f"与 {len(partners)} 个特征聚合")
        print(f"组{len(groups) - 1} 权重: {weights}, 特征: {members}")

    return groups
| |
|
def calculate_ic_weighted_weights(ic_values, method="abs"):
    """Turn per-feature IC values into normalized, positive weights.

    Parameters
    ----------
    ic_values : pd.Series
        IC value per feature in the group.
    method : str
        Weighting scheme:
        - "abs":    weight proportional to |IC|
        - "square": weight proportional to IC^2
        - "rank":   weight proportional to the |IC| rank, largest |IC|
                    receiving the largest rank and therefore the largest
                    weight

    Returns
    -------
    list of float
        Weights normalized to sum to 1.

    Raises
    ------
    ValueError
        If *method* is not one of the supported schemes.
    """
    if method == "abs":
        weights_base = ic_values.abs()
    elif method == "square":
        weights_base = ic_values ** 2
    elif method == "rank":
        # BUGFIX: the original used rank(ascending=False), which gives the
        # *highest* |IC| rank 1 and therefore the smallest weight — the
        # inverse of the intent (and inconsistent with "abs"/"square").
        # ascending=True assigns the largest |IC| the largest rank value.
        weights_base = ic_values.abs().rank(ascending=True)
    else:
        raise ValueError(f"不支持的权重计算方法: {method}")

    # Epsilon keeps an all-zero group from producing a 0/0 normalization.
    weights_base = weights_base + 1e-8

    weights = weights_base / weights_base.sum()

    return weights.tolist()
| |
|
def calculate_optimal_ic_weights(df, features, label_col):
    """Solve for the IC-maximizing linear combination of a feature group.

    Standardizes the features, solves Cov(Z, Z) w = Cov(Z, r) — the
    first-order condition of maximizing the composite factor's IC — and
    normalizes the solution by its L1 norm.

    Parameters
    ----------
    df : pd.DataFrame
        Frame containing the features and the label.
    features : list
        Feature names in the group.
    label_col : str
        Label column name.

    Returns
    -------
    list of float
        Weights whose absolute values sum to 1 (uniform fallback when the
        solution is degenerate).
    """
    if len(features) == 1:
        return [1.0]

    # Z-score each feature so covariances are on a comparable scale.
    raw = df[features].values
    standardized = (raw - raw.mean(axis=0)) / (raw.std(axis=0) + 1e-8)
    target = df[label_col].values.reshape(-1, 1)

    feat_cov = np.cov(standardized, rowvar=False)
    cross_cov = np.cov(standardized, target, rowvar=False)[:-1, -1]

    # Ridge-style jitter keeps the system solvable for collinear groups.
    feat_cov += np.eye(feat_cov.shape[0]) * 1e-6

    try:
        solution = np.linalg.solve(feat_cov, cross_cov)
    except np.linalg.LinAlgError:
        solution = np.linalg.lstsq(feat_cov, cross_cov, rcond=None)[0]

    # L1-normalize; fall back to equal weights for a near-zero solution.
    l1_norm = np.sum(np.abs(solution))
    if l1_norm > 1e-8:
        solution = solution / l1_norm
    else:
        solution = np.ones_like(solution) / len(solution)
    return solution.tolist()
| |
|
def create_aggregated_features(df, feature_groups, remove_original=True, label_col=None):
    """
    Build one aggregated column per feature group using IC-maximizing
    weights, printing a comparison against plain |IC|-proportional weights.

    Parameters
    ----------
    df : pd.DataFrame
        Frame containing the group features (and the label column).
    feature_groups : list of dict
        Output of aggregate_correlated_features; each entry carries the
        group's 'features' and 'representative'.
    remove_original : bool
        When True, drop every original feature consumed by some group.
    label_col : str, optional
        Label column name; defaults to Config.LABEL_COLUMN.

    Returns
    -------
    pd.DataFrame
        Copy of *df* with one "agg_group_<i>_<representative>" column per
        group (and, optionally, the aggregated originals removed).
    """
    aggregated_df = df.copy()
    aggregated_original_features = set()  # originals consumed by a group
    if label_col is None:
        label_col = Config.LABEL_COLUMN
    for i, group in enumerate(feature_groups):
        features = group['features']
        representative = group['representative']

        # Skip groups whose members are missing from this frame.
        missing_features = [f for f in features if f not in df.columns]
        if missing_features:
            print(f"警告: 特征组 {i} 中缺少特征: {missing_features}")
            continue

        # IC-maximizing weights (solves the covariance system).
        weights = calculate_optimal_ic_weights(df, features, label_col)

        # |IC|-proportional weights computed only for the printed comparison.
        ic_vec = []
        for f in features:
            try:
                ic = np.corrcoef(df[f], df[label_col])[0, 1]
            except Exception:
                ic = 0.0
            ic_vec.append(ic)
        ic_weights = calculate_ic_weighted_weights(pd.Series(ic_vec, index=features), method='abs')
        print(f"组{i} features: {features}")
        print(f" 最大化IC权重: {weights}")
        print(f" IC加权权重: {ic_weights}")
        # Weighted sum of members forms the aggregated factor.
        if len(features) == 1:
            agg_feature = df[features[0]] * weights[0]
        else:
            agg_feature = sum(df[features[j]] * weights[j] for j in range(len(features)))
        agg_feature_name = f"agg_group_{i}_{representative}"
        aggregated_df[agg_feature_name] = agg_feature
        print(f"创建聚合特征: {agg_feature_name} (包含 {len(features)} 个原始特征)")
        aggregated_original_features.update(features)

    # Optionally drop every original feature that was folded into a group.
    if remove_original:
        features_to_remove = [f for f in aggregated_original_features if f in aggregated_df.columns]
        if features_to_remove:
            aggregated_df = aggregated_df.drop(columns=features_to_remove)
            print(f"删除了 {len(features_to_remove)} 个原始特征: {features_to_remove}")
        else:
            print("没有需要删除的原始特征")
    return aggregated_df
| |
|
def calculate_aggregated_features_ic_and_filter(df_aggregated, feature_groups, config_features, ic_threshold=0.04):
    """
    Compute IC for the aggregated features, keep those above *ic_threshold*,
    and de-duplicate against Config.FEATURES: a single-feature "aggregate"
    that merely mirrors a config feature is dropped in favor of the original.

    Parameters
    ----------
    df_aggregated : pd.DataFrame
        Frame produced by create_aggregated_features.
    feature_groups : list
        Feature groups used to build the aggregates.
    config_features : list
        Features listed in Config.FEATURES.
    ic_threshold : float
        Minimum IC an aggregated feature must reach to be kept.

    Returns
    -------
    df_final : pd.DataFrame
        Frame restricted to the selected features (plus the label).
    selected_features : list
        Final feature names (label excluded).
    aggregated_ic_values : pd.Series
        IC of every aggregated feature.
    """

    print(f"\n开始计算聚合特征IC值并筛选 (IC阈值: {ic_threshold})...")

    # Reconstruct the aggregate column names that actually exist in the frame.
    aggregated_feature_names = []
    for i, group in enumerate(feature_groups):
        representative = group['representative']
        agg_feature_name = f"agg_group_{i}_{representative}"
        if agg_feature_name in df_aggregated.columns:
            aggregated_feature_names.append(agg_feature_name)

    print(f"聚合特征数量: {len(aggregated_feature_names)}")

    # IC of each aggregated column against the label.
    print("计算聚合特征的IC值...")
    aggregated_ic_values = fast_ic_calculation(
        df_aggregated, aggregated_feature_names, Config.LABEL_COLUMN, Config.MAX_WORKERS
    )

    print("聚合特征IC值统计:")
    print(f"  平均IC值: {aggregated_ic_values.mean():.4f}")
    print(f"  最大IC值: {aggregated_ic_values.max():.4f}")
    print(f"  最小IC值: {aggregated_ic_values.min():.4f}")
    print(f"  IC值>=阈值的特征数量: {(aggregated_ic_values >= ic_threshold).sum()}")

    # Keep only aggregates whose IC clears the threshold.
    high_ic_aggregated = aggregated_ic_values[aggregated_ic_values >= ic_threshold].index.tolist()
    print(f"IC值>=阈值的聚合特征数量: {len(high_ic_aggregated)}")

    # Config features that survived aggregation (still present as columns).
    config_features_available = []
    for feature in config_features:
        if feature in df_aggregated.columns:
            config_features_available.append(feature)
        else:
            print(f"警告: Config.FEATURES中的特征 {feature} 不在聚合数据中")

    print(f"Config.FEATURES中可用的特征数量: {len(config_features_available)}")

    final_features = []
    conflicts_resolved = []

    # De-dup pass: a singleton group duplicates its one original feature,
    # so prefer the original when it is also a config feature.
    for agg_feature in high_ic_aggregated:
        # Recover the group index from "agg_group_<i>_<rep>".
        # NOTE(review): this parse assumes the representative name itself
        # contains no pattern-breaking underscores before index position 2.
        group_idx = int(agg_feature.split('_')[2])
        group = feature_groups[group_idx]

        if len(group['features']) == 1:
            original_feature = group['features'][0]
            if original_feature in config_features_available:
                # Record the conflict; the aggregate is skipped below.
                conflicts_resolved.append({
                    'aggregated_feature': agg_feature,
                    'original_feature': original_feature,
                    'aggregated_ic': aggregated_ic_values[agg_feature],
                    'reason': 'Single factor aggregation conflicts with Config.FEATURES'
                })
                print(f"去重: 单因子聚合 {agg_feature} (IC={aggregated_ic_values[agg_feature]:.4f}) 与Config.FEATURES中的 {original_feature} 冲突,保留原始特征")
                continue

        final_features.append(agg_feature)

    # Add the config features that were not involved in any conflict.
    for config_feature in config_features_available:
        is_conflict = False
        for conflict in conflicts_resolved:
            if conflict['original_feature'] == config_feature:
                is_conflict = True
                break

        if not is_conflict:
            final_features.append(config_feature)

    # Carry the label through so downstream training still has the target.
    if Config.LABEL_COLUMN in df_aggregated.columns:
        final_features.append(Config.LABEL_COLUMN)

    # Defensive re-check that every selected column actually exists.
    available_features = [f for f in final_features if f in df_aggregated.columns]
    df_final = df_aggregated[available_features].copy()

    print(f"\n特征筛选结果:")
    print(f"  原始聚合特征数量: {len(aggregated_feature_names)}")
    print(f"  高IC值聚合特征数量: {len(high_ic_aggregated)}")
    print(f"  Config.FEATURES特征数量: {len(config_features_available)}")
    print(f"  冲突解决数量: {len(conflicts_resolved)}")
    print(f"  最终特征数量: {len(available_features)}")

    if conflicts_resolved:
        print(f"\n冲突解决详情:")
        for conflict in conflicts_resolved:
            print(f"  - {conflict['aggregated_feature']} (IC={conflict['aggregated_ic']:.4f}) -> {conflict['original_feature']}: {conflict['reason']}")

    # Report the final selection (label excluded from the returned names).
    final_feature_names = [f for f in available_features if f != Config.LABEL_COLUMN]
    print(f"\n最终选择的特征 ({len(final_feature_names)} 个):")
    for i, feature in enumerate(final_feature_names, 1):
        if feature.startswith('agg_group_'):
            ic_val = aggregated_ic_values[feature]
            print(f"  {i:3d}. {feature:30s} (IC={ic_val:.4f})")
        else:
            print(f"  {i:3d}. {feature:30s} (Config.FEATURES)")

    return df_final, final_feature_names, aggregated_ic_values
| |
|
| | |
def visualize_correlation_and_ic(corr_matrix, ic_values, feature_groups, save_plots=True):
    """
    Visualize correlation matrix, IC distribution, and feature aggregation
    results in a 2x2 panel figure (English labels).

    Parameters
    ----------
    corr_matrix : pd.DataFrame
        Pairwise feature correlation matrix.
    ic_values : pd.Series
        IC value per feature.
    feature_groups : list of dict
        Groups produced by aggregate_correlated_features.
    save_plots : bool
        When True, writes the figure to ./threshold_6_29/feature_analysis.png.
    """
    fig, axes = plt.subplots(2, 2, figsize=(20, 16))
    fig.suptitle('Feature Correlation Analysis and IC Distribution', fontsize=16, fontweight='bold')

    # Panel (0,0): lower-triangle correlation heatmap (upper half masked).
    mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
    sns.heatmap(corr_matrix, mask=mask, annot=False, cmap='RdBu_r', center=0,
                square=True, linewidths=0.5, cbar_kws={"shrink": .8}, ax=axes[0,0])
    axes[0,0].set_title('Feature Correlation Matrix', fontsize=14, fontweight='bold')

    # Panel (0,1): histogram of IC values with mean and zero reference lines.
    axes[0,1].hist(ic_values.values, bins=30, alpha=0.7, color='skyblue', edgecolor='black')
    axes[0,1].axvline(ic_values.mean(), color='red', linestyle='--',
                      label=f'Mean: {ic_values.mean():.4f}')
    axes[0,1].axvline(0, color='green', linestyle='-', alpha=0.5, label='IC=0')
    axes[0,1].set_xlabel('IC Value')
    axes[0,1].set_ylabel('Frequency')
    axes[0,1].set_title('Feature IC Value Distribution', fontsize=14, fontweight='bold')
    axes[0,1].legend()
    axes[0,1].grid(True, alpha=0.3)

    # Panel (1,0): top-20 features by |IC|, colored by the IC's sign.
    top_ic_features = ic_values.abs().sort_values(ascending=False).head(20)
    colors = ['red' if ic_values[feature] < 0 else 'blue' for feature in top_ic_features.index]
    axes[1,0].barh(range(len(top_ic_features)), top_ic_features.values, color=colors, alpha=0.7)
    axes[1,0].set_yticks(range(len(top_ic_features)))
    axes[1,0].set_yticklabels(top_ic_features.index, fontsize=8)
    axes[1,0].set_xlabel('|IC Value|')
    axes[1,0].set_title('Top 20 |IC Value| Features', fontsize=14, fontweight='bold')
    axes[1,0].grid(True, alpha=0.3)

    # Panel (1,1): group size vs. mean group IC, split by singleton vs. multi.
    group_sizes = [len(group['features']) for group in feature_groups]
    group_ics = [group['group_ic'] for group in feature_groups]
    single_features = [i for i, size in enumerate(group_sizes) if size == 1]
    grouped_features = [i for i, size in enumerate(group_sizes) if size > 1]
    if single_features:
        axes[1,1].scatter([group_sizes[i] for i in single_features],
                          [group_ics[i] for i in single_features],
                          alpha=0.6, label='Single Feature', s=50)
    if grouped_features:
        axes[1,1].scatter([group_sizes[i] for i in grouped_features],
                          [group_ics[i] for i in grouped_features],
                          alpha=0.8, label='Aggregated Feature', s=100, color='red')
    axes[1,1].set_xlabel('Feature Group Size')
    axes[1,1].set_ylabel('Group Mean IC Value')
    axes[1,1].set_title('Feature Aggregation Result', fontsize=14, fontweight='bold')
    axes[1,1].legend()
    axes[1,1].grid(True, alpha=0.3)
    plt.tight_layout()
    if save_plots:
        # Assumes ./threshold_6_29/ already exists — savefig does not mkdir.
        plt.savefig('./threshold_6_29/feature_analysis.png', dpi=300, bbox_inches='tight')
        print("Saved feature analysis image: feature_analysis.png")
    plt.show()
| |
|
def create_feature_summary_report(corr_matrix, ic_values, feature_groups, selected_features=None):
    """
    Build, print, and save a text report summarizing the feature analysis.

    Parameters
    ----------
    corr_matrix : pd.DataFrame
        Pairwise feature correlation matrix.
    ic_values : pd.Series
        IC value per feature.
    feature_groups : list
        Groups produced by aggregate_correlated_features.
    selected_features : list, optional
        Final selected feature names; adds selection sections when given.
    """

    report = []
    report.append("=" * 60)
    report.append("Feature Analysis Report")
    report.append("=" * 60)

    # Section 1: basic IC statistics across all features.
    report.append(f"\n1. Basic Statistical Information:")
    report.append(f"   Total Feature Count: {len(ic_values)}")
    report.append(f"   Average IC Value: {ic_values.mean():.4f}")
    report.append(f"   IC Value Standard Deviation: {ic_values.std():.4f}")
    report.append(f"   Maximum IC Value: {ic_values.max():.4f}")
    report.append(f"   Minimum IC Value: {ic_values.min():.4f}")
    report.append(f"   Positive IC Value Feature Count: {(ic_values > 0).sum()}")
    report.append(f"   Negative IC Value Feature Count: {(ic_values < 0).sum()}")

    # Section 2: count feature pairs above the correlation threshold
    # (upper triangle only, so each pair is counted once).
    high_corr_count = 0
    for i in range(len(corr_matrix.columns)):
        for j in range(i+1, len(corr_matrix.columns)):
            if abs(corr_matrix.iloc[i, j]) > Config.CORRELATION_THRESHOLD:
                high_corr_count += 1

    report.append(f"\n2. High Correlation Analysis (|Correlation| > {Config.CORRELATION_THRESHOLD}):")
    report.append(f"   High Correlation Feature Pair Count: {high_corr_count}")
    report.append(f"   Correlation Matrix Density: {high_corr_count / (len(corr_matrix) * (len(corr_matrix) - 1) / 2):.4f}")

    # Section 3: aggregation overview (singleton vs. multi-feature groups).
    report.append(f"\n3. Feature Aggregation Results:")
    report.append(f"   Feature Group Count: {len(feature_groups)}")

    single_features = [g for g in feature_groups if len(g['features']) == 1]
    grouped_features = [g for g in feature_groups if len(g['features']) > 1]

    report.append(f"   Single Feature Group Count: {len(single_features)}")
    report.append(f"   Aggregated Feature Group Count: {len(grouped_features)}")

    if grouped_features:
        avg_group_size = np.mean([len(g['features']) for g in grouped_features])
        report.append(f"   Average Aggregated Group Size: {avg_group_size:.2f}")

    # Sections 4-5 only exist when a final selection was provided.
    if selected_features is not None:
        report.append(f"\n4. Feature Selection Results (IC Threshold: {Config.IC_THRESHOLD}):")
        report.append(f"   Final Selected Feature Count: {len(selected_features)}")

        # Aggregated columns are identified purely by their name prefix.
        aggregated_count = sum(1 for f in selected_features if f.startswith('agg_group_'))
        config_count = sum(1 for f in selected_features if not f.startswith('agg_group_'))

        report.append(f"   Aggregated Features in Final Selection: {aggregated_count}")
        report.append(f"   Config.FEATURES in Final Selection: {config_count}")

        final_aggregated = [f for f in selected_features if f.startswith('agg_group_')]
        if final_aggregated:
            report.append(f"\n5. Final Selected Aggregated Features:")
            for i, feature in enumerate(final_aggregated, 1):
                # Group index is recovered from the "agg_group_<i>_..." name.
                group_idx = int(feature.split('_')[2])
                group = feature_groups[group_idx]
                report.append(f"   {i:2d}. {feature:25s} (Group IC={group['group_ic']:.4f})")

    # Section 6: strongest features ranked by |IC|.
    report.append(f"\n6. Top 10 Highest IC Value Features:")
    top_ic = ic_values.abs().sort_values(ascending=False).head(10)
    for i, (feature, ic_abs) in enumerate(top_ic.items(), 1):
        ic_original = ic_values[feature]
        report.append(f"   {i:2d}. {feature:20s} |IC|={ic_abs:.4f} (IC={ic_original:.4f})")

    # Section 7: membership and weights of every multi-feature group.
    report.append(f"\n7. Feature Aggregation Details:")
    for i, group in enumerate(grouped_features, 1):
        report.append(f"   Group {i}: {group['representative']} (IC={group['group_ic']:.4f})")
        report.append(f"      Contains Features: {', '.join(group['features'])}")
        report.append(f"      Weights: {[f'{w:.3f}' for w in group['weights']]}")

    # Persist and echo the report. Assumes ./threshold_6_29/ exists.
    with open('./threshold_6_29/feature_analysis_report.txt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(report))

    print('\n'.join(report))
    print(f"\nReport Saved to: feature_analysis_report.txt")
| |
|
| | |
def fast_correlation_matrix(df, features, method='pearson', max_workers=4):
    """
    Compute the feature correlation matrix, dispatching to the fastest
    available strategy: matrix multiplication (GPU or CPU) for Pearson,
    sampled pandas corr() for large frames, or a thread-pool pairwise path.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame with the feature columns.
    features : list
        Feature names to correlate.
    method : str
        'pearson' or 'spearman'. The matrix-multiplication fast path only
        applies to 'pearson'.
    max_workers : int
        Thread count for the parallel fallback.

    Returns
    -------
    corr_matrix : pd.DataFrame
        Feature-by-feature correlation matrix.
    """

    print(f"开始计算相关系数矩阵 (特征数量: {len(features)}, 方法: {method})")
    start_time = time.time()

    # Fast path: closed-form Pearson via one matrix product (GPU if allowed).
    if method == 'pearson' and Config.USE_MATRIX_MULTIPLICATION:
        if Config.USE_GPU and torch.cuda.is_available():
            corr_matrix = torch_correlation(df, features, use_gpu=True)
            print(f"GPU矩阵乘法优化耗时: {time.time() - start_time:.2f}秒")
        else:
            corr_matrix = matrix_correlation(df, features)
            print(f"CPU矩阵乘法优化耗时: {time.time() - start_time:.2f}秒")
        return corr_matrix

    # Sampling path: approximate the matrix on a random row subset.
    if Config.USE_SAMPLING and len(df) > Config.SAMPLE_SIZE:
        print(f"数据量较大,使用采样计算 (采样大小: {Config.SAMPLE_SIZE})...")
        sample_size = min(Config.SAMPLE_SIZE, len(df))
        sample_df = df.sample(n=sample_size, random_state=42)
        feature_data = sample_df[features]
        corr_matrix = feature_data.corr(method=method)
        print(f"采样计算耗时: {time.time() - start_time:.2f}秒")
        return corr_matrix

    # Fallback: exact pairwise computation on a thread pool.
    else:
        print(f"使用并行计算 (线程数: {max_workers})...")
        return parallel_correlation_matrix(df, features, method, max_workers)
| |
|
def matrix_correlation(df, features):
    """
    Compute the Pearson correlation matrix via a single matrix product
    (Z^T Z / (n - 1) on the standardized feature matrix) — much faster
    than pairwise computation for wide frames.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame with the feature columns.
    features : list
        Feature names to correlate.

    Returns
    -------
    corr_matrix : pd.DataFrame
        Feature-by-feature Pearson correlation matrix.
    """

    feature_data = df[features].values

    # BUGFIX: the original standardized with the population std (ddof=0)
    # but divided the cross-product by (n - 1), inflating every
    # correlation by n/(n-1) and allowing off-diagonal values > 1. Use the
    # sample std (ddof=1) to match the (n - 1) denominator — consistent
    # with torch_correlation, which uses unbiased=True.
    feature_data_std = (feature_data - feature_data.mean(axis=0)) / feature_data.std(axis=0, ddof=1)

    # Constant columns produce 0/0 = NaN; treat them as zero correlation.
    feature_data_std = np.nan_to_num(feature_data_std, nan=0.0)

    n = feature_data_std.shape[0]
    corr_matrix_np = np.dot(feature_data_std.T, feature_data_std) / (n - 1)

    # Pin the diagonal to exactly 1 despite floating-point round-off.
    np.fill_diagonal(corr_matrix_np, 1.0)

    corr_matrix = pd.DataFrame(corr_matrix_np, index=features, columns=features)

    return corr_matrix
| |
|
def torch_correlation(df, features, use_gpu=False):
    """
    Compute the Pearson correlation matrix with PyTorch tensors, optionally
    on the GPU: standardize with the sample std (unbiased) and evaluate
    Z^T Z / (n - 1).

    Parameters
    ----------
    df : pd.DataFrame
        Data frame with the feature columns.
    features : list
        Feature names to correlate.
    use_gpu : bool
        Run on CUDA when available; otherwise falls back to CPU.

    Returns
    -------
    corr_matrix : pd.DataFrame
        Feature-by-feature Pearson correlation matrix.
    """

    values = df[features].values

    # Pick the compute device, falling back to CPU when CUDA is absent.
    if use_gpu and torch.cuda.is_available():
        device = torch.device('cuda')
        print("使用GPU加速计算...")
    else:
        device = torch.device('cpu')
        print("使用CPU计算...")

    X = torch.tensor(values, dtype=torch.float32, device=device)

    # Standardize each column (unbiased std to match the (n-1) denominator).
    col_mean = torch.mean(X, dim=0, keepdim=True)
    col_std = torch.std(X, dim=0, keepdim=True, unbiased=True)
    # Constant columns would divide by zero; give them unit scale instead.
    col_std = torch.where(col_std == 0, torch.ones_like(col_std), col_std)
    standardized = (X - col_mean) / col_std

    # Any remaining NaN is neutralized so it cannot poison the product.
    standardized = torch.nan_to_num(standardized, nan=0.0)

    rows = standardized.shape[0]
    corr = torch.mm(standardized.T, standardized) / (rows - 1)

    # Round-off can push the diagonal off exactly 1; pin it.
    torch.diagonal(corr)[:] = 1.0

    # Move back to host memory and wrap with feature labels.
    return pd.DataFrame(corr.cpu().numpy(), index=features, columns=features)
| |
|
def parallel_correlation_matrix(df, features, method='pearson', max_workers=4):
    """
    Compute the correlation matrix pair-by-pair on a thread pool.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame with the feature columns.
    features : list
        Feature names to correlate.
    method : str
        'pearson' (scipy pearsonr) or anything else → pandas 'spearman'.
    max_workers : int
        Thread-pool size.

    Returns
    -------
    corr_matrix : pd.DataFrame
        Feature-by-feature correlation matrix (unit diagonal).
    """

    def pairwise_corr(pair):
        """Correlation for one unordered feature pair."""
        left, right = pair
        if method == 'pearson':
            value, _ = pearsonr(df[left], df[right])
        else:
            value = df[left].corr(df[right], method='spearman')
        return (left, right, value)

    # Every unordered pair is computed exactly once.
    pairs = list(combinations(features, 2))
    print(f"需要计算 {len(pairs)} 个特征对的相关系数")

    lookup = {}
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(pairwise_corr, p): p for p in pairs}

        done = 0
        for fut in as_completed(futures):
            left, right, value = fut.result()
            # Store both orientations so assembly below is order-free.
            lookup[(left, right)] = value
            lookup[(right, left)] = value
            done += 1

            if done % 100 == 0:
                print(f"已完成: {done}/{len(pairs)} ({done/len(pairs)*100:.1f}%)")

    # Assemble the symmetric matrix; missing pairs default to 0.0.
    corr_matrix = pd.DataFrame(index=features, columns=features)
    for row in features:
        for col in features:
            if row == col:
                corr_matrix.loc[row, col] = 1.0
            else:
                corr_matrix.loc[row, col] = lookup.get((row, col), 0.0)

    return corr_matrix
| |
|
def fast_ic_calculation(df, features, label_col, max_workers=4):
    """
    Compute each feature's IC (Pearson correlation with the label) on a
    thread pool.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame containing the features and the label.
    features : list
        Feature names to score.
    label_col : str
        Label column name.
    max_workers : int
        Thread-pool size.

    Returns
    -------
    ic_values : pd.Series
        IC per feature (0.0 for features whose correlation failed).
    """

    print(f"开始计算特征IC值 (特征数量: {len(features)})")
    start_time = time.time()

    def calculate_ic(feature):
        """IC for one feature; falls back to 0.0 when undefined."""
        try:
            ic, _ = pearsonr(df[feature], df[label_col])
            return feature, ic
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. `Exception` keeps the
            # best-effort 0.0 fallback without masking interpreter signals.
            return feature, 0.0

    ic_dict = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_feature = {executor.submit(calculate_ic, feature): feature for feature in features}

        completed = 0
        for future in as_completed(future_to_feature):
            feature, ic = future.result()
            ic_dict[feature] = ic
            completed += 1

            if completed % 50 == 0:
                print(f"IC计算进度: {completed}/{len(features)} ({completed/len(features)*100:.1f}%)")

    ic_values = pd.Series(ic_dict)
    print(f"IC值计算耗时: {time.time() - start_time:.2f}秒")

    return ic_values
| |
|
def benchmark_correlation_methods(df, features, sample_size=1000):
    """
    Benchmark the different correlation-matrix implementations against each
    other on a sampled subset of the data.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame with the feature columns.
    features : list
        Candidate feature names (at most the first 50 are benchmarked).
    sample_size : int
        Maximum number of rows used for the benchmark.

    Returns
    -------
    results : dict
        Per-method dict with 'time', 'success', and optionally 'error'.
    """

    print("=" * 60)
    print("相关系数计算方法性能比较")
    print("=" * 60)

    # Cap the benchmark workload: at most sample_size rows...
    if len(df) > sample_size:
        test_df = df.sample(n=sample_size, random_state=42)
    else:
        test_df = df

    # ...and at most 50 features.
    test_features = features[:min(50, len(features))]
    print(f"测试数据: {len(test_df)} 行, {len(test_features)} 个特征")

    results = {}

    # Method 1: pandas built-in DataFrame.corr().
    print("\n1. 测试 pandas corr() 方法...")
    start_time = time.time()
    try:
        feature_data = test_df[test_features]
        corr_pandas = feature_data.corr()
        pandas_time = time.time() - start_time
        results['pandas_corr'] = {'time': pandas_time, 'success': True}
        print(f"   耗时: {pandas_time:.3f}秒")
    except Exception as e:
        results['pandas_corr'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"   失败: {e}")

    # Method 2: NumPy matrix-multiplication path.
    print("\n2. 测试矩阵乘法 (CPU)...")
    start_time = time.time()
    try:
        corr_matrix = matrix_correlation(test_df, test_features)
        matrix_time = time.time() - start_time
        results['matrix_cpu'] = {'time': matrix_time, 'success': True}
        print(f"   耗时: {matrix_time:.3f}秒")
    except Exception as e:
        results['matrix_cpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"   失败: {e}")

    # Method 3: PyTorch on CPU.
    print("\n3. 测试 PyTorch (CPU)...")
    start_time = time.time()
    try:
        corr_torch_cpu = torch_correlation(test_df, test_features, use_gpu=False)
        torch_cpu_time = time.time() - start_time
        results['torch_cpu'] = {'time': torch_cpu_time, 'success': True}
        print(f"   耗时: {torch_cpu_time:.3f}秒")
    except Exception as e:
        results['torch_cpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"   失败: {e}")

    # Method 4: PyTorch on GPU, only when CUDA is actually available.
    if torch.cuda.is_available():
        print("\n4. 测试 PyTorch (GPU)...")
        start_time = time.time()
        try:
            corr_torch_gpu = torch_correlation(test_df, test_features, use_gpu=True)
            torch_gpu_time = time.time() - start_time
            results['torch_gpu'] = {'time': torch_gpu_time, 'success': True}
            print(f"   耗时: {torch_gpu_time:.3f}秒")
        except Exception as e:
            results['torch_gpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
            print(f"   失败: {e}")
    else:
        print("\n4. GPU不可用,跳过GPU测试")
        results['torch_gpu'] = {'time': float('inf'), 'success': False, 'error': 'GPU not available'}

    # Method 5: thread-pool pairwise computation.
    print("\n5. 测试并行计算...")
    start_time = time.time()
    try:
        corr_parallel = parallel_correlation_matrix(test_df, test_features, method='pearson', max_workers=4)
        parallel_time = time.time() - start_time
        results['parallel'] = {'time': parallel_time, 'success': True}
        print(f"   耗时: {parallel_time:.3f}秒")
    except Exception as e:
        results['parallel'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"   失败: {e}")

    # Summary: rank the successful methods by wall-clock time.
    print(f"\n=== 性能比较结果 ===")
    successful_methods = {k: v for k, v in results.items() if v['success']}

    if successful_methods:
        fastest_method = min(successful_methods.items(), key=lambda x: x[1]['time'])
        print(f"最快方法: {fastest_method[0]} ({fastest_method[1]['time']:.3f}秒)")

        print(f"\n详细结果:")
        for method, result in sorted(successful_methods.items(), key=lambda x: x[1]['time']):
            # NOTE(review): this ratio is fastest/current, so slower methods
            # show values < 1 — "relative speed", not a speedup factor.
            speedup = fastest_method[1]['time'] / result['time']
            print(f"   {method:12s}: {result['time']:6.3f}秒 (相对速度: {speedup:.2f}x)")

    # Report any methods that raised during the benchmark.
    failed_methods = {k: v for k, v in results.items() if not v['success']}
    if failed_methods:
        print(f"\n失败的方法:")
        for method, result in failed_methods.items():
            print(f"   {method}: {result.get('error', 'Unknown error')}")

    return results
| |
|
if __name__ == "__main__":
    # Entry point. Two modes:
    #   --benchmark : time the available correlation implementations and exit.
    #   (default)   : run the full correlation-analysis / factor-aggregation pipeline.

    if len(sys.argv) > 1 and sys.argv[1] == '--benchmark':
        print("=" * 60)
        print("运行相关系数计算方法性能测试")
        print("=" * 60)

        # Benchmark uses the raw training data with every non-label column.
        train_df = pd.read_parquet(Config.TRAIN_PATH)
        all_features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]

        benchmark_correlation_methods(train_df, all_features)
        sys.exit(0)

    print("=" * 60)
    print("开始特征相关性分析和因子聚合")
    print("=" * 60)

    # 1. Load raw train/test data.
    print("\n1. 加载数据...")
    train_df = pd.read_parquet(Config.TRAIN_PATH)
    test_df = pd.read_parquet(Config.TEST_PATH)
    print(f"训练数据形状: {train_df.shape}")
    print(f"测试数据形状: {test_df.shape}")

    # 2. Derive engineered features and register them in Config.FEATURES
    #    (set() dedupes in case any name is already listed).
    print("\n2. 执行特征工程...")
    train_df = feature_engineering(train_df)
    test_df = feature_engineering(test_df)
    print(f"特征工程后训练数据形状: {train_df.shape}")
    print(f"特征工程后测试数据形状: {test_df.shape}")
    engineered_features = [
        "volume_weighted_sell", "buy_sell_ratio", "selling_pressure",
        "effective_spread_proxy", "log_volume", "bid_ask_imbalance",
        "order_flow_imbalance", "liquidity_ratio"
    ]
    Config.FEATURES = list(set(Config.FEATURES + engineered_features))

    # 2.5. Drop zero-variance columns (they carry no signal and break scaling).
    print("\n2.5 Remove constant features...")
    feature_cols = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    constant_features = [col for col in feature_cols if train_df[col].std() == 0]
    if constant_features:
        print(f"Remove {len(constant_features)} constant features: {constant_features}")
        train_df = train_df.drop(columns=constant_features)
        # Test may lack some of these columns, so guard the drop.
        test_df = test_df.drop(columns=[col for col in constant_features if col in test_df.columns])
    else:
        print("No constant features found.")

    all_features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    print(f"\n特征数量: {len(all_features)}")

    # 3. Correlation matrix + per-feature IC, grouping features whose pairwise
    #    |correlation| exceeds the configured threshold.
    print(f"\n3. 计算相关系数矩阵 (阈值: {Config.CORRELATION_THRESHOLD})...")
    corr_matrix, ic_values, feature_groups, train_df, test_df = calculate_correlation_matrix_and_ic(
        train_df, all_features, Config.LABEL_COLUMN, Config.CORRELATION_THRESHOLD, Config.MAX_WORKERS, test_df
    )

    # 4. Summary statistics of the IC distribution.
    print(f"\n4. 基本统计信息:")
    print(f"   相关系数矩阵形状: {corr_matrix.shape}")
    print(f"   平均IC值: {ic_values.mean():.4f}")
    print(f"   最大IC值: {ic_values.max():.4f}")
    print(f"   最小IC值: {ic_values.min():.4f}")
    print(f"   IC值标准差: {ic_values.std():.4f}")

    # 5. How many groups ended up singleton vs. aggregated.
    print(f"\n5. 特征聚合结果:")
    print(f"   特征组数量: {len(feature_groups)}")

    single_features = [g for g in feature_groups if len(g['features']) == 1]
    grouped_features = [g for g in feature_groups if len(g['features']) > 1]

    print(f"   单独特征组: {len(single_features)}")
    print(f"   聚合特征组: {len(grouped_features)}")

    # 6. Build the aggregated feature columns on both splits.
    print(f"\n6. 创建聚合特征...")
    train_df_aggregated = create_aggregated_features(train_df, feature_groups, Config.REMOVE_ORIGINAL_FEATURES)
    test_df_aggregated = create_aggregated_features(test_df, feature_groups, Config.REMOVE_ORIGINAL_FEATURES)

    print(f"   聚合前训练特征数量: {len(all_features)}")
    print(f"   聚合后训练特征数量: {len([col for col in train_df_aggregated.columns if col != Config.LABEL_COLUMN])}")
    # Exclude the label from the test count too, for consistency with the
    # train count above (the raw column count would include it if present).
    print(f"   聚合后测试特征数量: {len([col for col in test_df_aggregated.columns if col != Config.LABEL_COLUMN])}")

    # 7. Re-score aggregated features by IC and keep those above IC_THRESHOLD.
    print(f"\n7. 计算聚合特征IC值并筛选...")
    train_df_final, selected_features, aggregated_ic_values = calculate_aggregated_features_ic_and_filter(
        train_df_aggregated, feature_groups, Config.FEATURES, Config.IC_THRESHOLD
    )

    # Project the test set onto the selected feature subset (plus the label,
    # if the test split carries one).
    test_features_available = [f for f in selected_features if f in test_df_aggregated.columns]
    if Config.LABEL_COLUMN in test_df_aggregated.columns:
        test_features_available.append(Config.LABEL_COLUMN)
    test_df_final = test_df_aggregated[test_features_available].copy()

    print(f"   筛选后训练特征数量: {len([col for col in train_df_final.columns if col != Config.LABEL_COLUMN])}")
    print(f"   筛选后测试特征数量: {len([col for col in test_df_final.columns if col != Config.LABEL_COLUMN])}")

    # 8. Persist intermediate and final artifacts.
    if Config.SAVE_RESULTS:
        print(f"\n8. 保存结果...")
        # Create the output directory up front; without this every write below
        # fails with FileNotFoundError on a fresh checkout.
        import os
        os.makedirs('./threshold_6_29', exist_ok=True)
        corr_matrix.to_csv('./threshold_6_29/correlation_matrix.csv')
        ic_values.to_csv('./threshold_6_29/ic_values.csv')
        train_df_aggregated.to_parquet('./threshold_6_29/train_aggregated.parquet')
        test_df_aggregated.to_parquet('./threshold_6_29/test_aggregated.parquet')

        train_df_final.to_parquet('./threshold_6_29/train_final.parquet')
        test_df_final.to_parquet('./threshold_6_29/test_final.parquet')

        aggregated_ic_values.to_csv('./threshold_6_29/aggregated_ic_values.csv')
        print("   相关系数矩阵已保存: correlation_matrix.csv")
        print("   特征IC值已保存: ic_values.csv")
        print("   聚合后训练数据已保存: train_aggregated.parquet")
        print("   聚合后测试数据已保存: test_aggregated.parquet")
        print("   最终筛选后训练数据已保存: train_final.parquet")
        print("   最终筛选后测试数据已保存: test_final.parquet")
        print("   聚合特征IC值已保存: aggregated_ic_values.csv")

    # 9. Top features by absolute IC.
    print(f"\n9. Top 10 highest IC features:")
    print(ic_values.abs().sort_values(ascending=False).head(10))

    # 10. Report the most strongly correlated feature pairs above the threshold.
    print(f"\n10. Highly correlated feature pairs (|correlation| > {Config.CORRELATION_THRESHOLD}):")
    high_corr_pairs = []
    for i in range(len(corr_matrix.columns)):
        for j in range(i + 1, len(corr_matrix.columns)):
            corr_val = corr_matrix.iloc[i, j]
            if abs(corr_val) > Config.CORRELATION_THRESHOLD:
                high_corr_pairs.append((corr_matrix.columns[i], corr_matrix.columns[j], corr_val))

    for pair in sorted(high_corr_pairs, key=lambda x: abs(x[2]), reverse=True)[:10]:
        print(f"   {pair[0]} <-> {pair[1]}: {pair[2]:.4f}")

    # 11. Optional plots of the correlation structure and IC values.
    if Config.CREATE_VISUALIZATIONS:
        print(f"\n11. Generate visualization...")
        visualize_correlation_and_ic(corr_matrix, ic_values, feature_groups, Config.SAVE_RESULTS)

    # 12. Optional textual summary report.
    if Config.SAVE_RESULTS:
        print(f"\n12. Generate feature analysis report...")
        create_feature_summary_report(corr_matrix, ic_values, feature_groups, selected_features)

    print(f"\n" + "=" * 60)
    print("Feature correlation analysis and factor aggregation completed!")
    print("=" * 60)