import os
import sys
import time
import warnings
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import combinations

import numpy as np
import pandas as pd
import torch
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr

warnings.filterwarnings('ignore')

# Configure plot fonts (SimHei covers CJK glyphs) and minus-sign rendering
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

# ===== Configuration =====
class Config:
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/test.parquet"
    # SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/sample_submission_zmj.csv"
    
    # Original features plus additional market features
    # FEATURES = [
    #     "X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
    #     "X415", "X345", "X855", "X174", "X302", "X178", "X168", "X612",
    #     "buy_qty", "sell_qty", "volume", "X888", "X421", "X333",
    #     "bid_qty", "ask_qty"
    # ]
    
    LABEL_COLUMN = "label"
    N_FOLDS = 3
    RANDOM_STATE = 42
    
    # Correlation-analysis settings
    CORRELATION_THRESHOLD = 0.8  # Factors whose |correlation| exceeds this threshold are aggregated
    IC_WEIGHT_METHOD = "abs"     # IC weighting method: "abs", "square", "rank"
    SAVE_RESULTS = True          # Save analysis results to disk
    CREATE_VISUALIZATIONS = True # Create visualization charts
    REMOVE_ORIGINAL_FEATURES = True  # Drop the original features after aggregation
    
    # Performance settings
    MAX_WORKERS = 4              # Number of worker threads for parallel computation
    USE_SAMPLING = False         # Subsample large datasets before computing correlations
    SAMPLE_SIZE = 10000          # Subsample size
    USE_GPU = True               # Use GPU acceleration (requires PyTorch with CUDA)
    USE_MATRIX_MULTIPLICATION = True  # Use the matrix-multiplication fast path

# ===== Feature Engineering =====
def feature_engineering(df):
    """Original features plus new robust features"""
    # Original features
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + 1e-8)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + 1e-8)
    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + 1e-8)
    
    # New robust features
    df['log_volume'] = np.log1p(df['volume'])
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + 1e-8)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + 1e-8)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + 1e-8)
    
    # Handle infinities and NaN
    df = df.replace([np.inf, -np.inf], np.nan)
    
    # For each column, replace NaN with median for robustness
    for col in df.columns:
        if df[col].isna().any():
            median_val = df[col].median()
            df[col] = df[col].fillna(median_val if not pd.isna(median_val) else 0)
    
    return df
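
# Tiny illustrative check (made-up numbers, not part of the pipeline): thanks to
# the 1e-8 denominators and the median fill, the engineered frame contains no
# inf/NaN even for zero-volume rows.
def _demo_feature_engineering():
    toy = pd.DataFrame({'buy_qty': [1.0, 2.0], 'sell_qty': [0.0, 1.0],
                        'volume': [0.0, 3.0], 'bid_qty': [1.0, 1.0],
                        'ask_qty': [1.0, 2.0]})
    out = feature_engineering(toy)
    assert np.isfinite(out.to_numpy()).all()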

# ===== Correlation matrix computation and factor aggregation =====
def calculate_correlation_matrix_and_ic(train_df, features, label_col='label', correlation_threshold=0.8, max_workers=4, test_df=None):
    """
    Compute the feature correlation matrix and each feature's IC against the label
    (optimized version). Features with negative IC are flipped first so that all
    ICs are positive.
    """
    # Keep only features that actually exist in the frame
    available_features = [f for f in features if f in train_df.columns]
    print(f"Available feature count: {len(available_features)}")

    # 1. Compute the initial IC values
    ic_values = fast_ic_calculation(train_df, available_features, label_col, max_workers=max_workers)
    print("Initial IC statistics:")
    print(ic_values.describe())

    # 2. Flip the sign of features with negative IC
    neg_ic_features = ic_values[ic_values < 0].index.tolist()
    print(f"Features with negative IC: {len(neg_ic_features)}")
    for f in neg_ic_features:
        train_df[f] = -train_df[f]
        if test_df is not None and f in test_df.columns:
            test_df[f] = -test_df[f]

    # 3. Recompute the IC values (now all positive)
    ic_values = fast_ic_calculation(train_df, available_features, label_col, max_workers=max_workers)
    print("IC statistics after sign flipping:")
    print(ic_values.describe())

    # 4. Compute the correlation matrix
    corr_matrix = fast_correlation_matrix(train_df, available_features, method='pearson', max_workers=max_workers)

    # 5. Aggregate highly correlated features
    feature_groups = aggregate_correlated_features(
        corr_matrix, ic_values, correlation_threshold
    )

    return corr_matrix, ic_values, feature_groups, train_df, test_df

def aggregate_correlated_features(corr_matrix, ic_values, threshold=0.8):
    """
    Aggregate highly correlated factors based on correlation and IC values.
    
    Parameters:
    -----------
    corr_matrix : pd.DataFrame
        Correlation matrix
    ic_values : pd.Series
        IC value of each feature
    threshold : float
        Correlation threshold
        
    Returns:
    --------
    feature_groups : list
        Aggregated feature groups; each group records its features and weights
    """
    
    features = list(corr_matrix.columns)
    used_features = set()
    feature_groups = []
    
    # Sort by |IC| so high-IC features are chosen as group representatives first
    ic_abs = ic_values.abs().sort_values(ascending=False)
    
    for feature in ic_abs.index:
        if feature in used_features:
            continue
            
        # Find the remaining features highly correlated with the current one
        correlated_features = []
        for other_feature in features:
            if other_feature != feature and other_feature not in used_features:
                corr_value = abs(corr_matrix.loc[feature, other_feature])
                if corr_value > threshold:
                    correlated_features.append(other_feature)
        
        if correlated_features:
            # Build a group from the representative and its correlated features
            group_features = [feature] + correlated_features
            used_features.update(group_features)
            
            # IC-based weights within the group
            group_ic_values = ic_values[group_features]
            weights = calculate_ic_weighted_weights(group_ic_values, Config.IC_WEIGHT_METHOD)
            
            feature_groups.append({
                'features': group_features,
                'weights': weights,
                'representative': feature,
                'group_ic': group_ic_values.mean()
            })
            
            print(f"Group {len(feature_groups)}: {feature} (IC={ic_values[feature]:.4f}) "
                  f"aggregated with {len(correlated_features)} correlated features")
            print(f"Group {len(feature_groups)} weights: {weights}, features: {group_features}")
        else:
            # Standalone feature
            used_features.add(feature)
            feature_groups.append({
                'features': [feature],
                'weights': [1.0],
                'representative': feature,
                'group_ic': ic_values[feature]
            })
    
    return feature_groups
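
# Toy walk-through (illustrative; the names, correlations, and ICs are made up):
# with corr(a, b) = 0.9 above the 0.8 threshold and c roughly uncorrelated, the
# greedy pass seeded by the highest-|IC| feature yields one group {a, b} with
# representative 'a', plus the singleton {c}.
def _demo_aggregation():
    toy_corr = pd.DataFrame(
        [[1.0, 0.9, 0.1],
         [0.9, 1.0, 0.2],
         [0.1, 0.2, 1.0]],
        index=['a', 'b', 'c'], columns=['a', 'b', 'c'])
    toy_ic = pd.Series({'a': 0.05, 'b': 0.03, 'c': 0.02})
    return aggregate_correlated_features(toy_corr, toy_ic, threshold=0.8)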

def calculate_ic_weighted_weights(ic_values, method="abs"):
    """
    Compute feature weights from IC values.
    
    Parameters:
    -----------
    ic_values : pd.Series
        Feature IC values
    method : str
        Weighting method: "abs", "square", "rank"
        
    Returns:
    --------
    weights : list
        Normalized weight list
    """
    if method == "abs":
        # Use |IC| as the weight base
        weights_base = ic_values.abs()
    elif method == "square":
        # Use IC^2 as the weight base
        weights_base = ic_values ** 2
    elif method == "rank":
        # Use the |IC| rank as the weight base (ascending, so the highest |IC|
        # gets the largest rank and hence the largest weight)
        weights_base = ic_values.abs().rank(ascending=True)
    else:
        raise ValueError(f"Unsupported weighting method: {method}")
    
    # Avoid zero weights
    weights_base = weights_base + 1e-8
    
    # Normalize the weights
    weights = weights_base / weights_base.sum()
    
    return weights.tolist()
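
# Illustrative example (not wired into the pipeline; the toy feature names and
# IC values below are made up): compares the three weighting modes.
def _demo_ic_weighting():
    toy_ic = pd.Series({'f1': 0.04, 'f2': -0.02, 'f3': 0.01})
    for m in ("abs", "square", "rank"):
        w = calculate_ic_weighted_weights(toy_ic, m)
        print(m, [round(x, 3) for x in w])
    # "abs"    -> [0.571, 0.286, 0.143]  (proportional to |IC|)
    # "square" -> [0.762, 0.19, 0.048]   (emphasizes the strongest factor)
    # "rank"   -> [0.5, 0.333, 0.167]    (robust to outlier IC magnitudes)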

def calculate_optimal_ic_weights(df, features, label_col):
    """
    For a given feature group, compute optimal weights via IC-maximizing composition.
    Parameters:
        df: pd.DataFrame containing the features and label
        features: list of feature names
        label_col: str, label column name
    Returns:
        weights: list of normalized weights
    """
    if len(features) == 1:
        return [1.0]
    Z = df[features].values
    Z = (Z - Z.mean(axis=0)) / (Z.std(axis=0) + 1e-8)  # standardize
    R = df[label_col].values.reshape(-1, 1)
    # Covariance matrices
    cov_ZZ = np.cov(Z, rowvar=False)
    cov_ZR = np.cov(Z, R, rowvar=False)[:-1, -1]
    # Add a tiny ridge term in case the covariance matrix is singular
    cov_ZZ += np.eye(cov_ZZ.shape[0]) * 1e-6
    # Solve for the optimal weights
    try:
        w = np.linalg.solve(cov_ZZ, cov_ZR)
    except np.linalg.LinAlgError:
        w = np.linalg.lstsq(cov_ZZ, cov_ZR, rcond=None)[0]
    # Normalize (L1 norm)
    if np.sum(np.abs(w)) > 1e-8:
        w = w / np.sum(np.abs(w))
    else:
        w = np.ones_like(w) / len(w)
    return w.tolist()
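
# Sanity-check sketch (not called in the pipeline): w = Cov(Z,Z)^{-1} Cov(Z,R)
# maximizes corr(Z @ w, R), so on synthetic data the solved composite should
# beat an equal-weight composite. The sizes, seed, and coefficients below are
# assumptions for the demo only.
def _demo_optimal_ic_weights(n=5000, seed=42):
    rng = np.random.default_rng(seed)
    z = rng.standard_normal((n, 3))
    label = 0.5 * z[:, 0] - 0.2 * z[:, 1] + rng.standard_normal(n)
    demo = pd.DataFrame(z, columns=['a', 'b', 'c'])
    demo['y'] = label
    w = np.array(calculate_optimal_ic_weights(demo, ['a', 'b', 'c'], 'y'))
    ic_opt = pearsonr(z @ w, label)[0]
    ic_eq = pearsonr(z.mean(axis=1), label)[0]
    print(f"optimal-IC composite IC={ic_opt:.4f}, equal-weight IC={ic_eq:.4f}")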

def create_aggregated_features(df, feature_groups, remove_original=True, label_col=None):
    """
    Create aggregated features from the feature groups. Weights come from the
    IC-maximizing composition; plain IC weighting is printed for comparison.
    When the label column is absent (e.g. the test set), the weights cached on
    each group are reused instead of being re-solved.
    """
    aggregated_df = df.copy()
    aggregated_original_features = set()
    if label_col is None:
        label_col = Config.LABEL_COLUMN
    has_label = label_col in df.columns
    for i, group in enumerate(feature_groups):
        features = group['features']
        representative = group['representative']
        # Check that every feature in the group exists
        missing_features = [f for f in features if f not in df.columns]
        if missing_features:
            print(f"Warning: group {i} is missing features: {missing_features}")
            continue
        if has_label:
            # IC-maximizing weights, cached on the group for later reuse
            weights = calculate_optimal_ic_weights(df, features, label_col)
            group['optimal_weights'] = weights
            # Plain IC weighting ("abs") for comparison: per-feature Pearson IC
            ic_vec = []
            for f in features:
                try:
                    ic = np.corrcoef(df[f], df[label_col])[0, 1]
                except Exception:
                    ic = 0.0
                ic_vec.append(ic)
            ic_weights = calculate_ic_weighted_weights(pd.Series(ic_vec, index=features), method='abs')
            print(f"Group {i} features: {features}")
            print(f"  IC-maximizing weights: {weights}")
            print(f"  IC-weighted weights: {ic_weights}")
        else:
            # No label available: reuse the weights solved on the training data
            weights = group.get('optimal_weights', group['weights'])
        if len(features) == 1:
            agg_feature = df[features[0]] * weights[0]
        else:
            agg_feature = sum(df[features[j]] * weights[j] for j in range(len(features)))
        agg_feature_name = f"agg_group_{i}_{representative}"
        aggregated_df[agg_feature_name] = agg_feature
        print(f"Created aggregated feature: {agg_feature_name} ({len(features)} original features)")
        aggregated_original_features.update(features)
    # Drop the original features
    if remove_original:
        features_to_remove = [f for f in aggregated_original_features if f in aggregated_df.columns]
        if features_to_remove:
            aggregated_df = aggregated_df.drop(columns=features_to_remove)
            print(f"Dropped {len(features_to_remove)} original features: {features_to_remove}")
        else:
            print("No original features to drop")
    return aggregated_df

# ===== Visualization functions =====
def visualize_correlation_and_ic(corr_matrix, ic_values, feature_groups, save_plots=True):
    """
    Visualize correlation matrix, IC distribution, and feature aggregation results
    """
    fig, axes = plt.subplots(2, 2, figsize=(20, 16))
    fig.suptitle('Feature Correlation Analysis and IC Distribution', fontsize=16, fontweight='bold')
    
    # 1. Correlation matrix heatmap
    mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
    sns.heatmap(corr_matrix, mask=mask, annot=False, cmap='RdBu_r', center=0,
                square=True, linewidths=0.5, cbar_kws={"shrink": .8}, ax=axes[0,0])
    axes[0,0].set_title('Feature Correlation Matrix', fontsize=14, fontweight='bold')
    
    # 2. IC distribution histogram
    axes[0,1].hist(ic_values.values, bins=30, alpha=0.7, color='skyblue', edgecolor='black')
    axes[0,1].axvline(ic_values.mean(), color='red', linestyle='--', 
                      label=f'Mean: {ic_values.mean():.4f}')
    axes[0,1].axvline(0, color='green', linestyle='-', alpha=0.5, label='IC=0')
    axes[0,1].set_xlabel('IC Value')
    axes[0,1].set_ylabel('Frequency')
    axes[0,1].set_title('Feature IC Value Distribution', fontsize=14, fontweight='bold')
    axes[0,1].legend()
    axes[0,1].grid(True, alpha=0.3)
    
    # 3. Top 20 highest IC features
    top_ic_features = ic_values.abs().sort_values(ascending=False).head(20)
    colors = ['red' if ic_values[feature] < 0 else 'blue' for feature in top_ic_features.index]
    axes[1,0].barh(range(len(top_ic_features)), top_ic_features.values, color=colors, alpha=0.7)
    axes[1,0].set_yticks(range(len(top_ic_features)))
    axes[1,0].set_yticklabels(top_ic_features.index, fontsize=8)
    axes[1,0].set_xlabel('|IC Value|')
    axes[1,0].set_title('Top 20 |IC Value| Features', fontsize=14, fontweight='bold')
    axes[1,0].grid(True, alpha=0.3)
    
    # 4. Feature aggregation results
    group_sizes = [len(group['features']) for group in feature_groups]
    group_ics = [group['group_ic'] for group in feature_groups]
    single_features = [i for i, size in enumerate(group_sizes) if size == 1]
    grouped_features = [i for i, size in enumerate(group_sizes) if size > 1]
    if single_features:
        axes[1,1].scatter([group_sizes[i] for i in single_features], 
                         [group_ics[i] for i in single_features], 
                         alpha=0.6, label='Single Feature', s=50)
    if grouped_features:
        axes[1,1].scatter([group_sizes[i] for i in grouped_features], 
                         [group_ics[i] for i in grouped_features], 
                         alpha=0.8, label='Aggregated Feature', s=100, color='red')
    axes[1,1].set_xlabel('Feature Group Size')
    axes[1,1].set_ylabel('Group Mean IC Value')
    axes[1,1].set_title('Feature Aggregation Result', fontsize=14, fontweight='bold')
    axes[1,1].legend()
    axes[1,1].grid(True, alpha=0.3)
    plt.tight_layout()
    if save_plots:
        plt.savefig('./max_IC_mixed/feature_analysis.png', dpi=300, bbox_inches='tight')
        print("Saved feature analysis image: feature_analysis.png")
    plt.show()

def create_feature_summary_report(corr_matrix, ic_values, feature_groups):
    """
    Create the feature analysis report.
    
    Parameters:
    -----------
    corr_matrix : pd.DataFrame
        Correlation matrix
    ic_values : pd.Series
        Feature IC values
    feature_groups : list
        Feature group list
    """
    
    report = []
    report.append("=" * 60)
    report.append("Feature Analysis Report")
    report.append("=" * 60)
    
    # Basic statistics
    report.append(f"\n1. Basic Statistical Information:")
    report.append(f"    Total Feature Count: {len(ic_values)}")
    report.append(f"    Average IC Value: {ic_values.mean():.4f}")
    report.append(f"    IC Value Standard Deviation: {ic_values.std():.4f}")
    report.append(f"    Maximum IC Value: {ic_values.max():.4f}")
    report.append(f"    Minimum IC Value: {ic_values.min():.4f}")
    report.append(f"    Positive IC Value Feature Count: {(ic_values > 0).sum()}")
    report.append(f"    Negative IC Value Feature Count: {(ic_values < 0).sum()}")
    
    # High-correlation analysis
    high_corr_count = 0
    for i in range(len(corr_matrix.columns)):
        for j in range(i+1, len(corr_matrix.columns)):
            if abs(corr_matrix.iloc[i, j]) > Config.CORRELATION_THRESHOLD:
                high_corr_count += 1
    
    report.append(f"\n2. High Correlation Analysis (|Correlation| > {Config.CORRELATION_THRESHOLD}):")
    report.append(f"    High Correlation Feature Pair Count: {high_corr_count}")
    report.append(f"    Correlation Matrix Density: {high_corr_count / (len(corr_matrix) * (len(corr_matrix) - 1) / 2):.4f}")
    
    # Feature aggregation results
    report.append(f"\n3. Feature Aggregation Results:")
    report.append(f"    Feature Group Count: {len(feature_groups)}")
    
    single_features = [g for g in feature_groups if len(g['features']) == 1]
    grouped_features = [g for g in feature_groups if len(g['features']) > 1]
    
    report.append(f"    Single Feature Group Count: {len(single_features)}")
    report.append(f"    Aggregated Feature Group Count: {len(grouped_features)}")
    
    if grouped_features:
        avg_group_size = np.mean([len(g['features']) for g in grouped_features])
        report.append(f"    Average Aggregated Group Size: {avg_group_size:.2f}")
    
    # Top 10 features by |IC|
    report.append(f"\n4. Top 10 Highest IC Value Features:")
    top_ic = ic_values.abs().sort_values(ascending=False).head(10)
    for i, (feature, ic_abs) in enumerate(top_ic.items(), 1):
        ic_original = ic_values[feature]
        report.append(f"   {i:2d}. {feature:20s} |IC|={ic_abs:.4f} (IC={ic_original:.4f})")
    
    # Aggregation details
    report.append(f"\n5. Feature Aggregation Details:")
    for i, group in enumerate(grouped_features, 1):
        report.append(f"    Group {i}: {group['representative']} (IC={group['group_ic']:.4f})")
        report.append(f"        Contains Features: {', '.join(group['features'])}")
        report.append(f"        Weights: {[f'{w:.3f}' for w in group['weights']]}")
    
    # Save the report
    with open('./max_IC_mixed/feature_analysis_report.txt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(report))
    
    print('\n'.join(report))
    print(f"\nReport Saved to: feature_analysis_report.txt")

# ===== Optimized correlation computation =====
def fast_correlation_matrix(df, features, method='pearson', max_workers=4):
    """
    Fast correlation-matrix computation with several optimization strategies.
    
    Parameters:
    -----------
    df : pd.DataFrame
        Data frame
    features : list
        Feature list
    method : str
        Correlation method: 'pearson', 'spearman'
    max_workers : int
        Worker threads for parallel computation
        
    Returns:
    --------
    corr_matrix : pd.DataFrame
        Correlation matrix
    """
    
    print(f"Computing correlation matrix (features: {len(features)}, method: {method})")
    start_time = time.time()
    
    # Strategy 1: matrix multiplication (fastest)
    if method == 'pearson' and Config.USE_MATRIX_MULTIPLICATION:
        if Config.USE_GPU and torch.cuda.is_available():
            corr_matrix = torch_correlation(df, features, use_gpu=True)
            print(f"GPU matrix multiplication took {time.time() - start_time:.2f}s")
        else:
            corr_matrix = matrix_correlation(df, features)
            print(f"CPU matrix multiplication took {time.time() - start_time:.2f}s")
        return corr_matrix
    
    # Strategy 2: subsample large datasets
    if Config.USE_SAMPLING and len(df) > Config.SAMPLE_SIZE:
        print(f"Large dataset; subsampling (sample size: {Config.SAMPLE_SIZE})...")
        sample_size = min(Config.SAMPLE_SIZE, len(df))
        sample_df = df.sample(n=sample_size, random_state=42)
        feature_data = sample_df[features]
        corr_matrix = feature_data.corr(method=method)
        print(f"Sampled computation took {time.time() - start_time:.2f}s")
        return corr_matrix
    
    # Strategy 3: parallel computation (medium-sized data)
    else:
        print(f"Using parallel computation (threads: {max_workers})...")
        return parallel_correlation_matrix(df, features, method, max_workers)

def matrix_correlation(df, features):
    """
    Compute the correlation matrix via matrix multiplication (Z^T Z method).
    
    Parameters:
    -----------
    df : pd.DataFrame
        Data frame
    features : list
        Feature list
        
    Returns:
    --------
    corr_matrix : pd.DataFrame
        Correlation matrix
    """
    
    # Extract the feature data
    feature_data = df[features].values
    
    # Standardize (z-score); ddof=1 so the (n-1) divisor below yields exact Pearson r
    feature_data_std = (feature_data - feature_data.mean(axis=0)) / feature_data.std(axis=0, ddof=1)
    
    # Handle NaN values
    feature_data_std = np.nan_to_num(feature_data_std, nan=0.0)
    
    # Correlation matrix: (Z^T Z) / (n - 1)
    n = feature_data_std.shape[0]
    corr_matrix_np = np.dot(feature_data_std.T, feature_data_std) / (n - 1)
    
    # Force the diagonal to 1
    np.fill_diagonal(corr_matrix_np, 1.0)
    
    # Convert to DataFrame
    corr_matrix = pd.DataFrame(corr_matrix_np, index=features, columns=features)
    
    return corr_matrix
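
# Quick equivalence check (illustrative, not called in the pipeline): on a
# small random frame the matrix-multiplication result should match pandas'
# pairwise Pearson correlation up to float error. Shapes and seed here are
# assumptions for the demo only.
def _check_matrix_correlation(n=200, k=5, seed=0):
    rng = np.random.default_rng(seed)
    frame = pd.DataFrame(rng.standard_normal((n, k)),
                         columns=[f"x{i}" for i in range(k)])
    fast = matrix_correlation(frame, list(frame.columns))
    assert np.allclose(fast.values, frame.corr().values, atol=1e-8)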

def torch_correlation(df, features, use_gpu=False):
    """
    Compute the correlation matrix with PyTorch tensors (optional GPU acceleration).
    
    Parameters:
    -----------
    df : pd.DataFrame
        Data frame
    features : list
        Feature list
    use_gpu : bool
        Whether to use GPU acceleration
        
    Returns:
    --------
    corr_matrix : pd.DataFrame
        Correlation matrix
    """
    
    # Extract the feature data
    feature_data = df[features].values
    
    # Pick the device
    if use_gpu and torch.cuda.is_available():
        device = torch.device('cuda')
        print("Computing on GPU...")
    else:
        device = torch.device('cpu')
        print("Computing on CPU...")
    
    # Move the data to the device as a tensor
    X = torch.tensor(feature_data, dtype=torch.float32, device=device)
    
    # Standardize
    X_mean = torch.mean(X, dim=0, keepdim=True)
    X_std = torch.std(X, dim=0, keepdim=True, unbiased=True)
    X_std = torch.where(X_std == 0, torch.ones_like(X_std), X_std)  # avoid division by zero
    X_norm = (X - X_mean) / X_std
    
    # Handle NaN values
    X_norm = torch.nan_to_num(X_norm, nan=0.0)
    
    # Correlation matrix: (X_norm^T X_norm) / (n - 1)
    n = X_norm.shape[0]
    corr_matrix_tensor = torch.mm(X_norm.T, X_norm) / (n - 1)
    
    # Force the diagonal to 1
    torch.diagonal(corr_matrix_tensor)[:] = 1.0
    
    # Move back to CPU and convert to numpy
    corr_matrix_np = corr_matrix_tensor.cpu().numpy()
    
    # Convert to DataFrame
    corr_matrix = pd.DataFrame(corr_matrix_np, index=features, columns=features)
    
    return corr_matrix

def parallel_correlation_matrix(df, features, method='pearson', max_workers=4):
    """
    Compute the correlation matrix in parallel.
    
    Parameters:
    -----------
    df : pd.DataFrame
        Data frame
    features : list
        Feature list
    method : str
        Correlation method
    max_workers : int
        Worker threads for parallel computation
        
    Returns:
    --------
    corr_matrix : pd.DataFrame
        Correlation matrix
    """
    
    def calculate_correlation_pair(pair):
        """Compute the correlation of one feature pair"""
        feat1, feat2 = pair
        if method == 'pearson':
            corr, _ = pearsonr(df[feat1], df[feat2])
        else:  # spearman
            corr = df[feat1].corr(df[feat2], method='spearman')
        return (feat1, feat2, corr)
    
    # Generate all feature pairs
    feature_pairs = list(combinations(features, 2))
    print(f"Computing correlations for {len(feature_pairs)} feature pairs")
    
    # Parallel computation
    results = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_pair = {executor.submit(calculate_correlation_pair, pair): pair for pair in feature_pairs}
        
        completed = 0
        for future in as_completed(future_to_pair):
            feat1, feat2, corr = future.result()
            results[(feat1, feat2)] = corr
            results[(feat2, feat1)] = corr  # symmetric matrix
            completed += 1
            
            if completed % 100 == 0:
                print(f"Completed: {completed}/{len(feature_pairs)} ({completed/len(feature_pairs)*100:.1f}%)")
    
    # Assemble the correlation matrix (float dtype, not object)
    corr_matrix = pd.DataFrame(index=features, columns=features, dtype=float)
    for feat1 in features:
        for feat2 in features:
            if feat1 == feat2:
                corr_matrix.loc[feat1, feat2] = 1.0
            else:
                corr_matrix.loc[feat1, feat2] = results.get((feat1, feat2), 0.0)
    
    return corr_matrix

def fast_ic_calculation(df, features, label_col, max_workers=4):
    """
    Fast feature-IC computation with parallel execution.
    
    Parameters:
    -----------
    df : pd.DataFrame
        Data frame
    features : list
        Feature list
    label_col : str
        Label column name
    max_workers : int
        Worker threads for parallel computation
        
    Returns:
    --------
    ic_values : pd.Series
        Feature IC values
    """
    
    print(f"Computing feature IC values (features: {len(features)})")
    start_time = time.time()
    
    def calculate_ic(feature):
        """Compute the IC of a single feature"""
        try:
            ic, _ = pearsonr(df[feature], df[label_col])
            return feature, ic
        except Exception:
            return feature, 0.0
    
    # Compute IC values in parallel
    ic_dict = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_feature = {executor.submit(calculate_ic, feature): feature for feature in features}
        
        completed = 0
        for future in as_completed(future_to_feature):
            feature, ic = future.result()
            ic_dict[feature] = ic
            completed += 1
            
            if completed % 50 == 0:
                print(f"IC progress: {completed}/{len(features)} ({completed/len(features)*100:.1f}%)")
    
    ic_values = pd.Series(ic_dict)
    print(f"IC computation took {time.time() - start_time:.2f}s")
    
    return ic_values
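
# Alternative sketch (assumes the frame has no NaNs, which holds after
# feature_engineering's median fill): all ICs in one vectorized pass. This is
# often faster than threaded per-feature pearsonr, since each thread task is
# tiny and the GIL limits the achievable overlap.
def vectorized_ic(df, features, label_col):
    X = df[features].to_numpy(dtype=float)
    y = df[label_col].to_numpy(dtype=float)
    Xc = X - X.mean(axis=0)            # center each feature
    yc = y - y.mean()                  # center the label
    denom = np.sqrt((Xc ** 2).sum(axis=0) * (yc ** 2).sum()) + 1e-12
    return pd.Series(Xc.T @ yc / denom, index=features)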

def benchmark_correlation_methods(df, features, sample_size=1000):
    """
    Compare the performance of the correlation computation methods.
    
    Parameters:
    -----------
    df : pd.DataFrame
        Data frame
    features : list
        Feature list
    sample_size : int
        Sample size used for the test
        
    Returns:
    --------
    results : dict
        Performance results per method
    """
    
    print("=" * 60)
    print("Correlation method performance comparison")
    print("=" * 60)
    
    # Subsample the data for testing
    if len(df) > sample_size:
        test_df = df.sample(n=sample_size, random_state=42)
    else:
        test_df = df
    
    test_features = features[:min(50, len(features))]  # cap the feature count for the test
    print(f"Test data: {len(test_df)} rows, {len(test_features)} features")
    
    results = {}
    
    # Method 1: pandas corr()
    print("\n1. Testing pandas corr()...")
    start_time = time.time()
    try:
        feature_data = test_df[test_features]
        corr_pandas = feature_data.corr()
        pandas_time = time.time() - start_time
        results['pandas_corr'] = {'time': pandas_time, 'success': True}
        print(f"  Took {pandas_time:.3f}s")
    except Exception as e:
        results['pandas_corr'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"  Failed: {e}")
    
    # Method 2: matrix multiplication (CPU)
    print("\n2. Testing matrix multiplication (CPU)...")
    start_time = time.time()
    try:
        corr_matrix = matrix_correlation(test_df, test_features)
        matrix_time = time.time() - start_time
        results['matrix_cpu'] = {'time': matrix_time, 'success': True}
        print(f"  Took {matrix_time:.3f}s")
    except Exception as e:
        results['matrix_cpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"  Failed: {e}")
    
    # Method 3: PyTorch (CPU)
    print("\n3. Testing PyTorch (CPU)...")
    start_time = time.time()
    try:
        corr_torch_cpu = torch_correlation(test_df, test_features, use_gpu=False)
        torch_cpu_time = time.time() - start_time
        results['torch_cpu'] = {'time': torch_cpu_time, 'success': True}
        print(f"  Took {torch_cpu_time:.3f}s")
    except Exception as e:
        results['torch_cpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"  Failed: {e}")
    
    # Method 4: PyTorch (GPU)
    if torch.cuda.is_available():
        print("\n4. Testing PyTorch (GPU)...")
        start_time = time.time()
        try:
            corr_torch_gpu = torch_correlation(test_df, test_features, use_gpu=True)
            torch_gpu_time = time.time() - start_time
            results['torch_gpu'] = {'time': torch_gpu_time, 'success': True}
            print(f"  Took {torch_gpu_time:.3f}s")
        except Exception as e:
            results['torch_gpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
            print(f"  Failed: {e}")
    else:
        print("\n4. GPU unavailable; skipping the GPU test")
        results['torch_gpu'] = {'time': float('inf'), 'success': False, 'error': 'GPU not available'}
    
    # Method 5: parallel computation
    print("\n5. Testing parallel computation...")
    start_time = time.time()
    try:
        corr_parallel = parallel_correlation_matrix(test_df, test_features, method='pearson', max_workers=4)
        parallel_time = time.time() - start_time
        results['parallel'] = {'time': parallel_time, 'success': True}
        print(f"  Took {parallel_time:.3f}s")
    except Exception as e:
        results['parallel'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f"  Failed: {e}")
    
    # Show the comparison
    print(f"\n=== Performance comparison ===")
    successful_methods = {k: v for k, v in results.items() if v['success']}
    
    if successful_methods:
        fastest_method = min(successful_methods.items(), key=lambda x: x[1]['time'])
        print(f"Fastest method: {fastest_method[0]} ({fastest_method[1]['time']:.3f}s)")
        
        print(f"\nDetailed results:")
        for method, result in sorted(successful_methods.items(), key=lambda x: x[1]['time']):
            speedup = fastest_method[1]['time'] / result['time']
            print(f"  {method:12s}: {result['time']:6.3f}s (relative speed: {speedup:.2f}x)")
    
    # Show the failed methods
    failed_methods = {k: v for k, v in results.items() if not v['success']}
    if failed_methods:
        print(f"\nFailed methods:")
        for method, result in failed_methods.items():
            print(f"  {method}: {result.get('error', 'Unknown error')}")
    
    return results
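
# Usage sketch (shell commands; substitute the actual filename of this script):
#   python feature_aggregation.py --benchmark   # run only the timing comparison above
#   python feature_aggregation.py               # run the full aggregation pipeline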

if __name__ == "__main__":
    # ===== Main execution flow =====
    
    # Check whether to run the performance benchmark instead
    if len(sys.argv) > 1 and sys.argv[1] == '--benchmark':
        print("=" * 60)
        print("Running the correlation-method performance benchmark")
        print("=" * 60)
        
        # Load the data
        train_df = pd.read_parquet(Config.TRAIN_PATH)
        all_features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
        
        # Run the benchmark
        benchmark_correlation_methods(train_df, all_features)
        sys.exit(0)
    
    print("=" * 60)
    print("Starting feature correlation analysis and factor aggregation")
    print("=" * 60)
    
    # Make sure the output directory exists before anything is saved
    os.makedirs('./max_IC_mixed', exist_ok=True)
    
    # 1. Load the data
    print("\n1. Loading data...")
    train_df = pd.read_parquet(Config.TRAIN_PATH)
    test_df = pd.read_parquet(Config.TEST_PATH)
    print(f"Train shape: {train_df.shape}")
    print(f"Test shape: {test_df.shape}")
    
    # 2. Feature engineering
    print("\n2. Running feature engineering...")
    train_df = feature_engineering(train_df)
    test_df = feature_engineering(test_df)
    print(f"Train shape after feature engineering: {train_df.shape}")
    print(f"Test shape after feature engineering: {test_df.shape}")
    
    # 2.5 Remove constant features
    print("\n2.5 Remove constant features...")
    feature_cols = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    constant_features = [col for col in feature_cols if train_df[col].std() == 0]
    if constant_features:
        print(f"Remove {len(constant_features)} constant features: {constant_features}")
        train_df = train_df.drop(columns=constant_features)
        test_df = test_df.drop(columns=[col for col in constant_features if col in test_df.columns])
    else:
        print("No constant features found.")
    
    # 3. Collect all features
    all_features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    print(f"\nFeature count: {len(all_features)}")
    
    # 4. Compute the correlation matrix and IC values, flipping negative-IC features
    print(f"\n3. Computing correlation matrix (threshold: {Config.CORRELATION_THRESHOLD})...")
    corr_matrix, ic_values, feature_groups, train_df, test_df = calculate_correlation_matrix_and_ic(
        train_df, all_features, Config.LABEL_COLUMN, Config.CORRELATION_THRESHOLD, Config.MAX_WORKERS, test_df
    )
    
    # 5. Basic statistics
    print(f"\n4. Basic statistics:")
    print(f"   Correlation matrix shape: {corr_matrix.shape}")
    print(f"   Mean IC: {ic_values.mean():.4f}")
    print(f"   Max IC: {ic_values.max():.4f}")
    print(f"   Min IC: {ic_values.min():.4f}")
    print(f"   IC std: {ic_values.std():.4f}")
    
    # 6. Aggregation results
    print(f"\n5. Feature aggregation results:")
    print(f"   Feature group count: {len(feature_groups)}")
    
    single_features = [g for g in feature_groups if len(g['features']) == 1]
    grouped_features = [g for g in feature_groups if len(g['features']) > 1]
    
    print(f"   Single-feature groups: {len(single_features)}")
    print(f"   Aggregated groups: {len(grouped_features)}")
    
    # 7. Create the aggregated features (train first so the solved weights are
    # cached on the groups and reused for the test set)
    print(f"\n6. Creating aggregated features...")
    train_df_aggregated = create_aggregated_features(train_df, feature_groups, Config.REMOVE_ORIGINAL_FEATURES)
    test_df_aggregated = create_aggregated_features(test_df, feature_groups, Config.REMOVE_ORIGINAL_FEATURES)
    
    print(f"   Train feature count before aggregation: {len(all_features)}")
    print(f"   Train feature count after aggregation: {len([col for col in train_df_aggregated.columns if col != Config.LABEL_COLUMN])}")
    print(f"   Test feature count after aggregation: {len(test_df_aggregated.columns)}")
    
    # 8. Save the results
    if Config.SAVE_RESULTS:
        print(f"\n7. Saving results...")
        corr_matrix.to_csv('./max_IC_mixed/correlation_matrix.csv')
        ic_values.to_csv('./max_IC_mixed/ic_values.csv')
        train_df_aggregated.to_parquet('./max_IC_mixed/train_aggregated.parquet')
        test_df_aggregated.to_parquet('./max_IC_mixed/test_aggregated.parquet')
        print("   Correlation matrix saved: correlation_matrix.csv")
        print("   Feature IC values saved: ic_values.csv")
        print("   Aggregated train data saved: train_aggregated.parquet")
        print("   Aggregated test data saved: test_aggregated.parquet")
    
    # 9. Show the highest-IC features
    print(f"\n8. Top 10 highest IC features:")
    print(ic_values.abs().sort_values(ascending=False).head(10))
    
    # 10. Show highly correlated feature pairs
    print(f"\n9. Highly correlated feature pairs (|correlation| > {Config.CORRELATION_THRESHOLD}):")
    high_corr_pairs = []
    for i in range(len(corr_matrix.columns)):
        for j in range(i+1, len(corr_matrix.columns)):
            corr_val = corr_matrix.iloc[i, j]
            if abs(corr_val) > Config.CORRELATION_THRESHOLD:
                high_corr_pairs.append((corr_matrix.columns[i], corr_matrix.columns[j], corr_val))
    
    for pair in sorted(high_corr_pairs, key=lambda x: abs(x[2]), reverse=True)[:10]:
        print(f"   {pair[0]} <-> {pair[1]}: {pair[2]:.4f}")
    
    # 11. Generate the visualization
    if Config.CREATE_VISUALIZATIONS:
        print(f"\n10. Generate visualization...")
        visualize_correlation_and_ic(corr_matrix, ic_values, feature_groups, Config.SAVE_RESULTS)
    
    # 12. Generate the report
    if Config.SAVE_RESULTS:
        print(f"\n11. Generate feature analysis report...")
        create_feature_summary_report(corr_matrix, ic_values, feature_groups)
    
    print(f"\n" + "=" * 60)
    print("Feature correlation analysis and factor aggregation completed!")
    print("=" * 60)