ChanceuxMJ commited on
Commit
c687548
·
verified ·
1 Parent(s): 60a52b1

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +56 -0
  2. DRW/DRW-Crypto/.gitignore +10 -0
  3. DRW/DRW-Crypto/.python-version +1 -0
  4. DRW/DRW-Crypto/HyperparameterOptimizer.py +351 -0
  5. DRW/DRW-Crypto/README.md +37 -0
  6. DRW/DRW-Crypto/Settings.py +178 -0
  7. DRW/DRW-Crypto/Utils.py +655 -0
  8. DRW/DRW-Crypto/__pycache__/Settings.cpython-311.pyc +0 -0
  9. DRW/DRW-Crypto/__pycache__/Utils.cpython-311.pyc +0 -0
  10. DRW/DRW-Crypto/__pycache__/inplemental.cpython-311.pyc +0 -0
  11. DRW/DRW-Crypto/feature_engineering.ipynb +0 -0
  12. DRW/DRW-Crypto/inplemental.py +156 -0
  13. DRW/DRW-Crypto/main.py +89 -0
  14. DRW/DRW-Crypto/optimize_params.py +143 -0
  15. DRW/DRW-Crypto/pyproject.toml +28 -0
  16. DRW/DRW-Crypto/sub-sample-vs-super-sample-noisy-rows.ipynb +1676 -0
  17. DRW/DRW-Crypto/uv.lock +0 -0
  18. LYY/baseline1/pipeline1.py +368 -0
  19. LYY/baseline1/submission_regularized_ensemble.csv +3 -0
  20. LYY/baseline1/submission_robust_ensemble.csv +3 -0
  21. LYY/baseline1/submission_simple_ensemble.csv +3 -0
  22. LYY/baseline1/submission_weighted_ensemble.csv +3 -0
  23. LYY/baseline1/submission_xgb_baseline.csv +3 -0
  24. LYY/pipeline.py +379 -0
  25. LYY/submission_regularized_ensemble.csv +3 -0
  26. LYY/submission_robust_ensemble.csv +3 -0
  27. LYY/submission_simple_ensemble.csv +3 -0
  28. LYY/submission_weighted_ensemble.csv +3 -0
  29. LYY/submission_xgb_baseline.csv +3 -0
  30. LYY/xgb_hyper_search.py +61 -0
  31. ZMJ/alpha_mixed.py +950 -0
  32. ZMJ/analyze.py +323 -0
  33. ZMJ/data_processed/correlation_matrix.csv +3 -0
  34. ZMJ/data_processed/feature_analysis.png +3 -0
  35. ZMJ/data_processed/feature_analysis_report.txt +576 -0
  36. ZMJ/data_processed/ic_values.csv +904 -0
  37. ZMJ/data_processed/test_aggregated.parquet +3 -0
  38. ZMJ/data_processed/train_aggregated.parquet +3 -0
  39. ZMJ/data_processed_7_16/alpha_selected.py +154 -0
  40. ZMJ/data_processed_7_16/data_processed.py +92 -0
  41. ZMJ/data_processed_7_16/output.log +73 -0
  42. ZMJ/data_processed_7_16/scaler.pkl +3 -0
  43. ZMJ/data_processed_7_16/submission_xgb_baseline_59_pca.csv +3 -0
  44. ZMJ/data_processed_7_16/test_df.pkl +3 -0
  45. ZMJ/data_processed_7_16/train.py +282 -0
  46. ZMJ/data_processed_7_16/train_df.pkl +3 -0
  47. ZMJ/data_processed_7_16/xgb_prediction-2.csv +3 -0
  48. ZMJ/data_processed_7_16/xgb_prediction.csv +3 -0
  49. ZMJ/data_processed_new/correlation_matrix.csv +0 -0
  50. ZMJ/data_processed_new/feature_analysis.png +3 -0
.gitattributes CHANGED
@@ -57,3 +57,59 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ LYY/baseline1/submission_regularized_ensemble.csv filter=lfs diff=lfs merge=lfs -text
61
+ LYY/baseline1/submission_robust_ensemble.csv filter=lfs diff=lfs merge=lfs -text
62
+ LYY/baseline1/submission_simple_ensemble.csv filter=lfs diff=lfs merge=lfs -text
63
+ LYY/baseline1/submission_weighted_ensemble.csv filter=lfs diff=lfs merge=lfs -text
64
+ LYY/baseline1/submission_xgb_baseline.csv filter=lfs diff=lfs merge=lfs -text
65
+ LYY/submission_regularized_ensemble.csv filter=lfs diff=lfs merge=lfs -text
66
+ LYY/submission_robust_ensemble.csv filter=lfs diff=lfs merge=lfs -text
67
+ LYY/submission_simple_ensemble.csv filter=lfs diff=lfs merge=lfs -text
68
+ LYY/submission_weighted_ensemble.csv filter=lfs diff=lfs merge=lfs -text
69
+ LYY/submission_xgb_baseline.csv filter=lfs diff=lfs merge=lfs -text
70
+ ZMJ/data_processed/correlation_matrix.csv filter=lfs diff=lfs merge=lfs -text
71
+ ZMJ/data_processed_7_16/submission_xgb_baseline_59_pca.csv filter=lfs diff=lfs merge=lfs -text
72
+ ZMJ/data_processed_7_16/xgb_prediction-2.csv filter=lfs diff=lfs merge=lfs -text
73
+ ZMJ/data_processed_7_16/xgb_prediction.csv filter=lfs diff=lfs merge=lfs -text
74
+ ZMJ/data_processed_new/sample_submission.csv filter=lfs diff=lfs merge=lfs -text
75
+ ZMJ/max_IC_mixed/sample_submission.csv filter=lfs diff=lfs merge=lfs -text
76
+ ZMJ/max_IC_mixed/submission_mlp_cv.csv filter=lfs diff=lfs merge=lfs -text
77
+ ZMJ/max_IC_mixed/submission_regularized_ensemble.csv filter=lfs diff=lfs merge=lfs -text
78
+ ZMJ/max_IC_mixed/submission_robust_ensemble.csv filter=lfs diff=lfs merge=lfs -text
79
+ ZMJ/max_IC_mixed/submission_simple_ensemble.csv filter=lfs diff=lfs merge=lfs -text
80
+ ZMJ/max_IC_mixed/submission_tree_ensemble.csv filter=lfs diff=lfs merge=lfs -text
81
+ ZMJ/max_IC_mixed/submission_weighted_ensemble.csv filter=lfs diff=lfs merge=lfs -text
82
+ ZMJ/max_IC_mixed/submission_xgb_baseline.csv filter=lfs diff=lfs merge=lfs -text
83
+ ZMJ/old_data/correlation_matrix.csv filter=lfs diff=lfs merge=lfs -text
84
+ ZMJ/submission_regularized_ensemble.csv filter=lfs diff=lfs merge=lfs -text
85
+ ZMJ/submission_robust_ensemble.csv filter=lfs diff=lfs merge=lfs -text
86
+ ZMJ/submission_simple_ensemble.csv filter=lfs diff=lfs merge=lfs -text
87
+ ZMJ/submission_tree_ensemble.csv filter=lfs diff=lfs merge=lfs -text
88
+ ZMJ/submission_weighted_ensemble.csv filter=lfs diff=lfs merge=lfs -text
89
+ ZMJ/submission_xgb_baseline.csv filter=lfs diff=lfs merge=lfs -text
90
+ ZMJ/threshold_6_29/sample_submission.csv filter=lfs diff=lfs merge=lfs -text
91
+ ZMJ/threshold_6_29/submission_regularized_ensemble.csv filter=lfs diff=lfs merge=lfs -text
92
+ ZMJ/threshold_6_29/submission_robust_ensemble.csv filter=lfs diff=lfs merge=lfs -text
93
+ ZMJ/threshold_6_29/submission_simple_ensemble.csv filter=lfs diff=lfs merge=lfs -text
94
+ ZMJ/threshold_6_29/submission_tree_ensemble.csv filter=lfs diff=lfs merge=lfs -text
95
+ ZMJ/threshold_6_29/submission_weighted_ensemble.csv filter=lfs diff=lfs merge=lfs -text
96
+ ZMJ/threshold_6_29/submission_xgb_baseline.csv filter=lfs diff=lfs merge=lfs -text
97
+ ZMJ/threshold_6_30/submission_regularized_ensemble.csv filter=lfs diff=lfs merge=lfs -text
98
+ ZMJ/threshold_6_30/submission_robust_ensemble.csv filter=lfs diff=lfs merge=lfs -text
99
+ ZMJ/threshold_6_30/submission_simple_ensemble.csv filter=lfs diff=lfs merge=lfs -text
100
+ ZMJ/threshold_6_30/submission_tree_ensemble.csv filter=lfs diff=lfs merge=lfs -text
101
+ ZMJ/threshold_6_30/submission_weighted_ensemble.csv filter=lfs diff=lfs merge=lfs -text
102
+ ZMJ/threshold_6_30/submission_xgb_baseline.csv filter=lfs diff=lfs merge=lfs -text
103
+ ZMJ/threshold_6_30/submission_xgb_baseline_59.csv filter=lfs diff=lfs merge=lfs -text
104
+ ZMJ/threshold_6_30/submission_xgb_baseline_59_new_v1.csv filter=lfs diff=lfs merge=lfs -text
105
+ ZMJ/threshold_6_30/submission_xgb_baseline_59_old.csv filter=lfs diff=lfs merge=lfs -text
106
+ ZMJ/threshold_6_30/submission_xgb_baseline_59_pca.csv filter=lfs diff=lfs merge=lfs -text
107
+ ZMJ/threshold_6_30/submission_xgb_baseline_all.csv filter=lfs diff=lfs merge=lfs -text
108
+ data/sample_submission.csv filter=lfs diff=lfs merge=lfs -text
109
+ new_data/sample_submission.csv filter=lfs diff=lfs merge=lfs -text
110
+ submission_regularized_ensemble.csv filter=lfs diff=lfs merge=lfs -text
111
+ submission_robust_ensemble.csv filter=lfs diff=lfs merge=lfs -text
112
+ submission_simple_ensemble.csv filter=lfs diff=lfs merge=lfs -text
113
+ submission_tree_ensemble.csv filter=lfs diff=lfs merge=lfs -text
114
+ submission_weighted_ensemble.csv filter=lfs diff=lfs merge=lfs -text
115
+ submission_xgb_baseline.csv filter=lfs diff=lfs merge=lfs -text
DRW/DRW-Crypto/.gitignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
DRW/DRW-Crypto/.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.11
DRW/DRW-Crypto/HyperparameterOptimizer.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2025/7/7 11:15
3
+ # @Author : Lukax
4
+ # @Email : Lukarxiang@gmail.com
5
+ # @File : HyperparameterOptimizer.py
6
+ # -*- presentd: PyCharm -*-
7
+
8
+
9
+ import os
10
+ import json
11
+ import optuna
12
+ import datetime
13
+ import numpy as np
14
+ import pandas as pd
15
+ from Settings import Config
16
+ import matplotlib.pyplot as plt
17
+ from scipy.stats import pearsonr
18
+ from optuna.samplers import TPESampler
19
+ from optuna.pruners import MedianPruner
20
+ from typing import Dict, Any, List, Callable
21
+ from sklearn.model_selection import cross_val_score, KFold
22
+
23
+
24
+
25
class OptunaOptimizer:
    """Optuna-driven hyperparameter search for a single learner.

    The objective maximizes the mean Pearson correlation between
    out-of-fold predictions and the target over a shuffled KFold split.
    """

    def __init__(self, model_name: str, config = Config):
        # `config` defaults to the shared Settings.Config object.
        self.model_name = model_name.lower()
        self.config = config
        self.study = None        # optuna.Study, populated by optimize()
        self.best_params = None  # best trial's params, populated by optimize()

    def create_objective(self, X: np.ndarray, y: np.ndarray, cv_folds: int = 3):
        """Return an Optuna objective closed over (X, y).

        Each trial samples a parameter set, fits the model fold by fold,
        reports the per-fold Pearson score for pruning, and returns the mean.
        """

        def objective(trial):
            params = self._suggest_parameters(trial)

            try:
                model = self._create_model(params)
                scores = []

                kfold = KFold(n_splits = cv_folds, shuffle = True, random_state = self.config.RANDOM_STATE)
                for train_idx, val_idx in kfold.split(X):
                    X_train, X_val = X[train_idx], X[val_idx]
                    y_train, y_val = y[train_idx], y[val_idx]

                    # Gradient-boosting learners accept a validation eval_set;
                    # RandomForest and similar models do not.
                    if self.model_name == 'xgb':
                        model.fit(X_train, y_train, eval_set = [(X_val, y_val)], verbose = False)
                    elif self.model_name == 'lgb':
                        model.fit(X_train, y_train, eval_set = [(X_val, y_val)])
                    elif self.model_name == 'cat':
                        model.fit(X_train, y_train, eval_set = [(X_val, y_val)], verbose = False)
                    else:
                        model.fit(X_train, y_train)

                    y_pred = model.predict(X_val)
                    score = pearsonr(y_val, y_pred)[0]
                    scores.append(score)

                    trial.report(score, len(scores) - 1)
                    if trial.should_prune():
                        raise optuna.TrialPruned()
                return np.mean(scores)
            except optuna.TrialPruned:
                # BUG FIX: TrialPruned subclasses Exception, so the generic
                # handler below previously swallowed it and returned -1.0,
                # which silently disabled the MedianPruner. Re-raise so
                # Optuna can actually prune the trial.
                raise
            except Exception as e:
                print(f"Trial failed: {str(e)}")
                return -1.0  # worst score so the failed trial is discarded

        return objective

    def _suggest_parameters(self, trial):
        """Return the model-specific search space sampled for this trial."""
        if self.model_name == 'xgb':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 500, 3000),
                'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.1, log = True),
                'max_depth': trial.suggest_int('max_depth', 6, 25),
                'max_leaves': trial.suggest_int('max_leaves', 8, 50),
                'min_child_weight': trial.suggest_int('min_child_weight', 1, 50),
                'subsample': trial.suggest_float('subsample', 0.05, 1.0),
                'colsample_bytree': trial.suggest_float('colsample_bytree', 0.5, 1.0),
                'colsample_bylevel': trial.suggest_float('colsample_bylevel', 0.3, 1.0),
                'colsample_bynode': trial.suggest_float('colsample_bynode', 0.3, 1.0),
                'reg_alpha': trial.suggest_float('reg_alpha', 0.1, 100.0, log = True),
                'reg_lambda': trial.suggest_float('reg_lambda', 0.1, 100.0, log = True),
                'gamma': trial.suggest_float('gamma', 0.1, 10.0),
                'tree_method': 'hist',
                # Consistency fix: read USE_GPU from the injected config,
                # not the module-level Config (same object by default).
                'device': 'gpu' if getattr(self.config, 'USE_GPU', False) else 'cpu',
                'verbosity': 0,
                'random_state': self.config.RANDOM_STATE,
                'n_jobs': -1
            }
        elif self.model_name == 'lgb':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 500, 3000),
                'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.1, log = True),
                'max_depth': trial.suggest_int('max_depth', 6, 25),
                'num_leaves': trial.suggest_int('num_leaves', 15, 200),
                'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
                'subsample': trial.suggest_float('subsample', 0.4, 1.0),
                'colsample_bytree': trial.suggest_float('colsample_bytree', 0.4, 1.0),
                'reg_alpha': trial.suggest_float('reg_alpha', 0.1, 100.0, log = True),
                'reg_lambda': trial.suggest_float('reg_lambda', 0.1, 100.0, log = True),
                'feature_fraction': trial.suggest_float('feature_fraction', 0.4, 1.0),
                'bagging_fraction': trial.suggest_float('bagging_fraction', 0.4, 1.0),
                'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
                'objective': 'regression',
                'metric': 'rmse',
                'boosting_type': 'gbdt',
                'verbose': -1,
                'random_state': self.config.RANDOM_STATE,
                'n_jobs': -1
            }
        elif self.model_name == 'cat':
            return {
                'iterations': trial.suggest_int('iterations', 500, 3000),
                'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.1),
                'depth': trial.suggest_int('depth', 4, 12),
                'l2_leaf_reg': trial.suggest_float('l2_leaf_reg', 1, 10),
                'bootstrap_type': trial.suggest_categorical('bootstrap_type', ['Bayesian', 'Bernoulli', 'MVS']),
                'random_seed': self.config.RANDOM_STATE,
                'verbose': False,
                'allow_writing_files': False
            }
        elif self.model_name == 'rf':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 100, 1000),
                'max_depth': trial.suggest_int('max_depth', 5, 30),
                'min_samples_split': trial.suggest_int('min_samples_split', 2, 20),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 10),
                'max_features': trial.suggest_categorical('max_features', ['sqrt', 'log2', None]),
                'bootstrap': trial.suggest_categorical('bootstrap', [True, False]),
                'random_state': self.config.RANDOM_STATE,
                'n_jobs': -1
            }
        else:
            raise ValueError(f"不支持的模型类型: {self.model_name}")

    def _create_model(self, params):
        """Instantiate the configured estimator class for this model name."""
        learners = self.config.get_learners()
        for learner in learners:
            if learner['name'] == self.model_name:
                return learner['estimator'](**params)
        raise ValueError(f"未找到模型: {self.model_name}")

    def optimize(self, X: np.ndarray, y: np.ndarray, n_trials: int = 100, cv_folds: int = 3, study_name: str = None) -> Dict[str, Any]:
        """Run the Optuna study and return best params/score/study metadata."""
        study_name = study_name or f"{self.model_name}_optimization"
        self.study = optuna.create_study(
            direction = 'maximize',  # maximize the Pearson correlation
            sampler = TPESampler(seed = self.config.RANDOM_STATE),
            pruner = MedianPruner(n_startup_trials = 10, n_warmup_steps = 5),
            study_name = study_name
        )

        objective = self.create_objective(X, y, cv_folds)
        print(f"Optimizing {self.model_name} hyperParameter...")
        print(f"trail: {n_trials}, fold: {cv_folds}")

        self.study.optimize(objective, n_trials = n_trials, show_progress_bar = True)
        self.best_params = self.study.best_params
        best_score = self.study.best_value
        print(f"Optimized score: {best_score:.3f}\nBest Parameters: {self.best_params}")
        res = {'best_params': self.best_params, 'best_score': best_score, 'study': self.study, 'n_trials': len(self.study.trials)}
        return res

    def save_results(self, save_path: str = None):
        """Persist the best parameters/score as JSON under RESULTS_DIR.

        Raises:
            ValueError: when called before optimize().
        """
        if self.best_params is None:
            raise ValueError("Can't save before optimized.")

        # Consistency fix: use the injected config (same object by default).
        save_path = save_path or os.path.join(self.config.RESULTS_DIR, f"{self.model_name}_best_params.json")
        result = {
            'model_name': self.model_name,
            'best_params': self.best_params,
            'best_score': self.study.best_value,
            'optimization_time': str(pd.Timestamp.now()),
            'n_trials': len(self.study.trials)
        }
        with open(save_path, 'w', encoding = 'utf-8') as f:
            json.dump(result, f, indent = 2, ensure_ascii = False)

        print(f"optimized result saved in {save_path}")
        return save_path
185
class HyperparameterManager:
    """Runs Optuna optimization for every configured learner and can write
    the winning parameters back into the Settings.py source file."""

    def __init__(self, config = Config):
        self.config = config
        self.optimizers = {}  # model name -> OptunaOptimizer
        self.results = {}     # model name -> result dict returned by optimize()

    def register_optimizer(self, model_name: str, optimizer_type: str = 'optuna'):
        """Create and store an optimizer for `model_name` ('optuna' only)."""
        if optimizer_type == 'optuna':
            self.optimizers[model_name] = OptunaOptimizer(model_name, self.config)
        else:
            raise ValueError(f"Unsupported optimizer: {optimizer_type}")

    def optimize_all_models(self, X: np.ndarray, y: np.ndarray, n_trials: int = 50, cv_folds: int = 3) -> Dict[str, Any]:
        """Optimize every learner returned by config.get_learners().

        A failure in one model is printed and skipped so a single bad
        learner does not abort the whole sweep. Returns {name: result}.
        """
        learners = self.config.get_learners()
        model_names = [learner['name'] for learner in learners]

        print(f"Starting hyperparameter optimization for {len(model_names)} models")
        print(f"Model list: {model_names}")

        for model_name in model_names:
            print(f"\n{'='*50}")
            print(f"Optimizing model: {model_name.upper()}")
            print(f"{'='*50}")

            self.register_optimizer(model_name)

            try:
                result = self.optimizers[model_name].optimize(X, y, n_trials, cv_folds)
                self.results[model_name] = result

                # Persist the best parameters to RESULTS_DIR as JSON.
                self.optimizers[model_name].save_results()

            except Exception as e:
                print(f"Model {model_name} optimization failed: {str(e)}")
                continue

        return self.results

    def update_config(self, config_path: str = 'Settings.py'):
        """Rewrite get_learners() in the config file with optimized params.

        A timestamped backup of the original file is written first.
        No-op when there are no optimization results yet.
        """
        if not self.results:
            return

        with open(config_path, 'r', encoding = 'utf-8') as f:
            config_content = f.read()
        new_learners_config = self._generate_learners_config()

        backup_path = config_path.replace('.py', f'_backup_{datetime.datetime.today().strftime("%m%d-%H%M")}.py')
        with open(backup_path, 'w', encoding = 'utf-8') as f:
            f.write(config_content)
        print(f"Original config backed up to: {backup_path}")

        updated_content = self._update_learners_in_config(config_content, new_learners_config)

        with open(config_path, 'w', encoding = 'utf-8') as f:
            f.write(updated_content)

        print(f"Config file updated: {config_path}")

    def _generate_learners_config(self) -> str:
        """Render a replacement get_learners() classmethod as source text.

        NOTE(review): the leading whitespace inside these literals appears
        collapsed (likely by the diff rendering) — confirm it matches the
        4-space class layout of Settings.py before relying on update_config().
        """
        config_lines = [" @classmethod", " def get_learners(cls):", ' """获取配置好的学习器列表"""', " return ["]
        learners = self.config.get_learners()
        for learner in learners:
            model_name = learner['name']
            config_lines.append(f" {{")
            config_lines.append(f" 'name': '{model_name}',")
            config_lines.append(f" 'estimator': {learner['estimator'].__name__},")
            config_lines.append(f" 'params': {{")

            # Prefer tuned parameters when this model was optimized.
            if model_name in self.results:
                params = self.results[model_name]['best_params']
                print(f" Using optimized parameters for {model_name}")
            else:
                params = learner['params']
                print(f" Keeping original parameters for {model_name}")

            # Render each parameter; string values get quoted.
            for key, value in params.items():
                if isinstance(value, str):
                    config_lines.append(f' "{key}": "{value}",')
                else:
                    config_lines.append(f' "{key}": {value},')

            config_lines.append(f" }},")
            config_lines.append(f" }},")

        config_lines.append(" ]")

        return '\n'.join(config_lines)

    def _update_learners_in_config(self, content: str, new_config: str) -> str:
        """Splice `new_config` over the existing get_learners() body.

        Falls back to appending when the method start (or its closing
        bracket) cannot be located in `content`.
        """
        start_marker = "@classmethod\n def get_learners(cls):"  # locate the get_learners method
        end_marker = " ]"

        start_idx = content.find(start_marker)
        if start_idx == -1:
            print("get_learners method not found, appending new config")
            return content + "\n\n" + new_config

        temp_content = content[start_idx:]  # search forward from the method start
        end_idx = temp_content.find(end_marker)
        if end_idx == -1:
            print("get_learners method end position not found")
            return content

        before = content[:start_idx]  # text preceding the method
        after = content[start_idx + end_idx + len(end_marker):]

        return before + new_config + after

    def plot_optimization_history(self, save_path: str = None):
        """Plot up to four models' per-trial scores on a 2x2 grid.

        Saves the figure to `save_path` when given, otherwise shows it.
        """
        if not self.results:
            print("No optimization results to plot")
            return

        fig, axes = plt.subplots(2, 2, figsize = (15, 10))
        axes = axes.flatten()

        for i, (model_name, result) in enumerate(self.results.items()):
            if i >= 4:  # at most four panels on the 2x2 grid
                break

            study = result['study']
            trials = study.trials

            # Pruned/failed trials have value None and are skipped.
            values = [trial.value for trial in trials if trial.value is not None]
            axes[i].plot(values)
            axes[i].set_title(f'{model_name.upper()} Optimization History')
            axes[i].set_xlabel('Trial Number')
            axes[i].set_ylabel('Pearson Correlation')
            axes[i].grid(True)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi = 300, bbox_inches = 'tight')
            print(f"Optimization history plot saved to: {save_path}")
        else:
            plt.show()
326
def quick_optimize_single_model(model_name: str, X: np.ndarray, y: np.ndarray, n_trials: int = 100) -> Dict[str, Any]:
    """One-shot helper: tune a single model and persist its best parameters.

    Returns the result dict produced by OptunaOptimizer.optimize().
    """
    tuner = OptunaOptimizer(model_name)
    outcome = tuner.optimize(X, y, n_trials = n_trials)
    tuner.save_results()
    return outcome
334
# Usage example / smoke test
if __name__ == "__main__":
    np.random.seed(42)

    # Synthetic regression data: target is a noisy linear mix of two features.
    X = np.random.randn(1000, 10)
    y = X[:, 0] + 0.5 * X[:, 1] + np.random.randn(1000) * 0.1

    # result = quick_optimize_single_model('xgb', X, y, n_trials = 20)

    manager = HyperparameterManager()
    results = manager.optimize_all_models(X, y, n_trials = 10)

    # manager.update_config()

    # Plot the optimization history for each model.
    history_path = os.path.join(Config.RESULTS_DIR, 'optimization_history.png')
    manager.plot_optimization_history(history_path)
DRW/DRW-Crypto/README.md ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Kaggle ML model (version 1.0)
2
+ 参考 sub-sample-vs-super-sample-noisy-rows.ipynb的主要训练流程,以此基础上,拓展了
3
+ - ML 单模型种类
4
+ - ML 单模型的参数搜索
5
+ - ML 多模型集成的权重搜索
6
+ 等功能,并让整个工作流完整化。
7
+
8
+ 还需补充方向:
9
+ - 特征的选择(已有 feature_engineering.ipynb可参考,需搜集其他思路)
10
+ - MLP的参数搜索
11
+
12
+ ### 模块介绍
13
+ 模型参数搜索: optimize_params + HyperparameterOptimizer
14
+ 策略集成工作流: main + Utils + inplemental
15
+ 相关配置文件: Settings
16
+
17
+
18
+ ### 环境安装
19
+ 如果自己习惯 conda环境,可以直接按照 pyproject.toml中的依赖进行自行安装,略过下面的内容
20
+
21
+ 下面介绍一种简单快速的环境安装方法 UV
22
+ 1. 安装 uv工具 https://github.com/astral-sh/uv
23
+ windows powershell:powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
24
+ macOS terminal:curl -LsSf https://astral.sh/uv/install.sh | sh
25
+
26
+ 2. powershell 进入项目
27
+ uv python install 3.11
28
+ uv sync
29
+
30
+ 如果是 windows且已经安装了 cuda,执行 setup_cuda.py,卸载 cpu版本的 torch,安装 gpu版本的 torch
31
+ .venv\Scripts\activate（Windows PowerShell；macOS/Linux 下则为 .venv/bin/activate）
32
+ python setup_cuda.py
33
+ 安装验证
34
+ python -c "import torch; print(f'PyTorch: {torch.__version__}'); print(f'CUDA: {torch.cuda.is_available()}')"
35
+ 安装的 gpu版本 torch需要参考本机安装的 cuda版本来进行选择
36
+
37
+
DRW/DRW-Crypto/Settings.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2025/7/4 18:48
3
+ # @Author : Lukax
4
+ # @Email : Lukarxiang@gmail.com
5
+ # @File : Settings.py
6
+ # -*- presentd: PyCharm -*-
7
+
8
+
9
+ import os
10
+ import torch
11
+ from xgboost import XGBRegressor
12
+ from lightgbm import LGBMRegressor
13
+ from catboost import CatBoostRegressor
14
+ from sklearn.ensemble import RandomForestRegressor
15
+
16
+
17
+
18
class Config:
    """Project-wide configuration: paths, feature lists, CV settings, and
    the base-learner definitions shared across the pipeline."""

    # NOTE(review): ROOT_PATH depends on the CWD at import time, so running
    # from a different directory changes every derived path — confirm intended.
    ROOT_PATH = os.getcwd()
    DATA_DIR = os.path.join(ROOT_PATH, 'data')
    SUBMISSION_DIR = os.path.join(ROOT_PATH, 'submission')
    RESULTS_DIR = os.path.join(ROOT_PATH, 'results')

    # Side effect at import time: ensure the working directories exist.
    os.makedirs(DATA_DIR, exist_ok=True)
    os.makedirs(SUBMISSION_DIR, exist_ok=True)
    os.makedirs(RESULTS_DIR, exist_ok=True)
    TRAIN_PATH = os.path.join(DATA_DIR, 'train.parquet')
    TEST_PATH = os.path.join(DATA_DIR, 'test.parquet')
    SUBMISSION_PATH = os.path.join(DATA_DIR, 'sample_submission.csv')

    # Feature subset used by the GBDT/tree models.
    FEATURES = [
        "bid_qty", "ask_qty", "buy_qty", "sell_qty", "volume",
        "X598", "X385", "X603", "X674", "X415", "X345", "X174",
        "X302", "X178", "X168", "X612", "X421", "X333", "X586", "X292"
    ]

    # Feature subset used by the MLP (overlaps with FEATURES but differs).
    MLP_FEATURES = [
        "bid_qty", "ask_qty", "buy_qty", "sell_qty", "volume",
        "X344", "X598", "X385", "X603", "X674", "X415", "X345", "X137",
        "X174", "X302", "X178", "X532", "X168", "X612"
    ]

    TARGET = 'label'          # name of the label column
    N_FOLDS = 5               # cross-validation folds
    RANDOM_STATE = 23         # global RNG seed shared by all models
    OUTLIER_FRACTION = 0.001  # fraction of rows treated as outliers
    OUTLIER_STRATEGIES = ['reduce', 'remove', 'double', 'none']

    ENSEMBLE_METHODS = ['grid', 'stacking']
    # Coarse then fine step sizes for the ensemble-weight grid search.
    GRID_SEARCH_STRIDE1 = 0.1
    GRID_SEARCH_STRIDE2 = 0.025

    # Time-slice definitions: each entry trains on a portion of the data
    # anchored at `anchor_ratio` (after=True keeps the tail, else the head).
    SLICE_CONFIGS = [
        {'name': 'full', 'anchor_ratio': 0, 'after': True, 'adjust_outlier': False},
        {'name': 'recent_90', 'anchor_ratio': 0.1, 'after': True, 'adjust_outlier': False},
        {'name': 'recent_85', 'anchor_ratio': 0.15, 'after': True, 'adjust_outlier': False},
        {'name': 'recent_80', 'anchor_ratio': 0.2, 'after': True, 'adjust_outlier': False},
        {'name': 'first_25', 'anchor_ratio': 0.25, 'after': False, 'adjust_outlier': False},
    ]

    # One blend weight per slice; the second half of the list covers the
    # outlier-adjusted variants of the same five slices (per the comments).
    SLICE_WEIGHTS = [
        1.0,   # full_data
        1.0,   # last_90pct
        1.0,   # last_85pct
        1.0,   # last_80pct
        0.25,  # oldest_25pct
        0.9,   # full_data_outlier_adj
        0.9,   # last_90pct_outlier_adj
        0.9,   # last_85pct_outlier_adj
        0.9,   # last_80pct_outlier_adj
        0.2    # oldest_25pct_outlier_adj
    ]

    # MLP architecture/training hyperparameters; the input width is tied
    # to len(MLP_FEATURES) so the two stay in sync.
    MLP_CONFIG = {
        'layers': [len(MLP_FEATURES), 128, 64, 1],
        'activation': 'relu',
        'last_activation': None,
        'dropout_rate': 0.6,
        'learning_rate': 0.001,
        'batch_size': 1024,
        'epochs': 100,
        'patience': 10
    }

    @classmethod
    def get_learners(cls):
        """Return the configured base learners as fresh dicts of
        name / estimator class / constructor params."""
        return [
            {
                'name': 'xgb',
                'estimator': XGBRegressor,
                'params': {
                    "tree_method": "hist",
                    "device": "gpu" if torch.cuda.is_available() else "cpu",
                    "colsample_bylevel": 0.4778,
                    "colsample_bynode": 0.3628,
                    "colsample_bytree": 0.7107,
                    "gamma": 1.7095,
                    "learning_rate": 0.02213,
                    "max_depth": 20,
                    "max_leaves": 12,
                    "min_child_weight": 16,
                    "n_estimators": 1667,
                    "subsample": 0.06567,
                    "reg_alpha": 39.3524,
                    "reg_lambda": 75.4484,
                    "verbosity": 0,
                    "random_state": cls.RANDOM_STATE,
                    "n_jobs": -1
                },
            },
            {
                'name': 'lgb',
                'estimator': LGBMRegressor,
                'params': {
                    "objective": "regression",
                    "metric": "rmse",
                    "boosting_type": "gbdt",
                    "num_leaves": 31,
                    "learning_rate": 0.05,
                    "feature_fraction": 0.9,
                    "bagging_fraction": 0.8,
                    "bagging_freq": 5,
                    "verbose": -1,
                    "random_state": cls.RANDOM_STATE,
                    "n_estimators": 1000
                },
            },
            {
                'name': 'cat',
                'estimator': CatBoostRegressor,
                'params': {
                    "iterations": 1000,
                    "learning_rate": 0.03,
                    "depth": 6,
                    "l2_leaf_reg": 3,
                    "random_seed": cls.RANDOM_STATE,
                    "verbose": False,
                    "allow_writing_files": False
                },
            },
            {
                'name': 'rf',
                'estimator': RandomForestRegressor,
                'params': {
                    "n_estimators": 200,
                    "max_depth": 15,
                    "min_samples_split": 5,
                    "min_samples_leaf": 2,
                    "random_state": cls.RANDOM_STATE,
                    "n_jobs": -1
                },
            },
        ]

    @property
    def LEARNERS(self):
        # Works because the module exposes an *instance* (see rebinding below).
        return self.get_learners()

    @classmethod
    def print_config_summary(cls):
        """Print a short human-readable summary of the configuration."""
        print("=" * 50)
        print(f"GBDT feature nums: {len(cls.FEATURES)}")
        print(f"MLP feature nums: {len(cls.MLP_FEATURES)}")
        print(f"n_cv: {cls.N_FOLDS}")
        print(f"outlier_fraction: {cls.OUTLIER_FRACTION}")
        print(f"outlier_strategies: {cls.OUTLIER_STRATEGIES}")
        print(f"learners: {[l['name'] for l in cls.get_learners()]}")
        print("=" * 50)


# Rebind the name to a singleton instance so `Config.LEARNERS` (a property)
# resolves. NOTE(review): this shadows the class itself — other modules can
# no longer subclass or re-instantiate it via this name.
Config = Config()
DRW/DRW-Crypto/Utils.py ADDED
@@ -0,0 +1,655 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2025/7/4 19:53
3
+ # @Author : Lukax
4
+ # @Email : Lukarxiang@gmail.com
5
+ # @File : Utils.py
6
+ # -*- presentd: PyCharm -*-
7
+
8
+
9
+ import os
10
+ import torch
11
+ import random
12
+ import numpy as np
13
+ import pandas as pd
14
+ import torch.nn as nn
15
+ import torch.optim as optim
16
+ from Settings import Config
17
+ from itertools import product
18
+ from scipy.stats import pearsonr
19
+ from xgboost import XGBRegressor
20
+ from lightgbm import LGBMRegressor
21
+ from sklearn.linear_model import Ridge
22
+ from catboost import CatBoostRegressor
23
+ from sklearn.model_selection import KFold
24
+ from sklearn.preprocessing import StandardScaler
25
+ from sklearn.ensemble import RandomForestRegressor
26
+ from sklearn.model_selection import cross_val_score
27
+ from sklearn.model_selection import train_test_split
28
+ from sklearn.metrics import mean_squared_error as MSE
29
+ from torch.utils.data import DataLoader, TensorDataset
30
+
31
+
32
+
33
class MLP(nn.Module):
    """Fully connected network: Linear -> activation -> dropout for each
    hidden layer, with an optional activation after the output layer."""

    def __init__(self, layers = None, activation = 'relu', last_activation = None, dropout_rate = 0.6):
        """
        Args:
            layers: layer widths [in, hidden..., out]; defaults to [128, 64].
                BUG FIX: the original used a mutable list as the default
                argument, which is shared across all instances.
            activation: hidden-layer activation name ('relu'/'tanh'/'sigmoid').
            last_activation: optional activation for the output layer — it may
                differ from the hidden-layer activation.
            dropout_rate: dropout probability after each hidden activation.
        """
        super(MLP, self).__init__()
        if layers is None:
            layers = [128, 64]
        self.activation = get_activation(activation)
        self.last_activation = get_activation(last_activation)

        # Idiom fix: build the ModuleList directly instead of a list
        # comprehension used purely for its append side effect.
        self.linears = nn.ModuleList(
            nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)
        )
        self.dropout = nn.Dropout(dropout_rate)  # applied after each hidden activation

    def forward(self, x):
        # All layers but the last: affine -> activation -> dropout.
        for linear in self.linears[:-1]:
            x = self.dropout(self.activation(linear(x)))
        x = self.linears[-1](x)
        if self.last_activation is not None:
            x = self.last_activation(x)
        return x
52
+
53
class CheckPointer:
    """Persist model weights whenever the validation Pearson improves."""

    def __init__(self, path = None):
        # Default checkpoint location lives under the configured results directory.
        self.path = path if path is not None else os.path.join(Config.RESULTS_DIR, 'best_model.pt')
        self.best_pearson = -np.inf
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def load(self, model):
        """Restore the best saved state into *model* and return it."""
        state = torch.load(self.path, map_location = self.device)
        model.load_state_dict(state)
        print(f'load model from {self.path} with Pearson: {self.best_pearson:.4f}')
        return model

    def __call__(self, pearson_coef, model):
        """Save *model* only when *pearson_coef* beats the best score so far."""
        if pearson_coef <= self.best_pearson:
            return
        self.best_pearson = pearson_coef
        torch.save(model.state_dict(), self.path)
        print(f'save better model with Pearson:{self.best_pearson:.4f}')
71
+
72
+
73
def set_seed(seed = 23):
    """Seed every RNG source (python, numpy, torch CPU and GPU) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # bug fix: the CPU torch RNG was never seeded
    torch.cuda.manual_seed(seed)       # no-op when CUDA is unavailable
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.backends.cudnn.deterministic = True
81
+
82
+
83
def get_activation(func):
    """Map an activation name (case-insensitive) to a torch module.

    Args:
        func: 'relu' / 'tanh' / 'sigmoid', or None for no activation.

    Returns:
        A fresh nn.Module instance, or None when *func* is None.

    Raises:
        ValueError: for any unrecognised activation name.
    """
    if func is None:  # idiom fix: was `== None`
        return None
    mapping = {'relu': nn.ReLU, 'tanh': nn.Tanh, 'sigmoid': nn.Sigmoid}
    try:
        return mapping[func.lower()]()
    except KeyError:
        raise ValueError(f'Unsupported activation function: {func}') from None
90
+
91
+
92
def get_model(model):
    """Return a small, fast tree regressor used for outlier screening.

    Args:
        model: 'rf' / 'xgb' / 'lgb' / 'cat' (case-insensitive), or None.

    Returns:
        An unfitted regressor, or None when *model* is None.

    Raises:
        ValueError: for any unrecognised model name.
    """
    if model is None:  # idiom fix: was `== None`
        return None
    model = model.lower()
    if model == 'rf':
        return RandomForestRegressor(n_estimators = 100, max_depth = 10, random_state = Config.RANDOM_STATE, n_jobs = -1)
    if model == 'xgb':
        return XGBRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbosity = 0, n_jobs = -1)
    if model == 'lgb':
        return LGBMRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbose = -1, n_jobs = -1)
    if model == 'cat':
        return CatBoostRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbose = -1, allow_writing_files = False)
    raise ValueError(f'Unsupported model: {model}')
100
+
101
+
102
def get_time_decay_weights(n, k = 0.9):
    """Exponentially decaying sample weights — later rows weigh more.

    Weights are scaled so they sum to *n* (i.e. mean weight of 1.0).
    """
    positions = np.arange(n)
    # Map positions into [0, 1]; a single row keeps its raw position (0).
    frac = positions / (n - 1) if n > 1 else positions
    raw = k ** (1.0 - frac)
    return raw * n / raw.sum()
108
+
109
+
110
def detect_outlier_weights(X, y, sample_weights, outlier_fraction = 0.001, strategy = 'none', model = 'rf'):
    """Flag the worst-fit samples of a quick tree model and reweight them.

    Args:
        X, y: training arrays.
        sample_weights: base weights, same length as *y*.
        outlier_fraction: fraction of rows to mark as outliers.
        strategy: 'none' | 'reduce' | 'remove' | 'double'.
        model: name passed to get_model() for the screening regressor.

    Returns:
        (outlier_mask, adjusted_weights) — always in this order.
    """
    # Nothing to do for tiny datasets or the 'none' strategy.
    # Bug fix: the early return used to yield (weights, mask) — the reverse
    # of the normal return order — corrupting every caller that hit it.
    if strategy == 'none' or len(y) < 100:
        return np.zeros(len(y), dtype = bool), sample_weights

    n_outlier = max(1, int(len(y) * outlier_fraction))
    model = get_model(model)
    model.fit(X, y, sample_weight = sample_weights)
    pred = model.predict(X)
    residuals = np.abs(y - pred)

    # Threshold at the n-th largest residual.
    sorted_res = np.sort(residuals)
    residual_threshold = sorted_res[-n_outlier] if n_outlier <= len(y) else sorted_res[-1]
    outlier_mask = residuals >= residual_threshold

    # Ties at the threshold can flag more than n_outlier rows: sub-sample.
    if np.sum(outlier_mask) > n_outlier:
        outlier_idx = np.where(outlier_mask)[0]
        # Bug fix: np.random_state(23) does not exist (AttributeError).
        # Use a local RandomState so the global RNG stream is untouched.
        rng = np.random.RandomState(23)
        select_idx = rng.choice(outlier_idx, n_outlier, replace = False)
        outlier_mask = np.zeros(len(y), dtype = bool)
        outlier_mask[select_idx] = True

    adjusted_w = sample_weights.copy()
    if outlier_mask.any():
        if strategy == 'reduce':
            # Scale weight down linearly with residual size: 0.8 down to 0.2.
            outlier_res = residuals[outlier_mask]
            min_res, max_res = outlier_res.min(), outlier_res.max()
            norm_res = (outlier_res - min_res) / (max_res - min_res) if max_res > min_res else np.ones_like(outlier_res)
            w_factors = 0.8 - 0.6 * norm_res
            adjusted_w[outlier_mask] *= w_factors
        elif strategy == 'remove': adjusted_w[outlier_mask] = 0
        elif strategy == 'double': adjusted_w[outlier_mask] *= 2.0
        print(f" Strategy '{strategy}': Adjusted {n_outlier} outliers ({outlier_fraction*100:.1f}% of data)")

    return outlier_mask, adjusted_w
146
+
147
+
148
def get_slices_and_weights(n):
    """Materialise the configured time slices for a dataset of length *n*.

    Every slice in Config.SLICE_CONFIGS appears twice: once as-is and once
    with outlier adjustment enabled (name suffixed '_adjust_outlier').

    Returns:
        (slices, weights) where weights is Config.SLICE_WEIGHTS normalised
        to sum to 1, one weight per slice.
    """
    base_slices = []
    for config in Config.SLICE_CONFIGS:
        sl = config.copy()  # renamed from `slice` to stop shadowing the builtin
        # anchor_ratio > 0 positions the anchor proportionally; 0 means "start".
        sl['anchor'] = int(n * config['anchor_ratio']) if config['anchor_ratio'] > 0 else 0
        base_slices.append(sl)

    adjusted_slices = []
    for base in base_slices:
        sl = base.copy()
        sl['name'] = f"{sl['name']}_adjust_outlier"
        sl['adjust_outlier'] = True
        adjusted_slices.append(sl)

    weights = np.array(Config.SLICE_WEIGHTS)
    weights = weights / weights.sum()
    assert len(base_slices + adjusted_slices) == len(weights)

    return base_slices + adjusted_slices, weights
167
+
168
+
169
def analyze_outliers(train):
    """Detect outliers on *train*, print a summary and profile them.

    Returns:
        Integer indices of the detected outlier rows.
    """
    X, y = train[Config.FEATURES].values, train[Config.TARGET].values
    sample_weights = get_time_decay_weights(len(train))
    # Strategy choice is irrelevant here — only the mask is consumed, not the
    # adjusted weights, so any cheap strategy ('remove') will do.
    outlier_mask, _ = detect_outlier_weights(X, y, sample_weights, outlier_fraction = Config.OUTLIER_FRACTION, strategy = 'remove')
    outlier_idx = np.where(outlier_mask)[0]
    n_outlier = len(outlier_idx)
    print(f"outlier detected: {n_outlier} ({n_outlier / len(train) * 100:.2f}%)")

    if n_outlier == 0: print('no outliers detected with current threshold. consider adjusting outlier_fraction value.')
    else: _ = analyze_outliers_statistical(train, y, outlier_mask, outlier_idx)  # statistical profile of the outliers
    return outlier_idx
180
+
181
+
182
def analyze_outliers_statistical(train, y, outlier_mask, outlier_idx):
    """Print summary statistics contrasting outlier rows with normal rows.

    Args:
        train: full training frame (features read via Config.FEATURES).
        y: target array aligned with *train*.
        outlier_mask: boolean array marking outlier rows.
        outlier_idx: integer indices of the outlier rows.

    Returns:
        Per-feature (name, relative_diff, outlier_mean, normal_mean) tuples,
        sorted by relative difference descending.
    """
    # Target distribution for both groups.
    # Bug fix: the Std format spec was ':4f' (field width 4), not ':.4f'.
    normal_y, outlier_y = y[~outlier_mask], y[outlier_mask]
    print(f"Normal samples - Min {normal_y.min():.4f} Max {normal_y.max():.4f} Mean {normal_y.mean():.4f} Std {normal_y.std():.4f}")
    print(f"outlier samples - Min {outlier_y.min():.4f} Max {outlier_y.max():.4f} Mean {outlier_y.mean():.4f} Std {outlier_y.std():.4f}")

    # Compare per-feature means between the two groups.
    features = Config.FEATURES
    normal_features, outlier_features = train.iloc[~outlier_mask][features], train.iloc[outlier_idx][features]
    feature_diffs = []
    for feat in features:
        normal_mean, outlier_mean = normal_features[feat].mean(), outlier_features[feat].mean()
        if normal_mean != 0:  # skip to avoid division by zero
            relative_diff = abs(outlier_mean - normal_mean) / abs(normal_mean)
            feature_diffs.append((feat, relative_diff, outlier_mean, normal_mean))

    feature_diffs.sort(key = lambda x: x[1], reverse = True)
    print(f"Top 10 most different features:")
    for feat, diff, _, __ in feature_diffs[:10]:
        print(f" {feat}: {diff * 100:.1f}% difference")

    print(f" Features with >50% difference: {sum(1 for t in feature_diffs if t[1] > 0.5)}")
    print(f" Features with >100% difference: {sum(1 for t in feature_diffs if t[1] > 1.0)}")
    return feature_diffs
206
+
207
+
208
+ from sklearn.model_selection import KFold
209
+ import numpy as np
210
+
211
def train2compare_outlier_strategy(train, test, mode='single'):
    """Cross-validated comparison of outlier-handling strategies.

    For each strategy in Config.OUTLIER_STRATEGIES, every configured learner
    is trained on every time slice with non-shuffled KFold CV; OOF and test
    predictions are accumulated and the best-scoring combination is tracked.

    Args:
        train: frame containing Config.FEATURES and Config.TARGET.
        test: frame containing Config.FEATURES.
        mode: 'ensemble' blends all learners per strategy; anything else
            scores each (strategy, learner) pair on its own.

    Returns:
        (best_oof_pred, best_test_pred, strategy_res, best_strategy,
         best_combination)
    """
    # Drop rows whose target is missing or non-finite before CV.
    train = train.replace([np.inf, -np.inf], np.nan).dropna(subset=[Config.TARGET]).reset_index(drop=True)
    n = len(train)

    # 1. Result containers — keyed per strategy (ensemble) or per
    #    strategy_learner combination (single).
    if mode == 'ensemble':
        strategy_res = {s: {'oof_scores': [], 'slice_scores': []}
                        for s in Config.OUTLIER_STRATEGIES}
    else:
        strategy_res = {
            f"{s}_{l['name']}": {'oof_scores': [], 'slice_scores': []}
            for s in Config.OUTLIER_STRATEGIES
            for l in Config.get_learners()
        }

    best_strategy, best_score = 'reduce', -np.inf
    best_oof_pred = best_test_pred = best_combination = None

    # 2. One full-length time-decay weight vector; re-sliced per slice below.
    base_weight = get_time_decay_weights(n)

    # shuffle=False keeps the temporal ordering of folds.
    folds = KFold(n_splits=Config.N_FOLDS, shuffle=False)

    for strategy in Config.OUTLIER_STRATEGIES:
        print(f'Comparing {strategy.upper()}')
        slices, slice_weights = get_slices_and_weights(n)

        # 3. Per-learner, per-slice OOF / test prediction caches.
        oof_pred = {l['name']: {sl['name']: np.zeros(n) for sl in slices}
                    for l in Config.get_learners()}
        test_pred = {l['name']: {sl['name']: np.zeros(len(test)) for sl in slices}
                     for l in Config.get_learners()}

        for fold, (train_i, valid_i) in enumerate(folds.split(train), 1):
            print(f'Fold {fold}/{Config.N_FOLDS}')
            valid_x = train.iloc[valid_i][Config.FEATURES]
            valid_y = train.iloc[valid_i][Config.TARGET]

            for sl in slices:
                sl_name, anchor, after, adjust = (
                    sl['name'], sl['anchor'], sl['after'],
                    sl.get('adjust_outlier', False)
                )

                # 4. Build the slice frame and remap fold indices into it.
                if after:
                    cut_df = train.iloc[anchor:].reset_index(drop=True)
                    idx_in_slice = train_i[(train_i >= anchor)] - anchor
                else:
                    cut_df = train.iloc[:anchor].reset_index(drop=True)
                    idx_in_slice = train_i[train_i < anchor]

                if len(idx_in_slice) == 0:
                    continue  # this fold contributes nothing to the slice

                # 5. X, y and weight must be sliced identically (same length).
                train_x = cut_df.iloc[idx_in_slice][Config.FEATURES]
                train_y = cut_df.iloc[idx_in_slice][Config.TARGET]
                weight = base_weight[anchor:][idx_in_slice] if after else base_weight[:anchor][idx_in_slice]

                # 6. Optionally reweight outliers (returns same-length weights).
                if adjust and len(train_y) > 100:
                    _, weight = detect_outlier_weights(
                        train_x.values, train_y.values, weight,
                        Config.OUTLIER_FRACTION, strategy)

                # 7. Train every learner on this slice and predict.
                for learner in Config.get_learners():
                    model = learner['estimator'](**learner['params'])
                    # NOTE(review): the prints below look like leftover debug
                    # output — consider removing or gating behind a flag.
                    print(learner['name'], type(model))
                    print(train_x.shape[0], len(train_y), len(weight))
                    print(type(train_x), train_x.dtypes.unique())
                    print(type(train_y), train_y.dtype)
                    print(type(weight), weight.dtype)
                    fit_kwargs = dict(
                        X=train_x,
                        y=train_y,
                        sample_weight=weight
                    )

                    # Only XGB / CatBoost take eval_set plus verbose;
                    # LightGBM takes eval_set only; RandomForest takes neither.
                    if learner['name'] == 'xgb':
                        fit_kwargs.update(eval_set=[(valid_x, valid_y)], verbose=False)
                    elif learner['name'] == 'cat':
                        fit_kwargs.update(eval_set=[(valid_x, valid_y)], verbose=False)
                    elif learner['name'] == 'lgb':
                        fit_kwargs['eval_set'] = [(valid_x, valid_y)]

                    model.fit(**fit_kwargs)

                    # 8. Record OOF predictions; validation rows outside the
                    #    slice fall back to the corresponding 'full' slice.
                    if after:
                        mask = valid_i >= anchor
                        if mask.any():
                            idx = valid_i[mask]
                            oof_pred[learner['name']][sl_name][idx] = \
                                model.predict(train.iloc[idx][Config.FEATURES])
                        if anchor and (~mask).any():
                            fallback = 'full_adjust_outlier' if adjust else 'full'
                            oof_pred[learner['name']][sl_name][valid_i[~mask]] = \
                                oof_pred[learner['name']][fallback][valid_i[~mask]]
                    else:
                        oof_pred[learner['name']][sl_name][valid_i] = \
                            model.predict(train.iloc[valid_i][Config.FEATURES])

                    test_pred[learner['name']][sl_name] += \
                        model.predict(test[Config.FEATURES])

        # 9. Average the accumulated test predictions over the folds.
        for l_name in test_pred:
            for sl_name in test_pred[l_name]:
                test_pred[l_name][sl_name] /= Config.N_FOLDS

        # 10. Score this strategy and update the running best.
        if mode == 'ensemble':
            ensemble_oof, ensemble_test = evaluate_ensemble_strategy(
                oof_pred, test_pred, train, strategy, strategy_res, slice_weights)
            if strategy_res[strategy]['ensemble_score'] > best_score:
                best_score = strategy_res[strategy]['ensemble_score']
                best_strategy, best_combination = strategy, f'Ensemble + {strategy}'
                best_oof_pred, best_test_pred = ensemble_oof, ensemble_test
        else:
            best_score, best_strategy, best_oof_pred, best_test_pred, best_combination = \
                evaluate_single_model_strategy(
                    oof_pred, test_pred, train, strategy, strategy_res, slice_weights,
                    best_score, best_strategy, best_oof_pred, best_test_pred, best_combination)

    return best_oof_pred, best_test_pred, strategy_res, best_strategy, best_combination
340
+
341
def evaluate_ensemble_strategy(oof_pred, test_pred, train, strategy, strategy_res, slice_weights, method = 'grid'):
    """Blend per-slice predictions per learner, then search model weights.

    Each learner's slice predictions are first combined with *slice_weights*;
    the per-model OOF columns are then blended via either a two-stage grid
    search ('grid') or Ridge stacking ('stacking'). Results are stored into
    strategy_res[strategy].

    Returns:
        (ensemble_oof, ensemble_test) blended prediction vectors.

    Raises:
        ValueError: for an unknown *method*.
    """
    print('\nEvaluating ensemble strategy starting...')
    dic, model_oof_res, model_test_res, model_scores = {}, {}, {}, {}
    learner_names = [learner['name'] for learner in Config.get_learners()]

    # Collapse the per-slice predictions to one column per learner.
    for learner_name in learner_names:
        model_oof = pd.DataFrame(oof_pred[learner_name]).values @ slice_weights
        model_test = pd.DataFrame(test_pred[learner_name]).values @ slice_weights
        model_score = pearsonr(train[Config.TARGET], model_oof)[0]

        model_oof_res[learner_name], model_test_res[learner_name] = model_oof, model_test
        model_scores[learner_name] = model_score
        print(f"\t{learner_name} score: {model_score:.4f}")

    true = train[Config.TARGET].values
    model_oof_df, model_test_df = pd.DataFrame(model_oof_res)[learner_names], pd.DataFrame(model_test_res)[learner_names]

    if method == 'grid':
        print('\nTwo-stage grid search for model weights...')
        model_weights, ensemble_score, info = weightSearch_grid(model_oof_df, true)
    elif method == 'stacking':
        print('\nStacking Ridge fitting model weights...')
        # NOTE(review): weightSearch_stacking returns (weights, score, info) —
        # the middle value is a score despite the name `ensemble_weights` here.
        model_weights, ensemble_weights, info = weightSearch_stacking(model_oof_df, true)
    else: raise ValueError(f'Unsupport model weight search method: {method}')
    dic['info'] = info  # NOTE(review): redundant — overwritten again below

    ensemble_oof = model_oof_df.values @ pd.Series(model_weights)[learner_names].values
    ensemble_test = model_test_df.values @ pd.Series(model_weights)[learner_names].values
    final_score = pearsonr(true, ensemble_oof)[0]
    print(f"strategy {strategy} final result:\n\tmethod: {method}\n\tscore: {final_score:.4f}")

    dic['ensemble_score'], dic['oof_pred'], dic['test_pred'], dic['weight_method'] = final_score, ensemble_oof, ensemble_test, method
    dic['info'], dic['model_weights'], dic['model_scores'], dic['slice_weights'] = info, model_weights, model_scores, slice_weights
    strategy_res[strategy] = dic

    return ensemble_oof, ensemble_test
377
+
378
+
379
def weightSearch_grid(model_oof_df, true, stride1 = 0.1, stride2 = 0.025):
    """Two-stage grid search for ensemble weights over the OOF predictions.

    Stage 1 scans coarse weight vectors that sum to 1; stage 2 refines each
    weight within +/- 2*stride2 of the coarse optimum (renormalising sums).

    The score is *negative* MSE, so larger is better. (Bug fix: the original
    switched the metric from Pearson to raw MSE but kept the maximisation
    comparison `score > best_score` with best initialised to -inf, so the
    search actually selected the WORST weight combination.)

    Returns:
        (weights_dict, best_score, search_info)
    """
    def neg_mse(pred):
        # Negative mean squared error — higher is better.
        return -float(np.mean((true - pred) ** 2))

    model_names, n_models = model_oof_df.columns.tolist(), len(model_oof_df.columns)
    print('\nStage 1: Coarse search')
    ranges = [round(i * stride1, 1) for i in range(int(1 / stride1) + 1)]
    best_score, best_weights, search_times = -np.inf, None, 0

    for weights in product(ranges, repeat = n_models):
        if abs(sum(weights) - 1) > 1e-6: continue  # weights must sum to 1
        if all(w == 0 for w in weights): continue

        search_times += 1
        score = neg_mse(model_oof_df @ weights)
        if score > best_score:
            best_score, best_weights = score, weights
        if search_times % 1000 == 0:
            print(f" Tested {search_times} combinations, current best: {best_score:.4f}")

    print(f"Stage 1 completed: {best_score:.4f}")
    print(f"Best weights: {[f'{w:.1f}' for w in best_weights]}")

    print('Stage 2 starting...')
    # Build a fine candidate grid of +/- 2*stride2 around each coarse weight.
    fine_ranges = []
    for i in range(n_models):
        center = best_weights[i]
        min_val, max_val = max(0.0, center - stride2 * 2), min(1.0, center + stride2 * 2)
        candidates, current = [], min_val
        while current <= max_val + 1e-6:  # epsilon guards float drift
            candidates.append(round(current, 3))
            current += stride2
        fine_ranges.append(candidates)

    print("Fine search range:")
    for model_name, candidates in zip(model_names, fine_ranges):
        print(f" {model_name}: {len(candidates)} candidates [{candidates[0]:.3f}, {candidates[-1]:.3f}]")

    best_fine_score, best_fine_weights, fine_times = best_score, list(best_weights), 0
    for weights_fine in product(*fine_ranges):
        weights_fine = np.array(weights_fine)
        weights_sum = weights_fine.sum()
        if weights_sum < 0.8 or weights_sum > 1.2: continue  # too far from 1
        weights_fine = weights_fine / weights_sum  # renormalise to sum 1
        fine_times += 1

        score_fine = neg_mse(model_oof_df @ weights_fine)
        if score_fine > best_fine_score:
            best_fine_score, best_fine_weights = score_fine, weights_fine.tolist()
        if fine_times % 500 == 0:
            print(f" Tested {fine_times} combinations, current best: {best_fine_score:.4f}")

    print(f"Fine search completed: {best_fine_score:.4f}")
    print(f"Performance improvement: {best_fine_score - best_score:.4f}")

    # Assemble the final weight dictionary keyed by model name.
    best_weights_dict = dict(zip(model_names, best_fine_weights))
    search_info = {"search_times": search_times, "fine_times": fine_times,
                   "final_score": best_fine_score, "improvement": best_fine_score - best_score}
    return best_weights_dict, best_fine_score, search_info
441
+
442
+
443
def weightSearch_stacking(model_oof_df, true):
    """Fit a Ridge meta-learner on OOF predictions to derive ensemble weights.

    Negative coefficients are clipped to zero and the remainder renormalised;
    if everything is clipped, equal weights are used instead.

    Returns:
        (weight_dict, ensemble_score, search_info)
    """
    print('\nStacking weight search...')
    model_names, n_models = model_oof_df.columns.tolist(), len(model_oof_df.columns)
    meta_learner = Ridge(alpha = 1.0, random_state = Config.RANDOM_STATE)
    meta_learner.fit(model_oof_df, true)
    raw_weights = meta_learner.coef_
    weights = np.maximum(raw_weights, 0)  # clip negative weights to zero
    # All-zero after clipping -> fall back to uniform weights; else normalise.
    weights = weights / weights.sum() if weights.sum() > 0 else np.ones(n_models) / n_models

    ensemble_pred = model_oof_df @ weights
    ensemble_score = pearsonr(true, ensemble_pred)[0]

    # 3-fold CV of the meta-learner as a stability (not accuracy) check.
    cv_scores = cross_val_score(meta_learner, model_oof_df, true, cv = 3, scoring = 'neg_mean_squared_error')
    cv_std = cv_scores.std()

    print(f"Stacking result: {ensemble_score:.4f}")
    print(f"CV stability (std): {cv_std:.4f}")
    print(f"Model weights: {[f'{w:.3f}' for w in weights]}")

    weight_dict = dict(zip(model_names, weights))
    search_info = {"method": "stacking", "meta_learner": "Ridge", "cv_stability": cv_std, "ensemble_score": ensemble_score}

    return weight_dict, ensemble_score, search_info
466
+
467
+
468
def evaluate_single_model_strategy(oof_pred, test_pred, train, strategy, strategy_res, slice_weights,
                                   best_score, best_strategy, best_oof_pred, best_test_pred, best_combination):
    """Score each learner separately under *strategy* and track the best.

    Slice predictions are blended with *slice_weights*; the Pearson
    correlation of the blended OOF against the target is the score.
    Results are written into strategy_res under '<strategy>_<learner>'.

    Returns:
        Updated (best_score, best_strategy, best_oof_pred, best_test_pred,
        best_combination).
    """
    for learner in Config.get_learners():
        learner_name = learner['name']
        print(f"{strategy} single model: {learner_name}")
        key = f"{strategy}_{learner_name}"

        oof = pd.DataFrame(oof_pred[learner_name]).values @ slice_weights
        test = pd.DataFrame(test_pred[learner_name]).values @ slice_weights
        score = pearsonr(train[Config.TARGET], oof)[0]
        print(f"\t score: {score:.4f}")

        strategy_res[key]['ensemble_score'] = score
        strategy_res[key]['oof_pred'], strategy_res[key]['test_pred'] = oof, test
        if score > best_score:
            best_score, best_strategy = score, key
            best_oof_pred, best_test_pred, best_combination = oof, test, f"{learner_name.upper()} {strategy}"

    return best_score, best_strategy, best_oof_pred, best_test_pred, best_combination
487
+
488
+
489
def print_strategy_comparison(strategy_res, mode, best_combination):
    """Pretty-print per-strategy scores plus the winning combination.

    Returns the sorted (combination, score) list in 'single' mode, else None.
    """
    print(f"\nFINAL RESULTS - MODE: {mode.upper()}")
    if mode == 'ensemble':
        print("Ensemble Results:")
        for strategy in Config.OUTLIER_STRATEGIES:
            print(f"\t{strategy}: {strategy_res[strategy]['ensemble_score']:.4f}")
            for model_name, model_score in strategy_res[strategy]['model_scores'].items():
                print(f"\t\t{model_name}: {model_score:.4f}")
        ranked = None
    else:
        print("Single Results:")
        ranked = sorted(((k, v['ensemble_score']) for k, v in strategy_res.items()),
                        key = lambda item: item[1], reverse = True)
        for combination, score in ranked[:10]:  # show the ten best only
            print(f"\t{combination}: {score:.4f}")

    print(f"\nBest Combination: {best_combination}")
    return ranked
509
+
510
+
511
+
512
+
513
+
514
def train_mlp_model(train, test, config = None):
    """Train the MLP on standardised features with early stopping.

    The last 20% of rows is split off chronologically as validation,
    features are standardised (scaler fit on train only), the model is
    trained with Huber loss and checkpointed on the best validation Pearson.

    Args:
        train: frame with Config.MLP_FEATURES and Config.TARGET.
        test: frame with Config.MLP_FEATURES.
        config: optional hyper-parameter dict; defaults to Config.MLP_CONFIG.

    Returns:
        (test_predictions, best_validation_pearson)
    """
    if config is None:
        config = Config.MLP_CONFIG

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    X_train_full = train[Config.MLP_FEATURES].values
    y_train_full = train[Config.TARGET].values
    # shuffle=False keeps temporal order; random_state is then inert.
    X_train, X_val, y_train, y_val = train_test_split(X_train_full, y_train_full, test_size = 0.2, shuffle = False, random_state = Config.RANDOM_STATE)

    # Fit the scaler on the training split only, to avoid leakage.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_val = scaler.transform(X_val)
    X_test = scaler.transform(test[Config.MLP_FEATURES].values)

    # Targets get a trailing dimension to match the (N, 1) model output.
    train_dataset = TensorDataset(torch.tensor(X_train, dtype = torch.float32), torch.tensor(y_train, dtype = torch.float32).unsqueeze(1))
    val_dataset = TensorDataset(torch.tensor(X_val, dtype = torch.float32), torch.tensor(y_val, dtype = torch.float32).unsqueeze(1))
    test_dataset = TensorDataset(torch.tensor(X_test, dtype = torch.float32))
    train_loader = DataLoader(train_dataset, batch_size = config['batch_size'], shuffle = True)
    val_loader = DataLoader(val_dataset, batch_size = config['batch_size'], shuffle = False)
    test_loader = DataLoader(test_dataset, batch_size = config['batch_size'], shuffle = False)

    model = MLP(layers = config['layers'], activation = config['activation'], last_activation = config['last_activation'], dropout_rate = config['dropout_rate']).to(device)
    # Huber loss is robust to large-residual samples.
    criterion = nn.HuberLoss(delta = 5.0, reduction = 'mean')
    optimizer = optim.Adam(model.parameters(), lr = config['learning_rate'])
    checkpointer = CheckPointer(path = os.path.join(Config.RESULTS_DIR, 'best_mlp_model.pt'))

    print(f"Starting MLP model training, epochs: {config['epochs']}")
    best_val_score = -np.inf
    patience_counter = 0
    patience = config.get('patience', 10)

    for epoch in range(config['epochs']):
        model.train()
        running_loss = 0.0

        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        # Validation pass: collect predictions and targets for scoring.
        model.eval()
        val_preds, val_trues = [], []
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                val_preds += [outputs.cpu().numpy()]
                val_trues += [targets.cpu().numpy()]

        val_preds = np.concatenate(val_preds).flatten()
        val_trues = np.concatenate(val_trues).flatten()
        val_score = pearsonr(val_preds, val_trues)[0]
        print(f"Epoch {epoch+1}/{config['epochs']}: Train Loss: {running_loss/len(train_loader):.4f}, Val Score: {val_score:.4f}")

        # Checkpoint on improvement; early-stop after `patience` stale epochs.
        if val_score > best_val_score:
            best_val_score = val_score
            patience_counter = 0
            checkpointer(val_score, model)
        else: patience_counter += 1

        if patience_counter >= patience:
            print(f"Early stopping at epoch {epoch+1}")
            break

    # Restore the best checkpoint and predict the test set.
    model = checkpointer.load(model)
    model.eval()
    predictions = []
    with torch.no_grad():
        for inputs, in test_loader:
            inputs = inputs.to(device)
            outputs = model(inputs)
            predictions += [outputs.cpu().numpy()]

    predictions = np.concatenate(predictions).flatten()
    return predictions, best_val_score
595
+
596
+
597
def create_ensemble_submission(ml_predictions, mlp_predictions, submission, ml_weight = 0.8, mlp_weight = 0.2, strategy = 'ensemble'):
    """Blend ML and MLP predictions and write the blended submission CSV.

    Returns:
        (ensemble_predictions, path_of_written_csv)

    Raises:
        ValueError: when the two prediction arrays differ in length.
    """
    if len(ml_predictions) != len(mlp_predictions):
        raise ValueError(f"预测长度不匹配: ML({len(ml_predictions)}) vs MLP({len(mlp_predictions)})")

    blended = ml_weight * ml_predictions + mlp_weight * mlp_predictions
    submission_ensemble = submission.copy()
    submission_ensemble[Config.TARGET] = blended

    # Encode the blend recipe into the file name for traceability.
    ensemble_filename = f"submission_ensemble_{strategy}_{ml_weight:.1f}ml_{mlp_weight:.1f}mlp.csv"
    ensemble_filepath = os.path.join(Config.SUBMISSION_DIR, ensemble_filename)
    submission_ensemble.to_csv(ensemble_filepath, index = False)
    print(f"Ensemble submission file saved: {ensemble_filepath}")

    return blended, ensemble_filepath
611
+
612
+
613
def save2csv(submission_, predictions, score, models = "ML"):
    """Write *predictions* into a copy of the submission frame and save it.

    The score is embedded in the file name for easy comparison.

    Returns:
        The path of the written CSV.
    """
    out = submission_.copy()
    out[Config.TARGET] = predictions
    filepath = os.path.join(Config.SUBMISSION_DIR, f"submission_{models}_{score:.4f}.csv")
    out.to_csv(filepath, index = False)
    print(f"{models} submission saved to {filepath}")
    return filepath
621
+
622
+
623
def create_multiple_submissions(train, ml_predictions, mlp_predictions, submission, best_strategy, ml_score, mlp_score):
    """Write single-model and blended submissions, then recommend the best.

    Returns:
        (best_predictions, best_filename) chosen by validation score.
    """
    ml_filename = save2csv(submission, ml_predictions, ml_score, 'ML')
    mlp_filename = save2csv(submission, mlp_predictions, mlp_score, 'MLP')

    # (ml_weight, mlp_weight, label) for each blend flavour.
    ensemble_configs = [
        (0.9, 0.1, "conservative"),  # conservative: lean on the ML model
        (0.7, 0.3, "balanced"),      # balanced blend
        (0.5, 0.5, "equal"),         # equal weights
    ]

    ensemble_files = []
    for ml_w, mlp_w, desc in ensemble_configs:
        _, ensemble_file = create_ensemble_submission(ml_predictions, mlp_predictions, submission, ml_w, mlp_w, f"{best_strategy}_{desc}")
        ensemble_files.append(ensemble_file)

    # Recommend whichever single model validated better.
    if ml_score > mlp_score:
        best_final_pred, best_filename, best_type = ml_predictions, ml_filename, "ML"
    else:
        best_final_pred, best_filename, best_type = mlp_predictions, mlp_filename, "MLP"

    print(f"\nRecommended submission: {best_filename} ({best_type})")
    print(f"All generated files:")
    for ef in ensemble_files:
        print(f" - {ef}")

    return best_final_pred, best_filename
653
+
654
+
655
+
DRW/DRW-Crypto/__pycache__/Settings.cpython-311.pyc ADDED
Binary file (6.09 kB). View file
 
DRW/DRW-Crypto/__pycache__/Utils.cpython-311.pyc ADDED
Binary file (45.1 kB). View file
 
DRW/DRW-Crypto/__pycache__/inplemental.cpython-311.pyc ADDED
Binary file (10.4 kB). View file
 
DRW/DRW-Crypto/feature_engineering.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
DRW/DRW-Crypto/inplemental.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2025/7/4 19:42
3
+ # @Author : Lukax
4
+ # @Email : Lukarxiang@gmail.com
5
+ # @File : inplemental.py
6
+ # @Software : PyCharm
7
+
8
+
9
+ import torch
10
+ import numpy as np
11
+ import pandas as pd
12
+ from Settings import Config
13
+ from torch.utils.data import DataLoader, TensorDataset
14
+
15
+
16
def getDataLoader(X, Y, hparams, device, shuffle = True):
    """Wrap arrays into a seeded DataLoader; pass Y=None for inference."""
    features = torch.tensor(X, dtype = torch.float32, device = device)
    if Y is None:
        dataset = TensorDataset(features)
    else:
        raw_y = Y.values if hasattr(Y, 'values') else Y
        # Targets get an explicit trailing dimension: (N,) -> (N, 1).
        targets = torch.tensor(raw_y, dtype = torch.float32, device = device).unsqueeze(1)
        dataset = TensorDataset(features, targets)

    seeded_gen = torch.Generator().manual_seed(hparams['seed'])
    return DataLoader(dataset, batch_size = hparams['batch_size'], shuffle = shuffle,
                      generator = seeded_gen)
28
+
29
class Config:
    # NOTE(review): this class shadows the `Config` imported from Settings
    # above — every later reference in this module resolves here, not to
    # Settings.Config. Verify that is intentional.

    # Absolute data locations on the training machine.
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/test.parquet"
    SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/sample_submission.csv"

    # Original features plus additional market features
    FEATURES = [
        "X175", "X198", "X179", "X173", "X169", "X181", "X94",
        "X197", "X137", "X133", "X163", "X196", "sell_qty",
        "bid_qty", "ask_qty", "buy_qty", "volume"]
    # Extra candidate feature columns (not referenced elsewhere in this module).
    EX_FEATURES = [
        'X598', 'X385', 'X603', 'X674', 'X415', 'X345', 'X174',
        'X302', 'X178', 'X168', 'X612', 'X421', 'X333', 'X586', 'X292'
    ]
    TARGET = "label"    # prediction target column
    N_FOLDS = 3         # cross-validation folds
    RANDOM_STATE = 42   # global seed
    # NOTE(review): load_data() below reads Config.MLP_FEATURES, which this
    # class does not define — confirm it is injected elsewhere or add it.
46
+
47
def add_featrues1(df):
    """Append engineered order-flow / microstructure features to *df*.

    All features are ratio, interaction, log or imbalance transforms of the
    base columns (bid_qty, ask_qty, buy_qty, sell_qty, volume); 1e-10 is
    added to denominators to avoid division by zero. NOTE: mutates *df*
    in place and also returns it (after inf/NaN replacement).
    """
    # Original features
    df['bid_ask_interaction'] = df['bid_qty'] * df['ask_qty']
    df['bid_buy_interaction'] = df['bid_qty'] * df['buy_qty']
    df['bid_sell_interaction'] = df['bid_qty'] * df['sell_qty']
    df['ask_buy_interaction'] = df['ask_qty'] * df['buy_qty']
    df['ask_sell_interaction'] = df['ask_qty'] * df['sell_qty']

    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + 1e-10)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + 1e-10)
    df['log_volume'] = np.log1p(df['volume'])

    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + 1e-10)
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + 1e-10)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + 1e-10)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + 1e-10)

    # === NEW MICROSTRUCTURE FEATURES ===

    # Price Pressure Indicators
    df['net_order_flow'] = df['buy_qty'] - df['sell_qty']
    df['normalized_net_flow'] = df['net_order_flow'] / (df['volume'] + 1e-10)
    df['buying_pressure'] = df['buy_qty'] / (df['volume'] + 1e-10)
    df['volume_weighted_buy'] = df['buy_qty'] * df['volume']

    # Liquidity Depth Measures
    df['total_depth'] = df['bid_qty'] + df['ask_qty']
    df['depth_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['total_depth'] + 1e-10)
    df['relative_spread'] = np.abs(df['bid_qty'] - df['ask_qty']) / (df['total_depth'] + 1e-10)
    df['log_depth'] = np.log1p(df['total_depth'])

    # Order Flow Toxicity Proxies
    df['kyle_lambda'] = np.abs(df['net_order_flow']) / (df['volume'] + 1e-10)
    df['flow_toxicity'] = np.abs(df['order_flow_imbalance']) * df['volume']
    df['aggressive_flow_ratio'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)

    # Market Activity Indicators
    df['volume_depth_ratio'] = df['volume'] / (df['total_depth'] + 1e-10)
    df['activity_intensity'] = (df['buy_qty'] + df['sell_qty']) / (df['volume'] + 1e-10)
    df['log_buy_qty'] = np.log1p(df['buy_qty'])
    df['log_sell_qty'] = np.log1p(df['sell_qty'])
    df['log_bid_qty'] = np.log1p(df['bid_qty'])
    df['log_ask_qty'] = np.log1p(df['ask_qty'])

    # Microstructure Volatility Proxies
    df['realized_spread_proxy'] = 2 * np.abs(df['net_order_flow']) / (df['volume'] + 1e-10)
    df['price_impact_proxy'] = df['net_order_flow'] / (df['total_depth'] + 1e-10)
    df['quote_volatility_proxy'] = np.abs(df['depth_imbalance'])

    # Complex Interaction Terms
    df['flow_depth_interaction'] = df['net_order_flow'] * df['total_depth']
    df['imbalance_volume_interaction'] = df['order_flow_imbalance'] * df['volume']
    df['depth_volume_interaction'] = df['total_depth'] * df['volume']
    df['buy_sell_spread'] = np.abs(df['buy_qty'] - df['sell_qty'])
    df['bid_ask_spread'] = np.abs(df['bid_qty'] - df['ask_qty'])

    # Information Asymmetry Measures
    df['trade_informativeness'] = df['net_order_flow'] / (df['bid_qty'] + df['ask_qty'] + 1e-10)
    df['execution_shortfall_proxy'] = df['buy_sell_spread'] / (df['volume'] + 1e-10)
    df['adverse_selection_proxy'] = df['net_order_flow'] / (df['total_depth'] + 1e-10) * df['volume']

    # Market Efficiency Indicators
    df['fill_probability'] = df['volume'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)
    df['execution_rate'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)
    df['market_efficiency'] = df['volume'] / (df['bid_ask_spread'] + 1e-10)

    # Non-linear Transformations
    df['sqrt_volume'] = np.sqrt(df['volume'])
    df['sqrt_depth'] = np.sqrt(df['total_depth'])
    df['volume_squared'] = df['volume'] ** 2
    df['imbalance_squared'] = df['order_flow_imbalance'] ** 2

    # Relative Measures
    df['bid_ratio'] = df['bid_qty'] / (df['total_depth'] + 1e-10)
    df['ask_ratio'] = df['ask_qty'] / (df['total_depth'] + 1e-10)
    df['buy_ratio'] = df['buy_qty'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)
    df['sell_ratio'] = df['sell_qty'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)

    # Market Stress Indicators
    df['liquidity_consumption'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)
    df['market_stress'] = df['volume'] / (df['total_depth'] + 1e-10) * np.abs(df['order_flow_imbalance'])
    df['depth_depletion'] = df['volume'] / (df['bid_qty'] + df['ask_qty'] + 1e-10)

    # Directional Indicators
    df['net_buying_ratio'] = df['net_order_flow'] / (df['volume'] + 1e-10)
    df['directional_volume'] = df['net_order_flow'] * np.log1p(df['volume'])
    df['signed_volume'] = np.sign(df['net_order_flow']) * df['volume']

    # Replace infinities and NaNs
    df = df.replace([np.inf, -np.inf], 0).fillna(0)

    return df
140
+
141
+
142
def load_data():
    """Load train/test parquet files, engineer features and return frames.

    Side effect: rewrites Config.FEATURES to the post-engineering test columns.

    Returns:
        (train, test, submission) with fresh integer indices.
    """
    # NOTE(review): the local Config class above does not define MLP_FEATURES —
    # confirm it is set elsewhere before this runs, or this raises AttributeError.
    features = list(set(Config.FEATURES + Config.MLP_FEATURES))
    train = pd.read_parquet(Config.TRAIN_PATH, columns = features + [Config.TARGET])
    train = train.dropna(subset=[Config.TARGET]).reset_index(drop=True)
    assert not train[Config.TARGET].isna().any(), "label still has NaN"
    test = pd.read_parquet(Config.TEST_PATH, columns = features)
    submission = pd.read_csv(Config.SUBMISSION_PATH)
    print(f'Origin: train {train.shape}, test {test.shape}, submission {submission.shape}')

    # Feature engineering is applied identically to both splits.
    train, test = add_featrues1(train), add_featrues1(test)
    Config.FEATURES = test.columns.tolist()

    return train.reset_index(drop = True), test.reset_index(drop = True), submission
155
+
156
+
DRW/DRW-Crypto/main.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2025/7/10 20:56
3
+ # @Author : Lukax
4
+ # @Email : Lukarxiang@gmail.com
5
+ # @File : main.py
6
+ # -*- presentd: PyCharm -*-
7
+
8
+
9
+ import os
10
+ import numpy as np
11
+ import pandas as pd
12
+ from Settings import Config
13
+ from inplemental import load_data
14
+ from Utils import set_seed, train2compare_outlier_strategy, print_strategy_comparison, analyze_outliers, train_mlp_model, create_multiple_submissions, save2csv
15
+
16
+
17
+
18
def flow():
    """End-to-end pipeline for main.py.

    Trains ML models under both 'single' and 'ensemble' outlier-strategy
    modes, picks the better of the two, trains an MLP, blends when MLP
    predictions are available, writes the submission file(s), and persists a
    one-row results summary CSV.

    Fix: removed a stray ``breakpoint()`` left after data loading — it froze
    every non-interactive run — and deleted commented-out dead code.
    """
    Config.print_config_summary()

    set_seed(Config.RANDOM_STATE)
    train, test, submission = load_data()
    print(f"\ntrain shape: {train.shape}\ntest shape: {test.shape}")

    # ML single training
    single_oof_pred, single_test_pred, single_strategy_res, single_best_strategy, single_best_combination = \
        train2compare_outlier_strategy(train, test, mode='single')
    print(f"{'='*50}\n\tsingle best: {single_best_combination}")

    # ML ensemble training
    ensemble_oof_pred, ensemble_test_pred, ensemble_strategy_res, ensemble_best_strategy, ensemble_best_combination = \
        train2compare_outlier_strategy(train, test, mode='ensemble')
    print(f"{'='*50}\n\tensemble best: {ensemble_best_combination}")

    # Strategy comparison reports
    print_strategy_comparison(single_strategy_res, 'single', single_best_combination)
    print_strategy_comparison(ensemble_strategy_res, 'ensemble', ensemble_best_combination)

    # Keep whichever of the single / ensemble ML candidates scored higher.
    single_best_score = single_strategy_res[single_best_strategy]['ensemble_score']
    ensemble_best_score = ensemble_strategy_res[ensemble_best_strategy]['ensemble_score']
    if ensemble_best_score > single_best_score:
        final_ml_pred, final_ml_strategy = ensemble_test_pred, ensemble_best_combination
        final_ml_score, strategy_type = ensemble_best_score, "ensemble ml"
    else:
        final_ml_pred, final_ml_strategy = single_test_pred, single_best_combination
        final_ml_score, strategy_type = single_best_score, "single ml"
    print(f"{'='*50}\n\tBest ML strategy: {strategy_type} - {final_ml_strategy}\nBest score: {final_ml_score:.6f}")

    # DL: MLP model
    mlp_predictions, mlp_score = train_mlp_model(train, test)
    print(f"{'='*50}\n\tMLP score: {mlp_score:.5f}")

    # Generate submission: blend MLP with the best ML model when available,
    # otherwise fall back to an ML-only submission.
    if mlp_predictions is not None:
        best_predictions, best_filename = create_multiple_submissions(
            train, final_ml_pred, mlp_predictions, submission,
            final_ml_strategy.replace(' ', '_').lower(), final_ml_score, mlp_score)
    else:  # ML only
        submission[Config.TARGET] = final_ml_pred
        best_filename = f"submission_{final_ml_strategy.replace(' ', '_').lower()}_{final_ml_score:.6f}.csv"
        best_filepath = os.path.join(Config.SUBMISSION_DIR, best_filename)
        submission.to_csv(best_filepath, index=False)
        print(f"ML submission saved to {best_filepath}")
        best_predictions, final_score = final_ml_pred, final_ml_score
        print(best_predictions, '\n', best_filename, '\n', final_score)

    # Summary analysis: persist a single-row CSV of the run's best results.
    results_summary = {
        'ml_single_best': {'strategy': single_best_combination, 'score': single_best_score},
        'ml_ensemble_best': {'strategy': ensemble_best_combination, 'score': ensemble_best_score},
        'ml_final': {'strategy': final_ml_strategy, 'score': final_ml_score, 'type': strategy_type},
        'mlp_score': mlp_score if mlp_predictions is not None else 'N/A',
        'best_filename': best_filename
    }
    results_df = pd.DataFrame([results_summary])
    summary_filepath = os.path.join(Config.RESULTS_DIR, 'comprehensive_results_summary.csv')
    results_df.to_csv(summary_filepath, index=False)


if __name__ == "__main__":
    flow()
88
+
89
+
DRW/DRW-Crypto/optimize_params.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2025/7
3
+ # @Author : Lukax
4
+ # @Email : Lukarxiang@gmail.com
5
+ # @File : optimize_params.py
6
+ # -*- presentd: PyCharm -*-
7
+
8
+
9
+ import os
10
+ import json
11
+ import argparse
12
+ import pandas as pd
13
+ from Utils import set_seed
14
+ from Settings import Config
15
+ from inplemental import load_data
16
+ from HyperparameterOptimizer import HyperparameterManager, quick_optimize_single_model
17
+
18
+
19
+
20
def parse_args(argv=None):
    """Parse CLI options for the hyperparameter optimization tool.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` as before; passing an
            explicit list enables programmatic use and testing.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='超参数优化工具')

    parser.add_argument('--model', type=str, choices=['xgb', 'lgb', 'cat', 'rf'], help='选择要优化的模型')
    parser.add_argument('--all', action='store_true', help='优化所有模型')
    parser.add_argument('--trials', type=int, default=200, help='搜参尝试次数')
    parser.add_argument('--cv-folds', type=int, default=5, help='交叉验证折数')
    parser.add_argument('--sample-ratio', type=float, default=None, help='数据采样比例,用于快速测试 (默认全量)')
    parser.add_argument('--update-config', action='store_true', help='是否自动更新Config文件')
    parser.add_argument('--output-dir', type=str, default=os.path.join('results', 'optimization_results'), help='结果输出目录')
    return parser.parse_args(argv)
31
+
32
+
33
def prepare_data(sample_ratio=None):
    """Build the (X, y) training arrays, optionally sub-sampled.

    Args:
        sample_ratio: Fraction in (0, 1) to randomly sample for quick
            experiments; ``None`` (or >= 1) keeps the full dataset.

    Returns:
        Tuple ``(X, y)`` of numpy arrays (NaNs in features filled with 0).
    """
    train, _test, _submission = load_data()
    X = train[Config.FEATURES].fillna(0).values
    y = train[Config.TARGET].values

    if sample_ratio and sample_ratio < 1:
        sample_size = int(len(X) * sample_ratio)
        print(f"sample ratio {sample_ratio}, num {sample_size}")
        # Reproducible random row selection via the configured seed.
        picked = pd.Series(range(len(X))).sample(sample_size, random_state=Config.RANDOM_STATE)
        X, y = X[picked], y[picked]

    return X, y
44
+
45
+
46
def optimize_single_model(model_name, X, y, trials, cv_folds, output_dir):
    """Optimize one model's hyperparameters and persist the result as JSON.

    NOTE(review): ``cv_folds`` is accepted but not forwarded to
    ``quick_optimize_single_model`` — confirm whether it should be.

    Returns:
        The result dict produced by ``quick_optimize_single_model``.
    """
    result = quick_optimize_single_model(model_name, X, y, n_trials=trials)

    payload = {
        'model_name': model_name,
        'best_params': result['best_params'],
        'best_score': result['best_score'],
        'n_trials': result['n_trials'],
        'optimization_time': str(pd.Timestamp.now()),
    }
    result_path = os.path.join(output_dir, f'{model_name}_optimization_result.json')
    with open(result_path, 'w', encoding='utf-8') as f:
        json.dump(payload, f, indent=2, ensure_ascii=False)

    print(f"{model_name} optimization completed!")
    print(f"Results saved to: {result_path}")

    return result
61
+
62
+
63
def optimize_all_models(X, y, trials, cv_folds, output_dir):
    """Search hyperparameters for every registered model, then save a
    history plot and a JSON summary of the per-model bests.

    Returns:
        (manager, results) — the HyperparameterManager and its results dict.
    """
    manager = HyperparameterManager()
    results = manager.optimize_all_models(X, y, n_trials=trials, cv_folds=cv_folds)

    # Plot optimization history
    manager.plot_optimization_history(os.path.join(output_dir, 'optimization_history.png'))

    # Condense each model's outcome into a JSON-serializable summary.
    summary = {
        name: {
            'best_score': res['best_score'],
            'n_trials': res['n_trials'],
            'best_params': res['best_params'],
        }
        for name, res in results.items()
    }
    summary_path = os.path.join(output_dir, 'optimization_summary.json')
    with open(summary_path, 'w', encoding='utf-8') as f:
        json.dump(summary, f, indent=2, ensure_ascii=False)

    print(f"Optimization summary saved to: {summary_path}")
    return manager, results
84
+
85
+
86
def print_optimization_summary(results):
    """Pretty-print each model's best score, trial count and key parameters,
    ordered by best score descending. No-op when *results* is empty/None."""
    if not results:
        return

    print("\n" + "=" * 60)
    print("Optimization Results Summary")
    print("=" * 60)

    ranked = sorted(results.items(), key=lambda kv: kv[1]['best_score'], reverse=True)
    key_params = ('learning_rate', 'n_estimators', 'max_depth', 'reg_alpha', 'reg_lambda')
    for model_name, result in ranked:
        print(f"\n{model_name.upper()}")
        print(f" Best score: {result['best_score']:.6f}")
        print(f" Trials: {result['n_trials']}")
        print(f" Key parameters:")
        # Only report the handful of parameters worth eyeballing.
        for param in key_params:
            if param not in result['best_params']:
                continue
            value = result['best_params'][param]
            rendered = f"{value:.5f}" if isinstance(value, float) else f"{value}"
            print(f" {param}: {rendered}")
110
+
111
def flow():
    """CLI entry point: parse arguments, prepare data, run the requested
    optimization (one model or all), then report and optionally push the
    best parameters back into the Config file."""
    args = parse_args()
    set_seed(Config.RANDOM_STATE)
    os.makedirs(args.output_dir, exist_ok=True)
    print(f"Output directory: {args.output_dir}")

    X, y = prepare_data(getattr(args, 'sample_ratio', None))

    manager, results = None, None
    if args.model:  # search a single model
        result = optimize_single_model(args.model, X, y, args.trials, args.cv_folds, args.output_dir)
        if result:
            results = {args.model: result}
    elif args.all:  # search every model
        manager, results = optimize_all_models(X, y, args.trials, args.cv_folds, args.output_dir)
    else:
        raise ValueError("Please specify --model or --all parameter")

    if results:
        print_optimization_summary(results)
        # Optionally write the best parameters back into Config (only
        # available when a manager was built, i.e. the --all path).
        if args.update_config and manager:
            try:
                manager.update_config()
                print("Config file automatically updated")
            except Exception as e:
                print(f"Config file update failed: {str(e)}")
                print("Please manually copy best parameters to Settings.py")

    print(f"\nHyperparameter optimization completed! Results saved in: {args.output_dir}")


if __name__ == "__main__":
    flow()
143
+
DRW/DRW-Crypto/pyproject.toml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "drw-crypto"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.11"
7
+ dependencies = [
8
+ "catboost>=1.2.8",
9
+ "hyperopt>=0.2.7",
10
+ "ipykernel>=6.29.5",
11
+ "lightgbm>=4.6.0",
12
+ "matplotlib>=3.10.3",
13
+ "mlflow>=3.1.1",
14
+ "numpy>=2.3.0",
15
+ "optuna>=4.4.0",
16
+ "optuna-dashboard>=0.19.0",
17
+ "pandas>=2.3.0",
18
+ "pandas-stubs>=2.3.0.250703",
19
+ "pyarrow>=20.0.0",
20
+ "ray>=2.47.1",
21
+ "scikit-learn>=1.7.0",
22
+ "scipy>=1.15.3",
23
+ "seaborn>=0.13.2",
24
+ "torch>=2.7.1",
25
+ "tqdm>=4.67.1",
26
+ "wandb>=0.21.0",
27
+ "xgboost>=3.0.2",
28
+ ]
DRW/DRW-Crypto/sub-sample-vs-super-sample-noisy-rows.ipynb ADDED
@@ -0,0 +1,1676 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "29f93930",
7
+ "metadata": {
8
+ "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
9
+ "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
10
+ "execution": {
11
+ "iopub.execute_input": "2025-06-30T18:33:21.460315Z",
12
+ "iopub.status.busy": "2025-06-30T18:33:21.459991Z",
13
+ "iopub.status.idle": "2025-06-30T18:33:22.876157Z",
14
+ "shell.execute_reply": "2025-06-30T18:33:22.875331Z"
15
+ },
16
+ "papermill": {
17
+ "duration": 1.420892,
18
+ "end_time": "2025-06-30T18:33:22.877295",
19
+ "exception": false,
20
+ "start_time": "2025-06-30T18:33:21.456403",
21
+ "status": "completed"
22
+ },
23
+ "tags": []
24
+ },
25
+ "outputs": [
26
+ {
27
+ "name": "stdout",
28
+ "output_type": "stream",
29
+ "text": [
30
+ "/kaggle/input/drw-crypto-market-prediction/sample_submission.csv\n",
31
+ "/kaggle/input/drw-crypto-market-prediction/train.parquet\n",
32
+ "/kaggle/input/drw-crypto-market-prediction/test.parquet\n"
33
+ ]
34
+ }
35
+ ],
36
+ "source": [
37
+ "# This Python 3 environment comes with many helpful analytics libraries installed\n",
38
+ "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n",
39
+ "# For example, here's several helpful packages to load\n",
40
+ "\n",
41
+ "import numpy as np # linear algebra\n",
42
+ "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
43
+ "\n",
44
+ "# Input data files are available in the read-only \"../input/\" directory\n",
45
+ "# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n",
46
+ "\n",
47
+ "import os\n",
48
+ "for dirname, _, filenames in os.walk('/kaggle/input'):\n",
49
+ " for filename in filenames:\n",
50
+ " print(os.path.join(dirname, filename))\n",
51
+ "\n",
52
+ "# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n",
53
+ "# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session"
54
+ ]
55
+ },
56
+ {
57
+ "cell_type": "code",
58
+ "execution_count": 2,
59
+ "id": "722ea210",
60
+ "metadata": {
61
+ "execution": {
62
+ "iopub.execute_input": "2025-06-30T18:33:22.882612Z",
63
+ "iopub.status.busy": "2025-06-30T18:33:22.882260Z",
64
+ "iopub.status.idle": "2025-07-01T04:45:09.686323Z",
65
+ "shell.execute_reply": "2025-07-01T04:45:09.685475Z"
66
+ },
67
+ "papermill": {
68
+ "duration": 36706.813726,
69
+ "end_time": "2025-07-01T04:45:09.693024",
70
+ "exception": false,
71
+ "start_time": "2025-06-30T18:33:22.879298",
72
+ "status": "completed"
73
+ },
74
+ "tags": []
75
+ },
76
+ "outputs": [
77
+ {
78
+ "name": "stdout",
79
+ "output_type": "stream",
80
+ "text": [
81
+ "Using device: cpu\n",
82
+ "Loaded data - Train: (525887, 32), Test: (538150, 31), Submission: (538150, 2)\n",
83
+ "\n",
84
+ "=== Outlier Analysis ===\n",
85
+ " Strategy 'reduce': Adjusted 525 outliers (0.1% of data)\n",
86
+ "\n",
87
+ "Total outliers detected: 525 (0.10%)\n",
88
+ "\n",
89
+ "Label statistics:\n",
90
+ " Normal samples - Mean: 0.0379, Std: 0.9730\n",
91
+ " Outlier samples - Mean: -1.7577, Std: 8.4272\n",
92
+ " Label range - Normal: [-15.8988, 20.7403]\n",
93
+ " Label range - Outliers: [-24.4166, 13.1532]\n",
94
+ "\n",
95
+ "Top features with extreme values in outliers:\n",
96
+ " X345: 974.9% difference (outlier: -0.2079, normal: 0.0238)\n",
97
+ " X598: 688.8% difference (outlier: -0.1776, normal: 0.0302)\n",
98
+ " buy_qty: 367.4% difference (outlier: 613.4614, normal: 131.2453)\n",
99
+ " X385: 121.7% difference (outlier: -0.0153, normal: 0.0704)\n",
100
+ " X168: 82.7% difference (outlier: 0.0255, normal: 0.1475)\n",
101
+ " X603: 82.2% difference (outlier: 0.2858, normal: 0.1568)\n",
102
+ " X174: 79.9% difference (outlier: 0.0296, normal: 0.1474)\n",
103
+ " X302: 72.7% difference (outlier: 0.0634, normal: 0.2318)\n",
104
+ " X415: 61.9% difference (outlier: 0.0684, normal: 0.1798)\n",
105
+ " X862: 58.3% difference (outlier: -0.8237, normal: -0.5202)\n",
106
+ "\n",
107
+ "=== Training XGBoost Models with Outlier Strategy Comparison ===\n",
108
+ "\n",
109
+ "==================================================\n",
110
+ "Testing outlier strategy: REDUCE\n",
111
+ "==================================================\n",
112
+ "\n",
113
+ "--- Fold 1/3 ---\n",
114
+ " Training slice: full_data, samples: 350591\n",
115
+ " Training slice: last_90pct, samples: 350591\n",
116
+ " Training slice: last_85pct, samples: 350591\n",
117
+ " Training slice: last_80pct, samples: 350591\n",
118
+ " Training slice: oldest_25pct, samples: 0\n",
119
+ " Strategy 'reduce': Adjusted 350 outliers (0.1% of data)\n",
120
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
121
+ " Strategy 'reduce': Adjusted 350 outliers (0.1% of data)\n",
122
+ " Training slice: last_90pct_outlier_adj, samples: 350591\n",
123
+ " Strategy 'reduce': Adjusted 350 outliers (0.1% of data)\n",
124
+ " Training slice: last_85pct_outlier_adj, samples: 350591\n",
125
+ " Strategy 'reduce': Adjusted 350 outliers (0.1% of data)\n",
126
+ " Training slice: last_80pct_outlier_adj, samples: 350591\n",
127
+ " Training slice: oldest_25pct_outlier_adj, samples: 0\n",
128
+ "\n",
129
+ "--- Fold 2/3 ---\n",
130
+ " Training slice: full_data, samples: 350591\n",
131
+ " Training slice: last_90pct, samples: 298003\n",
132
+ " Training slice: last_85pct, samples: 271708\n",
133
+ " Training slice: last_80pct, samples: 245414\n",
134
+ " Training slice: oldest_25pct, samples: 131471\n",
135
+ " Strategy 'reduce': Adjusted 350 outliers (0.1% of data)\n",
136
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
137
+ " Strategy 'reduce': Adjusted 298 outliers (0.1% of data)\n",
138
+ " Training slice: last_90pct_outlier_adj, samples: 298003\n",
139
+ " Strategy 'reduce': Adjusted 271 outliers (0.1% of data)\n",
140
+ " Training slice: last_85pct_outlier_adj, samples: 271708\n",
141
+ " Strategy 'reduce': Adjusted 245 outliers (0.1% of data)\n",
142
+ " Training slice: last_80pct_outlier_adj, samples: 245414\n",
143
+ " Strategy 'reduce': Adjusted 131 outliers (0.1% of data)\n",
144
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
145
+ "\n",
146
+ "--- Fold 3/3 ---\n",
147
+ " Training slice: full_data, samples: 350592\n",
148
+ " Training slice: last_90pct, samples: 298004\n",
149
+ " Training slice: last_85pct, samples: 271709\n",
150
+ " Training slice: last_80pct, samples: 245415\n",
151
+ " Training slice: oldest_25pct, samples: 131471\n",
152
+ " Strategy 'reduce': Adjusted 350 outliers (0.1% of data)\n",
153
+ " Training slice: full_data_outlier_adj, samples: 350592\n",
154
+ " Strategy 'reduce': Adjusted 298 outliers (0.1% of data)\n",
155
+ " Training slice: last_90pct_outlier_adj, samples: 298004\n",
156
+ " Strategy 'reduce': Adjusted 271 outliers (0.1% of data)\n",
157
+ " Training slice: last_85pct_outlier_adj, samples: 271709\n",
158
+ " Strategy 'reduce': Adjusted 245 outliers (0.1% of data)\n",
159
+ " Training slice: last_80pct_outlier_adj, samples: 245415\n",
160
+ " Strategy 'reduce': Adjusted 131 outliers (0.1% of data)\n",
161
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
162
+ "\n",
163
+ "REDUCE Strategy - Weighted Ensemble Pearson: 0.1101\n",
164
+ " full_data_outlier_adj: 0.1077 (weight: 0.112)\n",
165
+ " last_90pct_outlier_adj: 0.1096 (weight: 0.112)\n",
166
+ " last_85pct_outlier_adj: 0.1010 (weight: 0.112)\n",
167
+ " last_80pct_outlier_adj: 0.0992 (weight: 0.112)\n",
168
+ " oldest_25pct_outlier_adj: 0.0727 (weight: 0.025)\n",
169
+ "\n",
170
+ "==================================================\n",
171
+ "Testing outlier strategy: REMOVE\n",
172
+ "==================================================\n",
173
+ "\n",
174
+ "--- Fold 1/3 ---\n",
175
+ " Training slice: full_data, samples: 350591\n",
176
+ " Training slice: last_90pct, samples: 350591\n",
177
+ " Training slice: last_85pct, samples: 350591\n",
178
+ " Training slice: last_80pct, samples: 350591\n",
179
+ " Training slice: oldest_25pct, samples: 0\n",
180
+ " Strategy 'remove': Adjusted 350 outliers (0.1% of data)\n",
181
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
182
+ " Strategy 'remove': Adjusted 350 outliers (0.1% of data)\n",
183
+ " Training slice: last_90pct_outlier_adj, samples: 350591\n",
184
+ " Strategy 'remove': Adjusted 350 outliers (0.1% of data)\n",
185
+ " Training slice: last_85pct_outlier_adj, samples: 350591\n",
186
+ " Strategy 'remove': Adjusted 350 outliers (0.1% of data)\n",
187
+ " Training slice: last_80pct_outlier_adj, samples: 350591\n",
188
+ " Training slice: oldest_25pct_outlier_adj, samples: 0\n",
189
+ "\n",
190
+ "--- Fold 2/3 ---\n",
191
+ " Training slice: full_data, samples: 350591\n",
192
+ " Training slice: last_90pct, samples: 298003\n",
193
+ " Training slice: last_85pct, samples: 271708\n",
194
+ " Training slice: last_80pct, samples: 245414\n",
195
+ " Training slice: oldest_25pct, samples: 131471\n",
196
+ " Strategy 'remove': Adjusted 350 outliers (0.1% of data)\n",
197
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
198
+ " Strategy 'remove': Adjusted 298 outliers (0.1% of data)\n",
199
+ " Training slice: last_90pct_outlier_adj, samples: 298003\n",
200
+ " Strategy 'remove': Adjusted 271 outliers (0.1% of data)\n",
201
+ " Training slice: last_85pct_outlier_adj, samples: 271708\n",
202
+ " Strategy 'remove': Adjusted 245 outliers (0.1% of data)\n",
203
+ " Training slice: last_80pct_outlier_adj, samples: 245414\n",
204
+ " Strategy 'remove': Adjusted 131 outliers (0.1% of data)\n",
205
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
206
+ "\n",
207
+ "--- Fold 3/3 ---\n",
208
+ " Training slice: full_data, samples: 350592\n",
209
+ " Training slice: last_90pct, samples: 298004\n",
210
+ " Training slice: last_85pct, samples: 271709\n",
211
+ " Training slice: last_80pct, samples: 245415\n",
212
+ " Training slice: oldest_25pct, samples: 131471\n",
213
+ " Strategy 'remove': Adjusted 350 outliers (0.1% of data)\n",
214
+ " Training slice: full_data_outlier_adj, samples: 350592\n",
215
+ " Strategy 'remove': Adjusted 298 outliers (0.1% of data)\n",
216
+ " Training slice: last_90pct_outlier_adj, samples: 298004\n",
217
+ " Strategy 'remove': Adjusted 271 outliers (0.1% of data)\n",
218
+ " Training slice: last_85pct_outlier_adj, samples: 271709\n",
219
+ " Strategy 'remove': Adjusted 245 outliers (0.1% of data)\n",
220
+ " Training slice: last_80pct_outlier_adj, samples: 245415\n",
221
+ " Strategy 'remove': Adjusted 131 outliers (0.1% of data)\n",
222
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
223
+ "\n",
224
+ "REMOVE Strategy - Weighted Ensemble Pearson: 0.1107\n",
225
+ " full_data_outlier_adj: 0.1092 (weight: 0.112)\n",
226
+ " last_90pct_outlier_adj: 0.1140 (weight: 0.112)\n",
227
+ " last_85pct_outlier_adj: 0.1009 (weight: 0.112)\n",
228
+ " last_80pct_outlier_adj: 0.0978 (weight: 0.112)\n",
229
+ " oldest_25pct_outlier_adj: 0.0738 (weight: 0.025)\n",
230
+ "\n",
231
+ "==================================================\n",
232
+ "Testing outlier strategy: DOUBLE\n",
233
+ "==================================================\n",
234
+ "\n",
235
+ "--- Fold 1/3 ---\n",
236
+ " Training slice: full_data, samples: 350591\n",
237
+ " Training slice: last_90pct, samples: 350591\n",
238
+ " Training slice: last_85pct, samples: 350591\n",
239
+ " Training slice: last_80pct, samples: 350591\n",
240
+ " Training slice: oldest_25pct, samples: 0\n",
241
+ " Strategy 'double': Adjusted 350 outliers (0.1% of data)\n",
242
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
243
+ " Strategy 'double': Adjusted 350 outliers (0.1% of data)\n",
244
+ " Training slice: last_90pct_outlier_adj, samples: 350591\n",
245
+ " Strategy 'double': Adjusted 350 outliers (0.1% of data)\n",
246
+ " Training slice: last_85pct_outlier_adj, samples: 350591\n",
247
+ " Strategy 'double': Adjusted 350 outliers (0.1% of data)\n",
248
+ " Training slice: last_80pct_outlier_adj, samples: 350591\n",
249
+ " Training slice: oldest_25pct_outlier_adj, samples: 0\n",
250
+ "\n",
251
+ "--- Fold 2/3 ---\n",
252
+ " Training slice: full_data, samples: 350591\n",
253
+ " Training slice: last_90pct, samples: 298003\n",
254
+ " Training slice: last_85pct, samples: 271708\n",
255
+ " Training slice: last_80pct, samples: 245414\n",
256
+ " Training slice: oldest_25pct, samples: 131471\n",
257
+ " Strategy 'double': Adjusted 350 outliers (0.1% of data)\n",
258
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
259
+ " Strategy 'double': Adjusted 298 outliers (0.1% of data)\n",
260
+ " Training slice: last_90pct_outlier_adj, samples: 298003\n",
261
+ " Strategy 'double': Adjusted 271 outliers (0.1% of data)\n",
262
+ " Training slice: last_85pct_outlier_adj, samples: 271708\n",
263
+ " Strategy 'double': Adjusted 245 outliers (0.1% of data)\n",
264
+ " Training slice: last_80pct_outlier_adj, samples: 245414\n",
265
+ " Strategy 'double': Adjusted 131 outliers (0.1% of data)\n",
266
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
267
+ "\n",
268
+ "--- Fold 3/3 ---\n",
269
+ " Training slice: full_data, samples: 350592\n",
270
+ " Training slice: last_90pct, samples: 298004\n",
271
+ " Training slice: last_85pct, samples: 271709\n",
272
+ " Training slice: last_80pct, samples: 245415\n",
273
+ " Training slice: oldest_25pct, samples: 131471\n",
274
+ " Strategy 'double': Adjusted 350 outliers (0.1% of data)\n",
275
+ " Training slice: full_data_outlier_adj, samples: 350592\n",
276
+ " Strategy 'double': Adjusted 298 outliers (0.1% of data)\n",
277
+ " Training slice: last_90pct_outlier_adj, samples: 298004\n",
278
+ " Strategy 'double': Adjusted 271 outliers (0.1% of data)\n",
279
+ " Training slice: last_85pct_outlier_adj, samples: 271709\n",
280
+ " Strategy 'double': Adjusted 245 outliers (0.1% of data)\n",
281
+ " Training slice: last_80pct_outlier_adj, samples: 245415\n",
282
+ " Strategy 'double': Adjusted 131 outliers (0.1% of data)\n",
283
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
284
+ "\n",
285
+ "DOUBLE Strategy - Weighted Ensemble Pearson: 0.1108\n",
286
+ " full_data_outlier_adj: 0.1085 (weight: 0.112)\n",
287
+ " last_90pct_outlier_adj: 0.1073 (weight: 0.112)\n",
288
+ " last_85pct_outlier_adj: 0.1060 (weight: 0.112)\n",
289
+ " last_80pct_outlier_adj: 0.1007 (weight: 0.112)\n",
290
+ " oldest_25pct_outlier_adj: 0.0684 (weight: 0.025)\n",
291
+ "\n",
292
+ "==================================================\n",
293
+ "Testing outlier strategy: NONE\n",
294
+ "==================================================\n",
295
+ "\n",
296
+ "--- Fold 1/3 ---\n",
297
+ " Training slice: full_data, samples: 350591\n",
298
+ " Training slice: last_90pct, samples: 350591\n",
299
+ " Training slice: last_85pct, samples: 350591\n",
300
+ " Training slice: last_80pct, samples: 350591\n",
301
+ " Training slice: oldest_25pct, samples: 0\n",
302
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
303
+ " Training slice: last_90pct_outlier_adj, samples: 350591\n",
304
+ " Training slice: last_85pct_outlier_adj, samples: 350591\n",
305
+ " Training slice: last_80pct_outlier_adj, samples: 350591\n",
306
+ " Training slice: oldest_25pct_outlier_adj, samples: 0\n",
307
+ "\n",
308
+ "--- Fold 2/3 ---\n",
309
+ " Training slice: full_data, samples: 350591\n",
310
+ " Training slice: last_90pct, samples: 298003\n",
311
+ " Training slice: last_85pct, samples: 271708\n",
312
+ " Training slice: last_80pct, samples: 245414\n",
313
+ " Training slice: oldest_25pct, samples: 131471\n",
314
+ " Training slice: full_data_outlier_adj, samples: 350591\n",
315
+ " Training slice: last_90pct_outlier_adj, samples: 298003\n",
316
+ " Training slice: last_85pct_outlier_adj, samples: 271708\n",
317
+ " Training slice: last_80pct_outlier_adj, samples: 245414\n",
318
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
319
+ "\n",
320
+ "--- Fold 3/3 ---\n",
321
+ " Training slice: full_data, samples: 350592\n",
322
+ " Training slice: last_90pct, samples: 298004\n",
323
+ " Training slice: last_85pct, samples: 271709\n",
324
+ " Training slice: last_80pct, samples: 245415\n",
325
+ " Training slice: oldest_25pct, samples: 131471\n",
326
+ " Training slice: full_data_outlier_adj, samples: 350592\n",
327
+ " Training slice: last_90pct_outlier_adj, samples: 298004\n",
328
+ " Training slice: last_85pct_outlier_adj, samples: 271709\n",
329
+ " Training slice: last_80pct_outlier_adj, samples: 245415\n",
330
+ " Training slice: oldest_25pct_outlier_adj, samples: 131471\n",
331
+ "\n",
332
+ "NONE Strategy - Weighted Ensemble Pearson: 0.1106\n",
333
+ " full_data_outlier_adj: 0.1121 (weight: 0.112)\n",
334
+ " last_90pct_outlier_adj: 0.1084 (weight: 0.112)\n",
335
+ " last_85pct_outlier_adj: 0.1032 (weight: 0.112)\n",
336
+ " last_80pct_outlier_adj: 0.1004 (weight: 0.112)\n",
337
+ " oldest_25pct_outlier_adj: 0.0737 (weight: 0.025)\n",
338
+ "\n",
339
+ "==================================================\n",
340
+ "OUTLIER STRATEGY COMPARISON SUMMARY\n",
341
+ "==================================================\n",
342
+ "REDUCE: 0.1101 \n",
343
+ "REMOVE: 0.1107 \n",
344
+ "DOUBLE: 0.1108 ← BEST\n",
345
+ "NONE: 0.1106 \n",
346
+ "\n",
347
+ "Relative performance vs 'reduce' strategy:\n",
348
+ " remove: +0.59%\n",
349
+ " double: +0.65%\n",
350
+ " none: +0.50%\n",
351
+ "\n",
352
+ "XGB Weighted Ensemble Pearson: 0.1108\n",
353
+ "\n",
354
+ "Individual slice OOF scores and weights:\n",
355
+ " full_data: 0.1121 (weight: 0.124)\n",
356
+ " last_90pct: 0.1084 (weight: 0.124)\n",
357
+ " last_85pct: 0.1032 (weight: 0.124)\n",
358
+ " last_80pct: 0.1004 (weight: 0.124)\n",
359
+ " oldest_25pct: 0.0737 (weight: 0.031)\n",
360
+ " full_data_outlier_adj: 0.1085 (weight: 0.112)\n",
361
+ " last_90pct_outlier_adj: 0.1073 (weight: 0.112)\n",
362
+ " last_85pct_outlier_adj: 0.1060 (weight: 0.112)\n",
363
+ " last_80pct_outlier_adj: 0.1007 (weight: 0.112)\n",
364
+ " oldest_25pct_outlier_adj: 0.0684 (weight: 0.025)\n",
365
+ "\n",
366
+ "Saved: submission_xgboost_double.csv\n",
367
+ "\n",
368
+ "=== Training MLP Model ===\n"
369
+ ]
370
+ },
371
+ {
372
+ "name": "stderr",
373
+ "output_type": "stream",
374
+ "text": [
375
+ "Epoch 1/10: 100%|██████████| 13/13 [00:07<00:00, 1.68it/s]\n"
376
+ ]
377
+ },
378
+ {
379
+ "name": "stdout",
380
+ "output_type": "stream",
381
+ "text": [
382
+ "Training Loss: 16059.1512\n"
383
+ ]
384
+ },
385
+ {
386
+ "name": "stderr",
387
+ "output_type": "stream",
388
+ "text": [
389
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.93it/s]\n"
390
+ ]
391
+ },
392
+ {
393
+ "name": "stdout",
394
+ "output_type": "stream",
395
+ "text": [
396
+ "Validation Pearson Coef: 0.0700 | Loss: 15180.1740\n",
397
+ "✅ New best model saved with Pearson: 0.0700\n"
398
+ ]
399
+ },
400
+ {
401
+ "name": "stderr",
402
+ "output_type": "stream",
403
+ "text": [
404
+ "Epoch 2/10: 100%|██████████| 13/13 [00:06<00:00, 2.02it/s]\n"
405
+ ]
406
+ },
407
+ {
408
+ "name": "stdout",
409
+ "output_type": "stream",
410
+ "text": [
411
+ "Training Loss: 15595.0242\n"
412
+ ]
413
+ },
414
+ {
415
+ "name": "stderr",
416
+ "output_type": "stream",
417
+ "text": [
418
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 3.01it/s]\n"
419
+ ]
420
+ },
421
+ {
422
+ "name": "stdout",
423
+ "output_type": "stream",
424
+ "text": [
425
+ "Validation Pearson Coef: 0.0838 | Loss: 15198.4545\n",
426
+ "✅ New best model saved with Pearson: 0.0838\n"
427
+ ]
428
+ },
429
+ {
430
+ "name": "stderr",
431
+ "output_type": "stream",
432
+ "text": [
433
+ "Epoch 3/10: 100%|██████████| 13/13 [00:06<00:00, 1.91it/s]\n"
434
+ ]
435
+ },
436
+ {
437
+ "name": "stdout",
438
+ "output_type": "stream",
439
+ "text": [
440
+ "Training Loss: 15456.7179\n"
441
+ ]
442
+ },
443
+ {
444
+ "name": "stderr",
445
+ "output_type": "stream",
446
+ "text": [
447
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 3.01it/s]\n"
448
+ ]
449
+ },
450
+ {
451
+ "name": "stdout",
452
+ "output_type": "stream",
453
+ "text": [
454
+ "Validation Pearson Coef: 0.0992 | Loss: 15219.1834\n",
455
+ "✅ New best model saved with Pearson: 0.0992\n"
456
+ ]
457
+ },
458
+ {
459
+ "name": "stderr",
460
+ "output_type": "stream",
461
+ "text": [
462
+ "Epoch 4/10: 100%|██████████| 13/13 [00:06<00:00, 2.04it/s]\n"
463
+ ]
464
+ },
465
+ {
466
+ "name": "stdout",
467
+ "output_type": "stream",
468
+ "text": [
469
+ "Training Loss: 15348.7826\n"
470
+ ]
471
+ },
472
+ {
473
+ "name": "stderr",
474
+ "output_type": "stream",
475
+ "text": [
476
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.97it/s]\n"
477
+ ]
478
+ },
479
+ {
480
+ "name": "stdout",
481
+ "output_type": "stream",
482
+ "text": [
483
+ "Validation Pearson Coef: 0.1021 | Loss: 15224.5020\n",
484
+ "✅ New best model saved with Pearson: 0.1021\n"
485
+ ]
486
+ },
487
+ {
488
+ "name": "stderr",
489
+ "output_type": "stream",
490
+ "text": [
491
+ "Epoch 5/10: 100%|██████████| 13/13 [00:06<00:00, 2.01it/s]\n"
492
+ ]
493
+ },
494
+ {
495
+ "name": "stdout",
496
+ "output_type": "stream",
497
+ "text": [
498
+ "Training Loss: 15229.8266\n"
499
+ ]
500
+ },
501
+ {
502
+ "name": "stderr",
503
+ "output_type": "stream",
504
+ "text": [
505
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.84it/s]\n"
506
+ ]
507
+ },
508
+ {
509
+ "name": "stdout",
510
+ "output_type": "stream",
511
+ "text": [
512
+ "Validation Pearson Coef: 0.1045 | Loss: 15242.1837\n",
513
+ "✅ New best model saved with Pearson: 0.1045\n"
514
+ ]
515
+ },
516
+ {
517
+ "name": "stderr",
518
+ "output_type": "stream",
519
+ "text": [
520
+ "Epoch 6/10: 100%|██████████| 13/13 [00:06<00:00, 1.89it/s]\n"
521
+ ]
522
+ },
523
+ {
524
+ "name": "stdout",
525
+ "output_type": "stream",
526
+ "text": [
527
+ "Training Loss: 15138.1736\n"
528
+ ]
529
+ },
530
+ {
531
+ "name": "stderr",
532
+ "output_type": "stream",
533
+ "text": [
534
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.99it/s]\n"
535
+ ]
536
+ },
537
+ {
538
+ "name": "stdout",
539
+ "output_type": "stream",
540
+ "text": [
541
+ "Validation Pearson Coef: 0.1075 | Loss: 15256.6681\n",
542
+ "✅ New best model saved with Pearson: 0.1075\n"
543
+ ]
544
+ },
545
+ {
546
+ "name": "stderr",
547
+ "output_type": "stream",
548
+ "text": [
549
+ "Epoch 7/10: 100%|██████████| 13/13 [00:06<00:00, 1.96it/s]\n"
550
+ ]
551
+ },
552
+ {
553
+ "name": "stdout",
554
+ "output_type": "stream",
555
+ "text": [
556
+ "Training Loss: 15046.1952\n"
557
+ ]
558
+ },
559
+ {
560
+ "name": "stderr",
561
+ "output_type": "stream",
562
+ "text": [
563
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.80it/s]\n"
564
+ ]
565
+ },
566
+ {
567
+ "name": "stdout",
568
+ "output_type": "stream",
569
+ "text": [
570
+ "Validation Pearson Coef: 0.1046 | Loss: 15321.9877\n"
571
+ ]
572
+ },
573
+ {
574
+ "name": "stderr",
575
+ "output_type": "stream",
576
+ "text": [
577
+ "Epoch 8/10: 100%|██████████| 13/13 [00:06<00:00, 1.91it/s]\n"
578
+ ]
579
+ },
580
+ {
581
+ "name": "stdout",
582
+ "output_type": "stream",
583
+ "text": [
584
+ "Training Loss: 14938.4061\n"
585
+ ]
586
+ },
587
+ {
588
+ "name": "stderr",
589
+ "output_type": "stream",
590
+ "text": [
591
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.91it/s]\n"
592
+ ]
593
+ },
594
+ {
595
+ "name": "stdout",
596
+ "output_type": "stream",
597
+ "text": [
598
+ "Validation Pearson Coef: 0.1099 | Loss: 15279.2798\n",
599
+ "✅ New best model saved with Pearson: 0.1099\n"
600
+ ]
601
+ },
602
+ {
603
+ "name": "stderr",
604
+ "output_type": "stream",
605
+ "text": [
606
+ "Epoch 9/10: 100%|██████████| 13/13 [00:06<00:00, 1.89it/s]\n"
607
+ ]
608
+ },
609
+ {
610
+ "name": "stdout",
611
+ "output_type": "stream",
612
+ "text": [
613
+ "Training Loss: 14854.4186\n"
614
+ ]
615
+ },
616
+ {
617
+ "name": "stderr",
618
+ "output_type": "stream",
619
+ "text": [
620
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.88it/s]\n"
621
+ ]
622
+ },
623
+ {
624
+ "name": "stdout",
625
+ "output_type": "stream",
626
+ "text": [
627
+ "Validation Pearson Coef: 0.1111 | Loss: 15291.8269\n",
628
+ "✅ New best model saved with Pearson: 0.1111\n"
629
+ ]
630
+ },
631
+ {
632
+ "name": "stderr",
633
+ "output_type": "stream",
634
+ "text": [
635
+ "Epoch 10/10: 100%|██████████| 13/13 [00:07<00:00, 1.85it/s]\n"
636
+ ]
637
+ },
638
+ {
639
+ "name": "stdout",
640
+ "output_type": "stream",
641
+ "text": [
642
+ "Training Loss: 14737.5036\n"
643
+ ]
644
+ },
645
+ {
646
+ "name": "stderr",
647
+ "output_type": "stream",
648
+ "text": [
649
+ "Validation: 100%|██████████| 4/4 [00:01<00:00, 2.71it/s]\n"
650
+ ]
651
+ },
652
+ {
653
+ "name": "stdout",
654
+ "output_type": "stream",
655
+ "text": [
656
+ "Validation Pearson Coef: 0.1117 | Loss: 15297.1565\n",
657
+ "✅ New best model saved with Pearson: 0.1117\n",
658
+ "Model loaded from best_mlp_model.pt with best Pearson: 0.1117\n"
659
+ ]
660
+ },
661
+ {
662
+ "name": "stderr",
663
+ "output_type": "stream",
664
+ "text": [
665
+ "Predicting: 100%|██████████| 17/17 [00:04<00:00, 3.75it/s]\n"
666
+ ]
667
+ },
668
+ {
669
+ "name": "stdout",
670
+ "output_type": "stream",
671
+ "text": [
672
+ "\n",
673
+ "Saved: submission_mlp.csv\n",
674
+ "\n",
675
+ "Saved: submission_ensemble_double.csv (XGBoost: 90.0%, MLP: 10.0%)\n",
676
+ "\n",
677
+ "============================================================\n",
678
+ "FINAL SUMMARY\n",
679
+ "============================================================\n",
680
+ "\n",
681
+ "Best outlier strategy: DOUBLE\n",
682
+ "Best XGBoost CV score: 0.1108\n",
683
+ "\n",
684
+ "Strategy comparison (XGBoost ensemble scores):\n",
685
+ " reduce: 0.1101\n",
686
+ " remove: 0.1107\n",
687
+ " double: 0.1108\n",
688
+ " none: 0.1106\n",
689
+ "\n",
690
+ "Created submission files:\n",
691
+ "1. submission_xgboost_double.csv - XGBoost with double strategy\n",
692
+ "2. submission_mlp.csv - MLP only\n",
693
+ "3. submission_ensemble_double.csv - 90% XGBoost + 10% MLP\n",
694
+ "\n",
695
+ "Sample predictions (first 10 rows):\n",
696
+ " ID XGBoost MLP Ensemble\n",
697
+ "0 1 0.035090 0.217608 0.053342\n",
698
+ "1 2 0.018095 -0.070474 0.009238\n",
699
+ "2 3 0.136497 0.070964 0.129943\n",
700
+ "3 4 -0.085066 -0.007894 -0.077348\n",
701
+ "4 5 0.211597 0.238769 0.214314\n",
702
+ "5 6 -0.172887 -0.198609 -0.175459\n",
703
+ "6 7 -0.419656 -0.523997 -0.430090\n",
704
+ "7 8 -0.154374 -0.422084 -0.181145\n",
705
+ "8 9 0.222065 -0.204875 0.179371\n",
706
+ "9 10 0.096913 0.106206 0.097842\n",
707
+ "\n",
708
+ "============================================================\n",
709
+ "RECOMMENDATIONS\n",
710
+ "============================================================\n",
711
+ "\n",
712
+ "1. Outlier Handling Impact:\n",
713
+ " ! Doubling outlier weights performs better\n",
714
+ " → This suggests outliers contain valuable signal for extreme movements\n",
715
+ "\n",
716
+ "2. Overfitting Risk Assessment:\n",
717
+ " ✓ Emphasizing outliers improves performance\n",
718
+ " → Model benefits from learning extreme patterns\n",
719
+ "\n",
720
+ "3. Next Steps:\n",
721
+ " • Test different outlier fractions (0.05%, 0.2%, 0.5%)\n",
722
+ " • Try adaptive outlier detection per time slice\n",
723
+ " • Consider feature-specific outlier handling\n",
724
+ " • Monitor LB score vs CV score for overfitting signs\n",
725
+ "\n",
726
+ "4. Outlier Insights:\n",
727
+ " • Detected 525 outliers (0.10% of data)\n",
728
+ " • Consider creating synthetic outliers if 'double' strategy works well\n",
729
+ " • Analyze time distribution of outliers for market regime insights\n"
730
+ ]
731
+ }
732
+ ],
733
+ "source": [
734
+ "from sklearn.model_selection import KFold, train_test_split\n",
735
+ "from xgboost import XGBRegressor\n",
736
+ "from scipy.stats import pearsonr\n",
737
+ "import numpy as np\n",
738
+ "import pandas as pd\n",
739
+ "from sklearn.ensemble import RandomForestRegressor\n",
740
+ "from sklearn.preprocessing import StandardScaler\n",
741
+ "from tqdm import tqdm\n",
742
+ "import random\n",
743
+ "import warnings\n",
744
+ "warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n",
745
+ "\n",
746
+ "# Deep Learning imports\n",
747
+ "import torch\n",
748
+ "import torch.nn as nn\n",
749
+ "import torch.optim as optim\n",
750
+ "from torch.utils.data import DataLoader, TensorDataset\n",
751
+ "\n",
752
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
753
+ "print(f\"Using device: {device}\")\n",
754
+ "\n",
755
+ "# =========================\n",
756
+ "# Configuration\n",
757
+ "# =========================\n",
758
+ "class Config:\n",
759
+ " TRAIN_PATH = \"/kaggle/input/drw-crypto-market-prediction/train.parquet\"\n",
760
+ " TEST_PATH = \"/kaggle/input/drw-crypto-market-prediction/test.parquet\"\n",
761
+ " SUBMISSION_PATH = \"/kaggle/input/drw-crypto-market-prediction/sample_submission.csv\"\n",
762
+ "\n",
763
+ " FEATURES = [\n",
764
+ " \"X863\", \"X856\", \"X598\", \"X862\", \"X385\", \"X852\", \"X603\", \"X860\", \"X674\",\n",
765
+ " \"X415\", \"X345\", \"X855\", \"X174\", \"X302\", \"X178\", \"X168\", \"X612\", \"bid_qty\",\n",
766
+ " \"ask_qty\", \"buy_qty\", \"sell_qty\", \"volume\", \"X888\", \"X421\", \"X333\",\"X817\", \n",
767
+ " \"X586\", \"X292\"\n",
768
+ " ]\n",
769
+ " \n",
770
+ " # Features for MLP (subset)\n",
771
+ " MLP_FEATURES = [\n",
772
+ " \"X863\", \"X856\", \"X344\", \"X598\", \"X862\", \"X385\", \"X852\", \"X603\", \"X860\", \"X674\",\n",
773
+ " \"X415\", \"X345\", \"X137\", \"X855\", \"X174\", \"X302\", \"X178\", \"X532\", \"X168\", \"X612\",\n",
774
+ " \"bid_qty\", \"ask_qty\", \"buy_qty\", \"sell_qty\", \"volume\"\n",
775
+ " ]\n",
776
+ "\n",
777
+ " LABEL_COLUMN = \"label\"\n",
778
+ " N_FOLDS = 3\n",
779
+ " RANDOM_STATE = 42\n",
780
+ " OUTLIER_FRACTION = 0.001 # 0.1% of records\n",
781
+ " \n",
782
+ " # Outlier handling strategies to test\n",
783
+ " OUTLIER_STRATEGIES = [\"reduce\", \"remove\", \"double\", \"none\"]\n",
784
+ "\n",
785
+ "XGB_PARAMS = {\n",
786
+ " \"tree_method\": \"hist\",\n",
787
+ " \"device\": \"gpu\" if torch.cuda.is_available() else \"cpu\",\n",
788
+ " \"colsample_bylevel\": 0.4778,\n",
789
+ " \"colsample_bynode\": 0.3628,\n",
790
+ " \"colsample_bytree\": 0.7107,\n",
791
+ " \"gamma\": 1.7095,\n",
792
+ " \"learning_rate\": 0.02213,\n",
793
+ " \"max_depth\": 20,\n",
794
+ " \"max_leaves\": 12,\n",
795
+ " \"min_child_weight\": 16,\n",
796
+ " \"n_estimators\": 1667,\n",
797
+ " \"subsample\": 0.06567,\n",
798
+ " \"reg_alpha\": 39.3524,\n",
799
+ " \"reg_lambda\": 75.4484,\n",
800
+ " \"verbosity\": 0,\n",
801
+ " \"random_state\": Config.RANDOM_STATE,\n",
802
+ " \"n_jobs\": -1\n",
803
+ "}\n",
804
+ "\n",
805
+ "LEARNERS = [\n",
806
+ " {\"name\": \"xgb\", \"Estimator\": XGBRegressor, \"params\": XGB_PARAMS}\n",
807
+ "]\n",
808
+ "\n",
809
+ "# =========================\n",
810
+ "# Deep Learning Components\n",
811
+ "# =========================\n",
812
+ "def set_seed(seed=42):\n",
813
+ " random.seed(seed)\n",
814
+ " np.random.seed(seed)\n",
815
+ " torch.manual_seed(seed)\n",
816
+ " torch.cuda.manual_seed(seed)\n",
817
+ " torch.cuda.manual_seed_all(seed)\n",
818
+ " torch.backends.cudnn.deterministic = True\n",
819
+ " torch.backends.cudnn.benchmark = False\n",
820
+ "\n",
821
+ "def get_activation_function(name):\n",
822
+ " \"\"\"Return the activation function based on the name.\"\"\"\n",
823
+ " if name == None:\n",
824
+ " return None\n",
825
+ " name = name.lower()\n",
826
+ " if name == 'relu':\n",
827
+ " return nn.ReLU()\n",
828
+ " elif name == 'tanh':\n",
829
+ " return nn.Tanh()\n",
830
+ " elif name == 'sigmoid':\n",
831
+ " return nn.Sigmoid()\n",
832
+ " else:\n",
833
+ " raise ValueError(f\"Unsupported activation function: {name}\")\n",
834
+ "\n",
835
+ "class MLP(nn.Module):\n",
836
+ " def __init__(self, dropout_rate=0.6, \n",
837
+ " layers=[128, 64], activation='relu', last_activation=None):\n",
838
+ " super(MLP, self).__init__()\n",
839
+ " \n",
840
+ " self.linears = nn.ModuleList()\n",
841
+ " self.activation = get_activation_function(activation)\n",
842
+ " self.last_activation = get_activation_function(last_activation)\n",
843
+ "\n",
844
+ " for i in range(len(layers) - 1):\n",
845
+ " self.linears.append(nn.Linear(layers[i], layers[i + 1]))\n",
846
+ "\n",
847
+ " self.dropout = nn.Dropout(dropout_rate)\n",
848
+ "\n",
849
+ " def forward(self, x):\n",
850
+ " for k in range(len(self.linears) - 1):\n",
851
+ " x = self.activation(self.linears[k](x))\n",
852
+ " x = self.dropout(x)\n",
853
+ " x = self.linears[-1](x)\n",
854
+ " if self.last_activation is not None:\n",
855
+ " x = self.last_activation(x)\n",
856
+ " return x\n",
857
+ "\n",
858
+ "class Checkpointer:\n",
859
+ " def __init__(self, path=\"best_model.pt\"):\n",
860
+ " self.path = path\n",
861
+ " self.best_pearson = -np.inf\n",
862
+ "\n",
863
+ " def load(self, model):\n",
864
+ " \"\"\"Load the best model weights.\"\"\"\n",
865
+ " model.load_state_dict(torch.load(self.path, map_location=device))\n",
866
+ " print(f\"Model loaded from {self.path} with best Pearson: {self.best_pearson:.4f}\")\n",
867
+ " return model\n",
868
+ "\n",
869
+ " def __call__(self, pearson_coef, model):\n",
870
+ " \"\"\"Call method to save the model if the Pearson coefficient is better than the best one.\"\"\"\n",
871
+ " if pearson_coef > self.best_pearson:\n",
872
+ " self.best_pearson = pearson_coef\n",
873
+ " torch.save(model.state_dict(), self.path)\n",
874
+ " print(f\"✅ New best model saved with Pearson: {pearson_coef:.4f}\")\n",
875
+ "\n",
876
+ "def get_dataloaders(X, Y, hparams, device, shuffle=True):\n",
877
+ " \"\"\"Create DataLoader for training and validation datasets.\"\"\"\n",
878
+ " X_tensor = torch.tensor(X, dtype=torch.float32, device=device)\n",
879
+ " if Y is not None:\n",
880
+ " Y_tensor = torch.tensor(Y.values if hasattr(Y, 'values') else Y, \n",
881
+ " dtype=torch.float32, device=device).unsqueeze(1)\n",
882
+ " dataset = TensorDataset(X_tensor, Y_tensor)\n",
883
+ " else:\n",
884
+ " dataset = TensorDataset(X_tensor)\n",
885
+ " \n",
886
+ " dataloader = DataLoader(dataset, batch_size=hparams[\"batch_size\"], shuffle=shuffle, \n",
887
+ " generator=torch.Generator().manual_seed(hparams[\"seed\"]))\n",
888
+ " return dataloader\n",
889
+ "\n",
890
+ "# =========================\n",
891
+ "# Feature Engineering\n",
892
+ "# =========================\n",
893
+ "def add_features(df):\n",
894
+ " # Original features\n",
895
+ " df['bid_ask_interaction'] = df['bid_qty'] * df['ask_qty']\n",
896
+ " df['bid_buy_interaction'] = df['bid_qty'] * df['buy_qty']\n",
897
+ " df['bid_sell_interaction'] = df['bid_qty'] * df['sell_qty']\n",
898
+ " df['ask_buy_interaction'] = df['ask_qty'] * df['buy_qty']\n",
899
+ " df['ask_sell_interaction'] = df['ask_qty'] * df['sell_qty']\n",
900
+ "\n",
901
+ " df['volume_weighted_sell'] = df['sell_qty'] * df['volume']\n",
902
+ " df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + 1e-10)\n",
903
+ " df['selling_pressure'] = df['sell_qty'] / (df['volume'] + 1e-10)\n",
904
+ " df['log_volume'] = np.log1p(df['volume'])\n",
905
+ "\n",
906
+ " df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + 1e-10)\n",
907
+ " df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + 1e-10)\n",
908
+ " df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + 1e-10)\n",
909
+ " df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + 1e-10)\n",
910
+ " \n",
911
+ " # === NEW MICROSTRUCTURE FEATURES ===\n",
912
+ " \n",
913
+ " # Price Pressure Indicators\n",
914
+ " df['net_order_flow'] = df['buy_qty'] - df['sell_qty']\n",
915
+ " df['normalized_net_flow'] = df['net_order_flow'] / (df['volume'] + 1e-10)\n",
916
+ " df['buying_pressure'] = df['buy_qty'] / (df['volume'] + 1e-10)\n",
917
+ " df['volume_weighted_buy'] = df['buy_qty'] * df['volume']\n",
918
+ " \n",
919
+ " # Liquidity Depth Measures\n",
920
+ " df['total_depth'] = df['bid_qty'] + df['ask_qty']\n",
921
+ " df['depth_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['total_depth'] + 1e-10)\n",
922
+ " df['relative_spread'] = np.abs(df['bid_qty'] - df['ask_qty']) / (df['total_depth'] + 1e-10)\n",
923
+ " df['log_depth'] = np.log1p(df['total_depth'])\n",
924
+ " \n",
925
+ " # Order Flow Toxicity Proxies\n",
926
+ " df['kyle_lambda'] = np.abs(df['net_order_flow']) / (df['volume'] + 1e-10)\n",
927
+ " df['flow_toxicity'] = np.abs(df['order_flow_imbalance']) * df['volume']\n",
928
+ " df['aggressive_flow_ratio'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)\n",
929
+ " \n",
930
+ " # Market Activity Indicators\n",
931
+ " df['volume_depth_ratio'] = df['volume'] / (df['total_depth'] + 1e-10)\n",
932
+ " df['activity_intensity'] = (df['buy_qty'] + df['sell_qty']) / (df['volume'] + 1e-10)\n",
933
+ " df['log_buy_qty'] = np.log1p(df['buy_qty'])\n",
934
+ " df['log_sell_qty'] = np.log1p(df['sell_qty'])\n",
935
+ " df['log_bid_qty'] = np.log1p(df['bid_qty'])\n",
936
+ " df['log_ask_qty'] = np.log1p(df['ask_qty'])\n",
937
+ " \n",
938
+ " # Microstructure Volatility Proxies\n",
939
+ " df['realized_spread_proxy'] = 2 * np.abs(df['net_order_flow']) / (df['volume'] + 1e-10)\n",
940
+ " df['price_impact_proxy'] = df['net_order_flow'] / (df['total_depth'] + 1e-10)\n",
941
+ " df['quote_volatility_proxy'] = np.abs(df['depth_imbalance'])\n",
942
+ " \n",
943
+ " # Complex Interaction Terms\n",
944
+ " df['flow_depth_interaction'] = df['net_order_flow'] * df['total_depth']\n",
945
+ " df['imbalance_volume_interaction'] = df['order_flow_imbalance'] * df['volume']\n",
946
+ " df['depth_volume_interaction'] = df['total_depth'] * df['volume']\n",
947
+ " df['buy_sell_spread'] = np.abs(df['buy_qty'] - df['sell_qty'])\n",
948
+ " df['bid_ask_spread'] = np.abs(df['bid_qty'] - df['ask_qty'])\n",
949
+ " \n",
950
+ " # Information Asymmetry Measures\n",
951
+ " df['trade_informativeness'] = df['net_order_flow'] / (df['bid_qty'] + df['ask_qty'] + 1e-10)\n",
952
+ " df['execution_shortfall_proxy'] = df['buy_sell_spread'] / (df['volume'] + 1e-10)\n",
953
+ " df['adverse_selection_proxy'] = df['net_order_flow'] / (df['total_depth'] + 1e-10) * df['volume']\n",
954
+ " \n",
955
+ " # Market Efficiency Indicators\n",
956
+ " df['fill_probability'] = df['volume'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)\n",
957
+ " df['execution_rate'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)\n",
958
+ " df['market_efficiency'] = df['volume'] / (df['bid_ask_spread'] + 1e-10)\n",
959
+ " \n",
960
+ " # Non-linear Transformations\n",
961
+ " df['sqrt_volume'] = np.sqrt(df['volume'])\n",
962
+ " df['sqrt_depth'] = np.sqrt(df['total_depth'])\n",
963
+ " df['volume_squared'] = df['volume'] ** 2\n",
964
+ " df['imbalance_squared'] = df['order_flow_imbalance'] ** 2\n",
965
+ " \n",
966
+ " # Relative Measures\n",
967
+ " df['bid_ratio'] = df['bid_qty'] / (df['total_depth'] + 1e-10)\n",
968
+ " df['ask_ratio'] = df['ask_qty'] / (df['total_depth'] + 1e-10)\n",
969
+ " df['buy_ratio'] = df['buy_qty'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)\n",
970
+ " df['sell_ratio'] = df['sell_qty'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)\n",
971
+ " \n",
972
+ " # Market Stress Indicators\n",
973
+ " df['liquidity_consumption'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)\n",
974
+ " df['market_stress'] = df['volume'] / (df['total_depth'] + 1e-10) * np.abs(df['order_flow_imbalance'])\n",
975
+ " df['depth_depletion'] = df['volume'] / (df['bid_qty'] + df['ask_qty'] + 1e-10)\n",
976
+ " \n",
977
+ " # Directional Indicators\n",
978
+ " df['net_buying_ratio'] = df['net_order_flow'] / (df['volume'] + 1e-10)\n",
979
+ " df['directional_volume'] = df['net_order_flow'] * np.log1p(df['volume'])\n",
980
+ " df['signed_volume'] = np.sign(df['net_order_flow']) * df['volume']\n",
981
+ " \n",
982
+ " # Replace infinities and NaNs\n",
983
+ " df = df.replace([np.inf, -np.inf], 0).fillna(0)\n",
984
+ " \n",
985
+ " return df\n",
986
+ "\n",
987
+ "def create_time_decay_weights(n: int, decay: float = 0.9) -> np.ndarray:\n",
988
+ " positions = np.arange(n)\n",
989
+ " normalized = positions / (n - 1) if n > 1 else positions\n",
990
+ " weights = decay ** (1.0 - normalized)\n",
991
+ " return weights * n / weights.sum()\n",
992
+ "\n",
993
+ "def detect_outliers_and_adjust_weights(X, y, sample_weights, outlier_fraction=0.001, strategy=\"reduce\"):\n",
994
+ " \"\"\"\n",
995
+ " Detect outliers based on prediction residuals and adjust their weights.\n",
996
+ " \n",
997
+ " Strategies:\n",
998
+ " - \"reduce\": Current approach - reduce weights to 0.2-0.8x\n",
999
+ " - \"remove\": Set outlier weights to 0 (effectively removing them)\n",
1000
+ " - \"double\": Double the weights of outliers\n",
1001
+ " - \"none\": No adjustment\n",
1002
+ " \"\"\"\n",
1003
+ " if strategy == \"none\":\n",
1004
+ " return sample_weights, np.zeros(len(y), dtype=bool)\n",
1005
+ " \n",
1006
+ " # Ensure we have at least some samples to detect outliers\n",
1007
+ " n_samples = len(y)\n",
1008
+ " if n_samples < 100: # Not enough samples for meaningful outlier detection\n",
1009
+ " print(f\" Too few samples ({n_samples}) for outlier detection\")\n",
1010
+ " return sample_weights, np.zeros(n_samples, dtype=bool)\n",
1011
+ " \n",
1012
+ " # Train a simple model to get residuals\n",
1013
+ " rf = RandomForestRegressor(n_estimators=50, max_depth=10, random_state=42, n_jobs=-1)\n",
1014
+ " rf.fit(X, y, sample_weight=sample_weights)\n",
1015
+ " \n",
1016
+ " # Calculate residuals\n",
1017
+ " predictions = rf.predict(X)\n",
1018
+ " residuals = np.abs(y - predictions)\n",
1019
+ " \n",
1020
+ " # Find threshold for top outlier_fraction\n",
1021
+ " # Ensure we have at least 1 outlier\n",
1022
+ " n_outliers = max(1, int(len(residuals) * outlier_fraction))\n",
1023
+ " \n",
1024
+ " # Sort residuals and get threshold\n",
1025
+ " sorted_residuals = np.sort(residuals)\n",
1026
+ " threshold = sorted_residuals[-n_outliers] if n_outliers <= len(residuals) else sorted_residuals[-1]\n",
1027
+ " \n",
1028
+ " # Create outlier mask\n",
1029
+ " outlier_mask = residuals >= threshold\n",
1030
+ " \n",
1031
+ " # Ensure we have exactly n_outliers (handle ties at threshold)\n",
1032
+ " if np.sum(outlier_mask) > n_outliers:\n",
1033
+ " # If we have too many due to ties, randomly select to get exact number\n",
1034
+ " outlier_indices = np.where(outlier_mask)[0]\n",
1035
+ " np.random.seed(42)\n",
1036
+ " selected_indices = np.random.choice(outlier_indices, n_outliers, replace=False)\n",
1037
+ " outlier_mask = np.zeros(len(y), dtype=bool)\n",
1038
+ " outlier_mask[selected_indices] = True\n",
1039
+ " \n",
1040
+ " # Adjust weights based on strategy\n",
1041
+ " adjusted_weights = sample_weights.copy()\n",
1042
+ " \n",
1043
+ " if outlier_mask.any():\n",
1044
+ " if strategy == \"reduce\":\n",
1045
+ " # Original approach: reduce weights proportionally\n",
1046
+ " outlier_residuals = residuals[outlier_mask]\n",
1047
+ " min_outlier_res = outlier_residuals.min()\n",
1048
+ " max_outlier_res = outlier_residuals.max()\n",
1049
+ " \n",
1050
+ " if max_outlier_res > min_outlier_res:\n",
1051
+ " normalized_residuals = (outlier_residuals - min_outlier_res) / (max_outlier_res - min_outlier_res)\n",
1052
+ " else:\n",
1053
+ " normalized_residuals = np.ones_like(outlier_residuals)\n",
1054
+ " \n",
1055
+ " weight_factors = 0.8 - 0.6 * normalized_residuals\n",
1056
+ " adjusted_weights[outlier_mask] *= weight_factors\n",
1057
+ " \n",
1058
+ " elif strategy == \"remove\":\n",
1059
+ " # Set outlier weights to 0\n",
1060
+ " adjusted_weights[outlier_mask] = 0\n",
1061
+ " \n",
1062
+ " elif strategy == \"double\":\n",
1063
+ " # Double the weights of outliers\n",
1064
+ " adjusted_weights[outlier_mask] *= 2.0\n",
1065
+ " \n",
1066
+ " print(f\" Strategy '{strategy}': Adjusted {n_outliers} outliers ({outlier_fraction*100:.1f}% of data)\")\n",
1067
+ " \n",
1068
+ " return adjusted_weights, outlier_mask\n",
1069
+ "\n",
1070
+ "def load_data():\n",
1071
+ " # Load data with all features available\n",
1072
+ " all_features = list(set(Config.FEATURES + Config.MLP_FEATURES))\n",
1073
+ " train_df = pd.read_parquet(Config.TRAIN_PATH, columns=all_features + [Config.LABEL_COLUMN])\n",
1074
+ " test_df = pd.read_parquet(Config.TEST_PATH, columns=all_features)\n",
1075
+ " submission_df = pd.read_csv(Config.SUBMISSION_PATH)\n",
1076
+ " print(f\"Loaded data - Train: {train_df.shape}, Test: {test_df.shape}, Submission: {submission_df.shape}\")\n",
1077
+ "\n",
1078
+ " # Add features\n",
1079
+ " train_df = add_features(train_df)\n",
1080
+ " test_df = add_features(test_df)\n",
1081
+ "\n",
1082
+ " # Update Config.FEATURES with new features\n",
1083
+ " Config.FEATURES += [\n",
1084
+ " \"log_volume\", 'bid_ask_interaction', 'bid_buy_interaction', 'bid_sell_interaction', \n",
1085
+ " 'ask_buy_interaction', 'ask_sell_interaction', 'net_order_flow', 'normalized_net_flow',\n",
1086
+ " 'buying_pressure', 'volume_weighted_buy', 'total_depth', 'depth_imbalance',\n",
1087
+ " 'relative_spread', 'log_depth', 'kyle_lambda', 'flow_toxicity', 'aggressive_flow_ratio',\n",
1088
+ " 'volume_depth_ratio', 'activity_intensity', 'log_buy_qty', 'log_sell_qty',\n",
1089
+ " 'log_bid_qty', 'log_ask_qty', 'realized_spread_proxy', 'price_impact_proxy',\n",
1090
+ " 'quote_volatility_proxy', 'flow_depth_interaction', 'imbalance_volume_interaction',\n",
1091
+ " 'depth_volume_interaction', 'buy_sell_spread', 'bid_ask_spread', 'trade_informativeness',\n",
1092
+ " 'execution_shortfall_proxy', 'adverse_selection_proxy', 'fill_probability',\n",
1093
+ " 'execution_rate', 'market_efficiency', 'sqrt_volume', 'sqrt_depth', 'volume_squared',\n",
1094
+ " 'imbalance_squared', 'bid_ratio', 'ask_ratio', 'buy_ratio', 'sell_ratio',\n",
1095
+ " 'liquidity_consumption', 'market_stress', 'depth_depletion', 'net_buying_ratio',\n",
1096
+ " 'directional_volume', 'signed_volume'\n",
1097
+ " ]\n",
1098
+ "\n",
1099
+ " return train_df.reset_index(drop=True), test_df.reset_index(drop=True), submission_df\n",
1100
+ "\n",
1101
+ "def get_model_slices(n_samples: int):\n",
1102
+ " # Original 5 slices\n",
1103
+ " base_slices = [\n",
1104
+ " {\"name\": \"full_data\", \"cutoff\": 0, \"is_oldest\": False, \"outlier_adjusted\": False},\n",
1105
+ " {\"name\": \"last_90pct\", \"cutoff\": int(0.10 * n_samples), \"is_oldest\": False, \"outlier_adjusted\": False},\n",
1106
+ " {\"name\": \"last_85pct\", \"cutoff\": int(0.15 * n_samples), \"is_oldest\": False, \"outlier_adjusted\": False},\n",
1107
+ " {\"name\": \"last_80pct\", \"cutoff\": int(0.20 * n_samples), \"is_oldest\": False, \"outlier_adjusted\": False},\n",
1108
+ " {\"name\": \"oldest_25pct\", \"cutoff\": int(0.25 * n_samples), \"is_oldest\": True, \"outlier_adjusted\": False},\n",
1109
+ " ]\n",
1110
+ " \n",
1111
+ " # Duplicate slices with outlier adjustment\n",
1112
+ " outlier_adjusted_slices = []\n",
1113
+ " for slice_info in base_slices:\n",
1114
+ " adjusted_slice = slice_info.copy()\n",
1115
+ " adjusted_slice[\"name\"] = f\"{slice_info['name']}_outlier_adj\"\n",
1116
+ " adjusted_slice[\"outlier_adjusted\"] = True\n",
1117
+ " outlier_adjusted_slices.append(adjusted_slice)\n",
1118
+ " \n",
1119
+ " return base_slices + outlier_adjusted_slices\n",
1120
+ "\n",
1121
+ "# =========================\n",
1122
+ "# Outlier Analysis Functions\n",
1123
+ "# =========================\n",
1124
+ "def analyze_outliers(train_df):\n",
1125
+ " \"\"\"Analyze outliers in the training data\"\"\"\n",
1126
+ " print(\"\\n=== Outlier Analysis ===\")\n",
1127
+ " \n",
1128
+ " X = train_df[Config.FEATURES].values\n",
1129
+ " y = train_df[Config.LABEL_COLUMN].values\n",
1130
+ " \n",
1131
+ " # Get base weights\n",
1132
+ " sample_weights = create_time_decay_weights(len(train_df))\n",
1133
+ " \n",
1134
+ " # Detect outliers\n",
1135
+ " _, outlier_mask = detect_outliers_and_adjust_weights(\n",
1136
+ " X, y, sample_weights, outlier_fraction=Config.OUTLIER_FRACTION, strategy=\"reduce\"\n",
1137
+ " )\n",
1138
+ " \n",
1139
+ " # Analyze outlier characteristics\n",
1140
+ " outlier_indices = np.where(outlier_mask)[0]\n",
1141
+ " n_outliers = len(outlier_indices)\n",
1142
+ " \n",
1143
+ " print(f\"\\nTotal outliers detected: {n_outliers} ({n_outliers/len(train_df)*100:.2f}%)\")\n",
1144
+ " \n",
1145
+ " if n_outliers > 0:\n",
1146
+ " # Statistical analysis\n",
1147
+ " outlier_labels = y[outlier_mask]\n",
1148
+ " normal_labels = y[~outlier_mask]\n",
1149
+ " \n",
1150
+ " print(f\"\\nLabel statistics:\")\n",
1151
+ " print(f\" Normal samples - Mean: {normal_labels.mean():.4f}, Std: {normal_labels.std():.4f}\")\n",
1152
+ " print(f\" Outlier samples - Mean: {outlier_labels.mean():.4f}, Std: {outlier_labels.std():.4f}\")\n",
1153
+ " print(f\" Label range - Normal: [{normal_labels.min():.4f}, {normal_labels.max():.4f}]\")\n",
1154
+ " print(f\" Label range - Outliers: [{outlier_labels.min():.4f}, {outlier_labels.max():.4f}]\")\n",
1155
+ " \n",
1156
+ " # Feature analysis for outliers\n",
1157
+ " print(f\"\\nTop features with extreme values in outliers:\")\n",
1158
+ " feature_names = Config.FEATURES[:20] # Analyze first 20 features\n",
1159
+ " outlier_features = train_df.iloc[outlier_indices][feature_names]\n",
1160
+ " normal_features = train_df.iloc[~outlier_mask][feature_names]\n",
1161
+ " \n",
1162
+ " feature_diffs = []\n",
1163
+ " for feat in feature_names:\n",
1164
+ " outlier_mean = outlier_features[feat].mean()\n",
1165
+ " normal_mean = normal_features[feat].mean()\n",
1166
+ " if normal_mean != 0:\n",
1167
+ " rel_diff = abs(outlier_mean - normal_mean) / abs(normal_mean)\n",
1168
+ " feature_diffs.append((feat, rel_diff, outlier_mean, normal_mean))\n",
1169
+ " \n",
1170
+ " feature_diffs.sort(key=lambda x: x[1], reverse=True)\n",
1171
+ " for feat, diff, out_mean, norm_mean in feature_diffs[:10]:\n",
1172
+ " print(f\" {feat}: {diff*100:.1f}% difference (outlier: {out_mean:.4f}, normal: {norm_mean:.4f})\")\n",
1173
+ " else:\n",
1174
+ " print(\"\\nNo outliers detected with current threshold. Consider adjusting outlier_fraction.\")\n",
1175
+ " \n",
1176
+ " return outlier_indices\n",
1177
+ "\n",
1178
+ "# =========================\n",
1179
+ "# XGBoost Training with Outlier Strategy Comparison\n",
1180
+ "# =========================\n",
1181
+ "def train_xgboost_with_outlier_comparison(train_df, test_df):\n",
1182
+ " \"\"\"Train XGBoost with different outlier handling strategies and compare results\"\"\"\n",
1183
+ " n_samples = len(train_df)\n",
1184
+ " \n",
1185
+ " # Store results for each strategy\n",
1186
+ " strategy_results = {strategy: {\"oof_scores\": [], \"slice_scores\": {}} \n",
1187
+ " for strategy in Config.OUTLIER_STRATEGIES}\n",
1188
+ " \n",
1189
+ " # For final ensemble\n",
1190
+ " best_strategy = \"reduce\" # Default to current approach\n",
1191
+ " best_score = -np.inf\n",
1192
+ " best_oof_preds = None\n",
1193
+ " best_test_preds = None\n",
1194
+ " \n",
1195
+ " for strategy in Config.OUTLIER_STRATEGIES:\n",
1196
+ " print(f\"\\n{'='*50}\")\n",
1197
+ " print(f\"Testing outlier strategy: {strategy.upper()}\")\n",
1198
+ " print(f\"{'='*50}\")\n",
1199
+ " \n",
1200
+ " # Get model slices for this strategy\n",
1201
+ " model_slices = get_model_slices(n_samples)\n",
1202
+ " \n",
1203
+ " oof_preds = {\n",
1204
+ " learner[\"name\"]: {s[\"name\"]: np.zeros(n_samples) for s in model_slices}\n",
1205
+ " for learner in LEARNERS\n",
1206
+ " }\n",
1207
+ " test_preds = {\n",
1208
+ " learner[\"name\"]: {s[\"name\"]: np.zeros(len(test_df)) for s in model_slices}\n",
1209
+ " for learner in LEARNERS\n",
1210
+ " }\n",
1211
+ " \n",
1212
+ " full_weights = create_time_decay_weights(n_samples)\n",
1213
+ " kf = KFold(n_splits=Config.N_FOLDS, shuffle=False)\n",
1214
+ " \n",
1215
+ " for fold, (train_idx, valid_idx) in enumerate(kf.split(train_df), start=1):\n",
1216
+ " print(f\"\\n--- Fold {fold}/{Config.N_FOLDS} ---\")\n",
1217
+ " X_valid = train_df.iloc[valid_idx][Config.FEATURES]\n",
1218
+ " y_valid = train_df.iloc[valid_idx][Config.LABEL_COLUMN]\n",
1219
+ " \n",
1220
+ " for s in model_slices:\n",
1221
+ " cutoff = s[\"cutoff\"]\n",
1222
+ " slice_name = s[\"name\"]\n",
1223
+ " is_oldest = s[\"is_oldest\"]\n",
1224
+ " outlier_adjusted = s.get(\"outlier_adjusted\", False)\n",
1225
+ " \n",
1226
+ " if is_oldest:\n",
1227
+ " subset = train_df.iloc[:cutoff].reset_index(drop=True)\n",
1228
+ " rel_idx = train_idx[train_idx < cutoff]\n",
1229
+ " sw = np.ones(len(rel_idx))\n",
1230
+ " else:\n",
1231
+ " subset = train_df.iloc[cutoff:].reset_index(drop=True)\n",
1232
+ " rel_idx = train_idx[train_idx >= cutoff] - cutoff\n",
1233
+ " sw = create_time_decay_weights(len(subset))[rel_idx] if cutoff > 0 else full_weights[train_idx]\n",
1234
+ " \n",
1235
+ " X_train = subset.iloc[rel_idx][Config.FEATURES]\n",
1236
+ " y_train = subset.iloc[rel_idx][Config.LABEL_COLUMN]\n",
1237
+ " \n",
1238
+ " # Apply outlier strategy if this is an outlier-adjusted slice\n",
1239
+ " if outlier_adjusted and len(X_train) > 100:\n",
1240
+ " sw, _ = detect_outliers_and_adjust_weights(\n",
1241
+ " X_train.values, \n",
1242
+ " y_train.values, \n",
1243
+ " sw, \n",
1244
+ " outlier_fraction=Config.OUTLIER_FRACTION,\n",
1245
+ " strategy=strategy\n",
1246
+ " )\n",
1247
+ " \n",
1248
+ " print(f\" Training slice: {slice_name}, samples: {len(X_train)}\")\n",
1249
+ " \n",
1250
+ " for learner in LEARNERS:\n",
1251
+ " model = learner[\"Estimator\"](**learner[\"params\"])\n",
1252
+ " model.fit(X_train, y_train, sample_weight=sw, eval_set=[(X_valid, y_valid)], verbose=False)\n",
1253
+ " \n",
1254
+ " if is_oldest:\n",
1255
+ " oof_preds[learner[\"name\"]][slice_name][valid_idx] = model.predict(\n",
1256
+ " train_df.iloc[valid_idx][Config.FEATURES]\n",
1257
+ " )\n",
1258
+ " else:\n",
1259
+ " mask = valid_idx >= cutoff\n",
1260
+ " if mask.any():\n",
1261
+ " idxs = valid_idx[mask]\n",
1262
+ " oof_preds[learner[\"name\"]][slice_name][idxs] = model.predict(\n",
1263
+ " train_df.iloc[idxs][Config.FEATURES]\n",
1264
+ " )\n",
1265
+ " if cutoff > 0 and (~mask).any():\n",
1266
+ " base_slice_name = slice_name.replace(\"_outlier_adj\", \"\")\n",
1267
+ " if base_slice_name == slice_name:\n",
1268
+ " fallback_slice = \"full_data\"\n",
1269
+ " else:\n",
1270
+ " fallback_slice = \"full_data_outlier_adj\"\n",
1271
+ " oof_preds[learner[\"name\"]][slice_name][valid_idx[~mask]] = oof_preds[learner[\"name\"]][fallback_slice][\n",
1272
+ " valid_idx[~mask]\n",
1273
+ " ]\n",
1274
+ " \n",
1275
+ " test_preds[learner[\"name\"]][slice_name] += model.predict(test_df[Config.FEATURES])\n",
1276
+ " \n",
1277
+ " # Normalize test predictions\n",
1278
+ " for learner_name in test_preds:\n",
1279
+ " for slice_name in test_preds[learner_name]:\n",
1280
+ " test_preds[learner_name][slice_name] /= Config.N_FOLDS\n",
1281
+ " \n",
1282
+ " # Evaluate this strategy\n",
1283
+ " learner_name = 'xgb'\n",
1284
+ " \n",
1285
+ " # Weights for ensemble\n",
1286
+ " weights = np.array([\n",
1287
+ " 1.0, # full_data\n",
1288
+ " 1.0, # last_90pct\n",
1289
+ " 1.0, # last_85pct\n",
1290
+ " 1.0, # last_80pct\n",
1291
+ " 0.25, # oldest_25pct\n",
1292
+ " 0.9, # full_data_outlier_adj\n",
1293
+ " 0.9, # last_90pct_outlier_adj\n",
1294
+ " 0.9, # last_85pct_outlier_adj\n",
1295
+ " 0.9, # last_80pct_outlier_adj\n",
1296
+ " 0.2 # oldest_25pct_outlier_adj\n",
1297
+ " ])\n",
1298
+ " weights = weights / weights.sum()\n",
1299
+ " \n",
1300
+ " oof_weighted = pd.DataFrame(oof_preds[learner_name]).values @ weights\n",
1301
+ " test_weighted = pd.DataFrame(test_preds[learner_name]).values @ weights\n",
1302
+ " score_weighted = pearsonr(train_df[Config.LABEL_COLUMN], oof_weighted)[0]\n",
1303
+ " \n",
1304
+ " print(f\"\\n{strategy.upper()} Strategy - Weighted Ensemble Pearson: {score_weighted:.4f}\")\n",
1305
+ " \n",
1306
+ " # Store individual slice scores\n",
1307
+ " slice_names = list(oof_preds[learner_name].keys())\n",
1308
+ " for i, slice_name in enumerate(slice_names):\n",
1309
+ " score = pearsonr(train_df[Config.LABEL_COLUMN], oof_preds[learner_name][slice_name])[0]\n",
1310
+ " strategy_results[strategy][\"slice_scores\"][slice_name] = score\n",
1311
+ " if \"outlier_adj\" in slice_name:\n",
1312
+ " print(f\" {slice_name}: {score:.4f} (weight: {weights[i]:.3f})\")\n",
1313
+ " \n",
1314
+ " strategy_results[strategy][\"oof_scores\"].append(score_weighted)\n",
1315
+ " strategy_results[strategy][\"ensemble_score\"] = score_weighted\n",
1316
+ " strategy_results[strategy][\"oof_preds\"] = oof_weighted\n",
1317
+ " strategy_results[strategy][\"test_preds\"] = test_weighted\n",
1318
+ " \n",
1319
+ " # Track best strategy\n",
1320
+ " if score_weighted > best_score:\n",
1321
+ " best_score = score_weighted\n",
1322
+ " best_strategy = strategy\n",
1323
+ " best_oof_preds = oof_preds\n",
1324
+ " best_test_preds = test_preds\n",
1325
+ " \n",
1326
+ " # Print comparison summary\n",
1327
+ " print(f\"\\n{'='*50}\")\n",
1328
+ " print(\"OUTLIER STRATEGY COMPARISON SUMMARY\")\n",
1329
+ " print(f\"{'='*50}\")\n",
1330
+ " \n",
1331
+ " for strategy in Config.OUTLIER_STRATEGIES:\n",
1332
+ " score = strategy_results[strategy][\"ensemble_score\"]\n",
1333
+ " print(f\"{strategy.upper()}: {score:.4f} {'← BEST' if strategy == best_strategy else ''}\")\n",
1334
+ " \n",
1335
+ " # Analyze differences\n",
1336
+ " print(f\"\\nRelative performance vs 'reduce' strategy:\")\n",
1337
+ " reduce_score = strategy_results[\"reduce\"][\"ensemble_score\"]\n",
1338
+ " for strategy in Config.OUTLIER_STRATEGIES:\n",
1339
+ " if strategy != \"reduce\":\n",
1340
+ " score = strategy_results[strategy][\"ensemble_score\"]\n",
1341
+ " diff = (score - reduce_score) / reduce_score * 100\n",
1342
+ " print(f\" {strategy}: {diff:+.2f}%\")\n",
1343
+ " \n",
1344
+ " return best_oof_preds, best_test_preds, model_slices, strategy_results, best_strategy\n",
1345
+ "\n",
1346
+ "# =========================\n",
1347
+ "# MLP Training (unchanged)\n",
1348
+ "# =========================\n",
1349
+ "def train_mlp(train_df, test_df):\n",
1350
+ " print(\"\\n=== Training MLP Model ===\")\n",
1351
+ " \n",
1352
+ " # Hyperparameters\n",
1353
+ " hparams = {\n",
1354
+ " \"seed\": 42,\n",
1355
+ " \"num_epochs\": 10,\n",
1356
+ " \"batch_size\": 1024 * 8 * 4,\n",
1357
+ " \"learning_rate\": 0.001,\n",
1358
+ " \"weight_decay\": 1e-3,\n",
1359
+ " \"dropout_rate\": 0.6,\n",
1360
+ " \"layers\": [len(Config.MLP_FEATURES), 256, 64, 1],\n",
1361
+ " \"hidden_activation\": None,\n",
1362
+ " \"activation\": \"relu\",\n",
1363
+ " \"delta\": 5,\n",
1364
+ " \"noise_factor\": 0.005\n",
1365
+ " }\n",
1366
+ " \n",
1367
+ " set_seed(hparams[\"seed\"])\n",
1368
+ " \n",
1369
+ " # Prepare data for MLP\n",
1370
+ " X_train_full = train_df[Config.MLP_FEATURES].values\n",
1371
+ " y_train_full = train_df[Config.LABEL_COLUMN].values\n",
1372
+ " \n",
1373
+ " # Split for validation\n",
1374
+ " X_train, X_val, y_train, y_val = train_test_split(\n",
1375
+ " X_train_full, y_train_full, test_size=0.2, shuffle=False, random_state=42\n",
1376
+ " )\n",
1377
+ " \n",
1378
+ " # Scale data\n",
1379
+ " scaler = StandardScaler()\n",
1380
+ " X_train = scaler.fit_transform(X_train)\n",
1381
+ " X_val = scaler.transform(X_val)\n",
1382
+ " X_test = scaler.transform(test_df[Config.MLP_FEATURES].values)\n",
1383
+ " \n",
1384
+ " # Create dataloaders\n",
1385
+ " train_loader = get_dataloaders(X_train, y_train, hparams, device, shuffle=True)\n",
1386
+ " val_loader = get_dataloaders(X_val, y_val, hparams, device, shuffle=False)\n",
1387
+ " test_loader = get_dataloaders(X_test, None, hparams, device, shuffle=False)\n",
1388
+ " \n",
1389
+ " # Initialize model\n",
1390
+ " model = MLP(\n",
1391
+ " layers=hparams[\"layers\"],\n",
1392
+ " dropout_rate=hparams[\"dropout_rate\"],\n",
1393
+ " activation=hparams[\"activation\"],\n",
1394
+ " last_activation=hparams[\"hidden_activation\"],\n",
1395
+ " ).to(device)\n",
1396
+ " \n",
1397
+ " criterion = nn.HuberLoss(delta=hparams[\"delta\"], reduction='sum')\n",
1398
+ " optimizer = optim.Adam(model.parameters(), lr=hparams[\"learning_rate\"], \n",
1399
+ " weight_decay=hparams[\"weight_decay\"])\n",
1400
+ " \n",
1401
+ " checkpointer = Checkpointer(path=\"best_mlp_model.pt\")\n",
1402
+ " \n",
1403
+ " # Training loop\n",
1404
+ " num_epochs = hparams[\"num_epochs\"]\n",
1405
+ " for epoch in range(num_epochs):\n",
1406
+ " model.train()\n",
1407
+ " running_loss = 0.0\n",
1408
+ "\n",
1409
+ " for inputs, targets in tqdm(train_loader, desc=f\"Epoch {epoch+1}/{num_epochs}\"):\n",
1410
+ " inputs, targets = inputs.to(device), targets.to(device)\n",
1411
+ " \n",
1412
+ " # Add noise for robustness\n",
1413
+ " inputs = inputs + torch.randn_like(inputs) * hparams[\"noise_factor\"]\n",
1414
+ " \n",
1415
+ " optimizer.zero_grad()\n",
1416
+ " outputs = model(inputs)\n",
1417
+ " loss = criterion(outputs, targets)\n",
1418
+ " \n",
1419
+ " loss.backward()\n",
1420
+ " optimizer.step()\n",
1421
+ " \n",
1422
+ " running_loss += loss.item() * inputs.size(0)\n",
1423
+ " \n",
1424
+ " running_loss = running_loss / len(train_loader.dataset)\n",
1425
+ " print(f\"Training Loss: {running_loss:.4f}\")\n",
1426
+ "\n",
1427
+ " # Validation phase\n",
1428
+ " model.eval()\n",
1429
+ " val_loss = 0.0\n",
1430
+ " preds = []\n",
1431
+ " trues = []\n",
1432
+ " with torch.no_grad():\n",
1433
+ " for inputs, targets in tqdm(val_loader, desc=\"Validation\"):\n",
1434
+ " inputs, targets = inputs.to(device), targets.to(device)\n",
1435
+ " outputs = model(inputs)\n",
1436
+ " loss = criterion(outputs, targets)\n",
1437
+ " val_loss += loss.item() * inputs.size(0)\n",
1438
+ " preds.append(outputs.cpu().numpy())\n",
1439
+ " trues.append(targets.cpu().numpy())\n",
1440
+ "\n",
1441
+ " val_loss /= len(val_loader.dataset)\n",
1442
+ " preds = np.concatenate(preds).flatten()\n",
1443
+ " trues = np.concatenate(trues).flatten()\n",
1444
+ " pearson_coef = pearsonr(preds, trues)[0]\n",
1445
+ " print(f\"Validation Pearson Coef: {pearson_coef:.4f} | Loss: {val_loss:.4f}\")\n",
1446
+ "\n",
1447
+ " checkpointer(pearson_coef, model)\n",
1448
+ " \n",
1449
+ " # Load best model and make predictions\n",
1450
+ " model = checkpointer.load(model)\n",
1451
+ " model.eval()\n",
1452
+ " predictions = []\n",
1453
+ " with torch.no_grad():\n",
1454
+ " for inputs in tqdm(test_loader, desc=\"Predicting\"):\n",
1455
+ " inputs = inputs[0].to(device)\n",
1456
+ " outputs = model(inputs)\n",
1457
+ " predictions.append(outputs.cpu().numpy())\n",
1458
+ "\n",
1459
+ " predictions = np.concatenate(predictions).flatten()\n",
1460
+ " \n",
1461
+ " return predictions\n",
1462
+ "\n",
1463
+ "# =========================\n",
1464
+ "# Ensemble & Submission Functions\n",
1465
+ "# =========================\n",
1466
+ "def create_xgboost_submission(train_df, oof_preds, test_preds, submission_df, strategy=\"reduce\"):\n",
1467
+ " learner_name = 'xgb'\n",
1468
+ " \n",
1469
+ " # Weights for 10 slices\n",
1470
+ " weights = np.array([\n",
1471
+ " 1.0, # full_data\n",
1472
+ " 1.0, # last_90pct\n",
1473
+ " 1.0, # last_85pct\n",
1474
+ " 1.0, # last_80pct\n",
1475
+ " 0.25, # oldest_25pct\n",
1476
+ " 0.9, # full_data_outlier_adj\n",
1477
+ " 0.9, # last_90pct_outlier_adj\n",
1478
+ " 0.9, # last_85pct_outlier_adj\n",
1479
+ " 0.9, # last_80pct_outlier_adj\n",
1480
+ " 0.2 # oldest_25pct_outlier_adj\n",
1481
+ " ])\n",
1482
+ " \n",
1483
+ " # Normalize weights\n",
1484
+ " weights = weights / weights.sum()\n",
1485
+ "\n",
1486
+ " oof_weighted = pd.DataFrame(oof_preds[learner_name]).values @ weights\n",
1487
+ " test_weighted = pd.DataFrame(test_preds[learner_name]).values @ weights\n",
1488
+ " score_weighted = pearsonr(train_df[Config.LABEL_COLUMN], oof_weighted)[0]\n",
1489
+ " print(f\"\\n{learner_name.upper()} Weighted Ensemble Pearson: {score_weighted:.4f}\")\n",
1490
+ "\n",
1491
+ " # Print individual slice scores and weights for analysis\n",
1492
+ " print(\"\\nIndividual slice OOF scores and weights:\")\n",
1493
+ " slice_names = list(oof_preds[learner_name].keys())\n",
1494
+ " for i, slice_name in enumerate(slice_names):\n",
1495
+ " score = pearsonr(train_df[Config.LABEL_COLUMN], oof_preds[learner_name][slice_name])[0]\n",
1496
+ " print(f\" {slice_name}: {score:.4f} (weight: {weights[i]:.3f})\")\n",
1497
+ "\n",
1498
+ " # Save XGBoost submission\n",
1499
+ " xgb_submission = submission_df.copy()\n",
1500
+ " xgb_submission[\"prediction\"] = test_weighted\n",
1501
+ " xgb_submission.to_csv(f\"submission_xgboost_{strategy}.csv\", index=False)\n",
1502
+ " print(f\"\\nSaved: submission_xgboost_{strategy}.csv\")\n",
1503
+ " \n",
1504
+ " return test_weighted\n",
1505
+ "\n",
1506
+ "def create_ensemble_submission(xgb_predictions, mlp_predictions, submission_df, \n",
1507
+ " xgb_weight=0.9, mlp_weight=0.1, suffix=\"\"):\n",
1508
+ " # Ensemble predictions\n",
1509
+ " ensemble_predictions = (xgb_weight * xgb_predictions + \n",
1510
+ " mlp_weight * mlp_predictions)\n",
1511
+ " \n",
1512
+ " # Save ensemble submission\n",
1513
+ " ensemble_submission = submission_df.copy()\n",
1514
+ " ensemble_submission[\"prediction\"] = ensemble_predictions\n",
1515
+ " filename = f\"submission_ensemble{suffix}.csv\"\n",
1516
+ " ensemble_submission.to_csv(filename, index=False)\n",
1517
+ "    print(f\"\\nSaved: {filename} (XGBoost: {xgb_weight*100}%, MLP: {mlp_weight*100}%)\")\n",
1518
+ " \n",
1519
+ " return ensemble_predictions\n",
1520
+ "\n",
1521
+ "# =========================\n",
1522
+ "# Main Execution\n",
1523
+ "# =========================\n",
1524
+ "if __name__ == \"__main__\":\n",
1525
+ " # Load data\n",
1526
+ " train_df, test_df, submission_df = load_data()\n",
1527
+ " \n",
1528
+ " # Analyze outliers\n",
1529
+ " outlier_indices = analyze_outliers(train_df)\n",
1530
+ " \n",
1531
+ " # Train XGBoost with outlier comparison\n",
1532
+ " print(\"\\n=== Training XGBoost Models with Outlier Strategy Comparison ===\")\n",
1533
+ " best_oof_preds, best_test_preds, model_slices, strategy_results, best_strategy = \\\n",
1534
+ " train_xgboost_with_outlier_comparison(train_df, test_df)\n",
1535
+ " \n",
1536
+ " # Create XGBoost submission with best strategy\n",
1537
+ " xgb_predictions = create_xgboost_submission(\n",
1538
+ " train_df, best_oof_preds, best_test_preds, submission_df, strategy=best_strategy\n",
1539
+ " )\n",
1540
+ " \n",
1541
+ " # Train MLP model\n",
1542
+ " mlp_predictions = train_mlp(train_df, test_df)\n",
1543
+ " \n",
1544
+ " # Save MLP submission\n",
1545
+ " mlp_submission = submission_df.copy()\n",
1546
+ " mlp_submission[\"prediction\"] = mlp_predictions\n",
1547
+ " mlp_submission.to_csv(\"submission_mlp.csv\", index=False)\n",
1548
+ " print(\"\\nSaved: submission_mlp.csv\")\n",
1549
+ " \n",
1550
+ " # Create ensemble submission\n",
1551
+ " ensemble_predictions = create_ensemble_submission(\n",
1552
+ " xgb_predictions, mlp_predictions, submission_df,\n",
1553
+ " xgb_weight=0.9, mlp_weight=0.1, suffix=f\"_{best_strategy}\"\n",
1554
+ " )\n",
1555
+ " \n",
1556
+ " # Print final summary\n",
1557
+ " print(\"\\n\" + \"=\"*60)\n",
1558
+ " print(\"FINAL SUMMARY\")\n",
1559
+ " print(\"=\"*60)\n",
1560
+ " print(f\"\\nBest outlier strategy: {best_strategy.upper()}\")\n",
1561
+ " print(f\"Best XGBoost CV score: {strategy_results[best_strategy]['ensemble_score']:.4f}\")\n",
1562
+ " \n",
1563
+ " print(\"\\nStrategy comparison (XGBoost ensemble scores):\")\n",
1564
+ " for strategy in Config.OUTLIER_STRATEGIES:\n",
1565
+ " score = strategy_results[strategy][\"ensemble_score\"]\n",
1566
+ " print(f\" {strategy}: {score:.4f}\")\n",
1567
+ " \n",
1568
+ " print(\"\\nCreated submission files:\")\n",
1569
+ " print(f\"1. submission_xgboost_{best_strategy}.csv - XGBoost with {best_strategy} strategy\")\n",
1570
+ " print(f\"2. submission_mlp.csv - MLP only\")\n",
1571
+ " print(f\"3. submission_ensemble_{best_strategy}.csv - 90% XGBoost + 10% MLP\")\n",
1572
+ " \n",
1573
+ " # Show sample predictions\n",
1574
+ " print(\"\\nSample predictions (first 10 rows):\")\n",
1575
+ " comparison_df = pd.DataFrame({\n",
1576
+ " 'ID': submission_df['ID'][:10],\n",
1577
+ " 'XGBoost': xgb_predictions[:10],\n",
1578
+ " 'MLP': mlp_predictions[:10],\n",
1579
+ " 'Ensemble': ensemble_predictions[:10]\n",
1580
+ " })\n",
1581
+ " print(comparison_df)\n",
1582
+ " \n",
1583
+ " # Provide recommendations\n",
1584
+ " print(\"\\n\" + \"=\"*60)\n",
1585
+ " print(\"RECOMMENDATIONS\")\n",
1586
+ " print(\"=\"*60)\n",
1587
+ " \n",
1588
+ " reduce_score = strategy_results[\"reduce\"][\"ensemble_score\"]\n",
1589
+ " remove_score = strategy_results[\"remove\"][\"ensemble_score\"]\n",
1590
+ " double_score = strategy_results[\"double\"][\"ensemble_score\"]\n",
1591
+ " none_score = strategy_results[\"none\"][\"ensemble_score\"]\n",
1592
+ " \n",
1593
+ " print(\"\\n1. Outlier Handling Impact:\")\n",
1594
+ " if best_strategy == \"reduce\":\n",
1595
+ " print(\" ✓ Current approach (reduce weights) is optimal\")\n",
1596
+ " elif best_strategy == \"remove\":\n",
1597
+ " print(\" ! Removing outliers completely performs better\")\n",
1598
+ " print(\" → This suggests outliers are noise rather than informative extremes\")\n",
1599
+ " elif best_strategy == \"double\":\n",
1600
+ " print(\" ! Doubling outlier weights performs better\")\n",
1601
+ " print(\" → This suggests outliers contain valuable signal for extreme movements\")\n",
1602
+ " else:\n",
1603
+ " print(\" ! No outlier adjustment performs better\")\n",
1604
+ " print(\" → This suggests the model can handle outliers naturally\")\n",
1605
+ " \n",
1606
+ " print(\"\\n2. Overfitting Risk Assessment:\")\n",
1607
+ " if remove_score > reduce_score and remove_score > double_score:\n",
1608
+ " print(\" ⚠ Removing outliers improves CV but may increase overfitting risk\")\n",
1609
+ " print(\" → Consider using reduce strategy for better generalization\")\n",
1610
+ " elif double_score > reduce_score:\n",
1611
+ " print(\" ✓ Emphasizing outliers improves performance\")\n",
1612
+ " print(\" → Model benefits from learning extreme patterns\")\n",
1613
+ " \n",
1614
+ " print(\"\\n3. Next Steps:\")\n",
1615
+ " print(\" • Test different outlier fractions (0.05%, 0.2%, 0.5%)\")\n",
1616
+ " print(\" • Try adaptive outlier detection per time slice\")\n",
1617
+ " print(\" • Consider feature-specific outlier handling\")\n",
1618
+ " print(\" • Monitor LB score vs CV score for overfitting signs\")\n",
1619
+ " \n",
1620
+ " # Additional insights based on outlier analysis\n",
1621
+ " if len(outlier_indices) > 0:\n",
1622
+ " print(f\"\\n4. Outlier Insights:\")\n",
1623
+ " print(f\" • Detected {len(outlier_indices)} outliers ({len(outlier_indices)/len(train_df)*100:.2f}% of data)\")\n",
1624
+ " print(\" • Consider creating synthetic outliers if 'double' strategy works well\")\n",
1625
+ " print(\" • Analyze time distribution of outliers for market regime insights\")"
1626
+ ]
1627
+ }
1628
+ ],
1629
+ "metadata": {
1630
+ "kaggle": {
1631
+ "accelerator": "none",
1632
+ "dataSources": [
1633
+ {
1634
+ "databundleVersionId": 11418275,
1635
+ "sourceId": 96164,
1636
+ "sourceType": "competition"
1637
+ }
1638
+ ],
1639
+ "isGpuEnabled": false,
1640
+ "isInternetEnabled": true,
1641
+ "language": "python",
1642
+ "sourceType": "notebook"
1643
+ },
1644
+ "kernelspec": {
1645
+ "display_name": "Python 3",
1646
+ "language": "python",
1647
+ "name": "python3"
1648
+ },
1649
+ "language_info": {
1650
+ "codemirror_mode": {
1651
+ "name": "ipython",
1652
+ "version": 3
1653
+ },
1654
+ "file_extension": ".py",
1655
+ "mimetype": "text/x-python",
1656
+ "name": "python",
1657
+ "nbconvert_exporter": "python",
1658
+ "pygments_lexer": "ipython3",
1659
+ "version": "3.11.11"
1660
+ },
1661
+ "papermill": {
1662
+ "default_parameters": {},
1663
+ "duration": 36715.5898,
1664
+ "end_time": "2025-07-01T04:45:13.424631",
1665
+ "environment_variables": {},
1666
+ "exception": null,
1667
+ "input_path": "__notebook__.ipynb",
1668
+ "output_path": "__notebook__.ipynb",
1669
+ "parameters": {},
1670
+ "start_time": "2025-06-30T18:33:17.834831",
1671
+ "version": "2.6.0"
1672
+ }
1673
+ },
1674
+ "nbformat": 4,
1675
+ "nbformat_minor": 5
1676
+ }
DRW/DRW-Crypto/uv.lock ADDED
The diff for this file is too large to render. See raw diff
 
LYY/baseline1/pipeline1.py ADDED
@@ -0,0 +1,368 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pandas as pd
3
+ import numpy as np
4
+ from sklearn.model_selection import KFold
5
+ from xgboost import XGBRegressor
6
+ from sklearn.linear_model import (
7
+ HuberRegressor, RANSACRegressor, TheilSenRegressor,
8
+ Lasso, ElasticNet, Ridge
9
+ )
10
+ from sklearn.cross_decomposition import PLSRegression
11
+ from sklearn.preprocessing import StandardScaler, RobustScaler
12
+ from sklearn.ensemble import RandomForestRegressor
13
+ from scipy.stats import pearsonr
14
+ import warnings
15
+ warnings.filterwarnings('ignore')
16
+
17
+ # ===== Feature Engineering =====
18
+ def feature_engineering(df):
19
+ """Original features plus new robust features"""
20
+ # Original features
21
+ df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
22
+ df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + 1e-8)
23
+ df['selling_pressure'] = df['sell_qty'] / (df['volume'] + 1e-8)
24
+ df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + 1e-8)
25
+
26
+ # New robust features
27
+ df['log_volume'] = np.log1p(df['volume'])
28
+ df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + 1e-8)
29
+ df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + 1e-8)
30
+ df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + 1e-8)
31
+
32
+ # Handle infinities and NaN
33
+ df = df.replace([np.inf, -np.inf], np.nan)
34
+
35
+ # For each column, replace NaN with median for robustness
36
+ for col in df.columns:
37
+ if df[col].isna().any():
38
+ median_val = df[col].median()
39
+ df[col] = df[col].fillna(median_val if not pd.isna(median_val) else 0)
40
+
41
+ return df
42
+
43
+ # ===== Configuration =====
44
+ class Config:
45
+ TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
46
+ TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"
47
+ SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/sample_submission.csv"
48
+
49
+ # Original features plus additional market features
50
+ FEATURES = [
51
+ "X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
52
+ "X345", "X855", "X302", "X178", "X168", "X612", "sell_qty",
53
+ "bid_qty", "ask_qty", "buy_qty", "volume"]
54
+
55
+ LABEL_COLUMN = "label"
56
+ N_FOLDS = 3
57
+ RANDOM_STATE = 42
58
+
59
+ # ===== Model Parameters =====
60
+ # Original XGBoost parameters
61
+ XGB_PARAMS = {'colsample_bylevel': 0.6560588273380593, 'colsample_bynode': 0.6769350579560919,
62
+ 'colsample_bytree': 0.3510322798718793, 'gamma': 2.595734345886362, 'learning_rate': 0.04113611485673781,
63
+ 'max_depth': 16, 'max_leaves': 19, 'min_child_weight': 19, 'n_estimators': 733,
64
+ 'subsample': 0.19361102113761736,
65
+ 'reg_alpha': 11.540628202315595, 'reg_lambda': 64.64706922056,
66
+ "verbosity": 0,
67
+ "random_state": Config.RANDOM_STATE,
68
+ "n_jobs": -1}
69
+
70
+ # Define all learners
71
+ LEARNERS = [
72
+ {"name": "xgb_baseline", "Estimator": XGBRegressor, "params": XGB_PARAMS, "need_scale": False},
73
+ {"name": "huber", "Estimator": HuberRegressor, "params": {"epsilon": 1.5, "alpha": 0.01, "max_iter": 500}, "need_scale": True},
74
+ {"name": "ransac", "Estimator": RANSACRegressor, "params": {"min_samples": 0.7, "max_trials": 100, "random_state": Config.RANDOM_STATE}, "need_scale": True},
75
+ {"name": "theilsen", "Estimator": TheilSenRegressor, "params": {"max_subpopulation": 10000, "random_state": Config.RANDOM_STATE}, "need_scale": True},
76
+ {"name": "lasso", "Estimator": Lasso, "params": {"alpha": 0.001, "max_iter": 1000}, "need_scale": True},
77
+ {"name": "elasticnet", "Estimator": ElasticNet, "params": {"alpha": 0.001, "l1_ratio": 0.5, "max_iter": 1000}, "need_scale": True},
78
+ {"name": "pls", "Estimator": PLSRegression, "params": {"n_components": 50}, "need_scale": True},
79
+ ]
80
+
81
+ # ===== Data Loading =====
82
+ def create_time_decay_weights(n: int, decay: float = 0.9) -> np.ndarray:
83
+ """Create time decay weights for more recent data importance"""
84
+ positions = np.arange(n)
85
+ normalized = positions / (n - 1)
86
+ weights = decay ** (1.0 - normalized)
87
+ return weights * n / weights.sum()
88
+
89
+ def load_data():
90
+ """Load and preprocess data"""
91
+ train_df = pd.read_parquet(Config.TRAIN_PATH, columns=Config.FEATURES + [Config.LABEL_COLUMN])
92
+ test_df = pd.read_parquet(Config.TEST_PATH, columns=Config.FEATURES)
93
+ submission_df = pd.read_csv(Config.SUBMISSION_PATH)
94
+
95
+ # Apply feature engineering
96
+ train_df = feature_engineering(train_df)
97
+ test_df = feature_engineering(test_df)
98
+
99
+ # Update features list with engineered features
100
+ engineered_features = [
101
+ "volume_weighted_sell", "buy_sell_ratio", "selling_pressure",
102
+ "effective_spread_proxy", "log_volume", "bid_ask_imbalance",
103
+ "order_flow_imbalance", "liquidity_ratio"
104
+ ]
105
+ Config.FEATURES = list(set(Config.FEATURES + engineered_features))
106
+
107
+ print(f"Loaded data - Train: {train_df.shape}, Test: {test_df.shape}, Submission: {submission_df.shape}")
108
+ print(f"Total features: {len(Config.FEATURES)}")
109
+
110
+ return train_df.reset_index(drop=True), test_df.reset_index(drop=True), submission_df
111
+
112
+ # ===== Model Training =====
113
+ def get_model_slices(n_samples: int):
114
+ """Define different data slices for training"""
115
+ return [
116
+ {"name": "full_data", "cutoff": 0},
117
+ {"name": "last_75pct", "cutoff": int(0.25 * n_samples)},
118
+ {"name": "last_50pct", "cutoff": int(0.50 * n_samples)},
119
+ ]
120
+
121
+ def train_single_model(X_train, y_train, X_valid, y_valid, X_test, learner, sample_weights=None):
122
+ """Train a single model with appropriate scaling if needed"""
123
+ if learner["need_scale"]:
124
+ scaler = RobustScaler() # More robust to outliers than StandardScaler
125
+ X_train_scaled = scaler.fit_transform(X_train)
126
+ X_valid_scaled = scaler.transform(X_valid)
127
+ X_test_scaled = scaler.transform(X_test)
128
+ else:
129
+ X_train_scaled = X_train
130
+ X_valid_scaled = X_valid
131
+ X_test_scaled = X_test
132
+
133
+ model = learner["Estimator"](**learner["params"])
134
+
135
+ # Handle different model training approaches
136
+ if learner["name"] == "xgb_baseline":
137
+ model.fit(X_train_scaled, y_train, sample_weight=sample_weights,
138
+ eval_set=[(X_valid_scaled, y_valid)], verbose=False)
139
+ elif learner["name"] in ["huber", "lasso", "elasticnet"]:
140
+ model.fit(X_train_scaled, y_train, sample_weight=sample_weights)
141
+ else:
142
+ # RANSAC, TheilSen, PLS don't support sample weights
143
+ model.fit(X_train_scaled, y_train)
144
+
145
+ valid_pred = model.predict(X_valid_scaled)
146
+ test_pred = model.predict(X_test_scaled)
147
+
148
+ return valid_pred, test_pred
149
+
150
def train_and_evaluate(train_df, test_df):
    """Run K-fold training of every learner on every data slice.

    Args:
        train_df: chronologically ordered training frame containing
            Config.FEATURES and Config.LABEL_COLUMN.
        test_df: test frame containing Config.FEATURES.

    Returns:
        (oof_preds, test_preds, model_slices) where the prediction dicts are
        keyed by learner name, then slice name.  Test predictions are
        averaged over folds.
    """
    n_samples = len(train_df)
    model_slices = get_model_slices(n_samples)

    # learner name -> slice name -> prediction vector
    oof_preds = {
        learner["name"]: {s["name"]: np.zeros(n_samples) for s in model_slices}
        for learner in LEARNERS
    }
    test_preds = {
        learner["name"]: {s["name"]: np.zeros(len(test_df)) for s in model_slices}
        for learner in LEARNERS
    }

    full_weights = create_time_decay_weights(n_samples)
    # shuffle=False keeps folds chronologically contiguous (time-series data).
    kf = KFold(n_splits=Config.N_FOLDS, shuffle=False)

    for fold, (train_idx, valid_idx) in enumerate(kf.split(train_df), start=1):
        print(f"\n--- Fold {fold}/{Config.N_FOLDS} ---")
        X_valid = train_df.iloc[valid_idx][Config.FEATURES]
        y_valid = train_df.iloc[valid_idx][Config.LABEL_COLUMN]
        X_test = test_df[Config.FEATURES]

        # model_slices lists "full_data" first, so its OOF predictions exist
        # before later slices borrow them for pre-cutoff rows (see below).
        for s in model_slices:
            cutoff = s["cutoff"]
            slice_name = s["name"]
            subset = train_df.iloc[cutoff:].reset_index(drop=True)
            # Positions, relative to the slice, of this fold's training rows.
            rel_idx = train_idx[train_idx >= cutoff] - cutoff

            if len(rel_idx) == 0:
                continue

            X_train = subset.iloc[rel_idx][Config.FEATURES]
            y_train = subset.iloc[rel_idx][Config.LABEL_COLUMN]
            # Time-decay weights are computed on the slice, not the full data,
            # so the decay profile matches the slice's own time span.
            sw = create_time_decay_weights(len(subset))[rel_idx] if cutoff > 0 else full_weights[train_idx]

            print(f"  Training slice: {slice_name}, samples: {len(X_train)}")

            for learner in LEARNERS:
                try:
                    valid_pred, test_pred = train_single_model(
                        X_train, y_train, X_valid, y_valid, X_test, learner, sw
                    )
                    # Some estimators (e.g. PLSRegression) predict (n, 1)
                    # arrays; flatten so the 1-D assignments below work.
                    valid_pred = np.ravel(valid_pred)
                    test_pred = np.ravel(test_pred)

                    # Store OOF predictions for validation rows inside the
                    # slice.  `valid_pred` already covers all of X_valid, so
                    # slice it directly — the previous version re-fitted a
                    # second scaler and model here, doubling training cost
                    # for an identical result.
                    mask = valid_idx >= cutoff
                    if mask.any():
                        oof_preds[learner["name"]][slice_name][valid_idx[mask]] = valid_pred[mask]

                    # Rows before the cutoff are outside this slice; reuse
                    # the full-data model's OOF prediction for them.
                    if cutoff > 0 and (~mask).any():
                        oof_preds[learner["name"]][slice_name][valid_idx[~mask]] = \
                            oof_preds[learner["name"]]["full_data"][valid_idx[~mask]]

                    test_preds[learner["name"]][slice_name] += test_pred

                except Exception as e:
                    print(f"    Error training {learner['name']}: {str(e)}")
                    continue

    # Test predictions were accumulated once per fold; average them.
    for learner_name in test_preds:
        for slice_name in test_preds[learner_name]:
            test_preds[learner_name][slice_name] /= Config.N_FOLDS

    return oof_preds, test_preds, model_slices
226
+
227
+ # ===== Ensemble and Submission =====
228
def create_submissions(train_df, oof_preds, test_preds, submission_df):
    """Create multiple submission files for different strategies"""
    # strategy name -> OOF Pearson score, used for the final summary print.
    all_submissions = {}

    # 1. Original baseline (XGBoost only)
    if "xgb_baseline" in oof_preds:
        # Average the per-slice prediction vectors into a single vector.
        xgb_oof = np.mean(list(oof_preds["xgb_baseline"].values()), axis=0)
        xgb_test = np.mean(list(test_preds["xgb_baseline"].values()), axis=0)
        # Pearson correlation of OOF predictions vs labels is the local
        # proxy for the competition metric.
        xgb_score = pearsonr(train_df[Config.LABEL_COLUMN], xgb_oof)[0]
        print(f"\nXGBoost Baseline Score: {xgb_score:.4f}")

        submission_xgb = submission_df.copy()
        submission_xgb["prediction"] = xgb_test
        submission_xgb.to_csv("submission_xgb_baseline.csv", index=False)
        all_submissions["xgb_baseline"] = xgb_score

    # 2. Robust methods ensemble
    robust_methods = ["huber", "ransac", "theilsen"]
    robust_oof_list = []
    robust_test_list = []

    for method in robust_methods:
        if method in oof_preds:
            method_oof = np.mean(list(oof_preds[method].values()), axis=0)
            method_test = np.mean(list(test_preds[method].values()), axis=0)
            method_score = pearsonr(train_df[Config.LABEL_COLUMN], method_oof)[0]
            print(f"{method.upper()} Score: {method_score:.4f}")

            # A NaN score means the model produced no usable OOF predictions
            # (e.g. it failed in every fold); exclude it from the ensemble.
            if not np.isnan(method_score):
                robust_oof_list.append(method_oof)
                robust_test_list.append(method_test)

    if robust_oof_list:
        robust_oof = np.mean(robust_oof_list, axis=0)
        robust_test = np.mean(robust_test_list, axis=0)
        robust_score = pearsonr(train_df[Config.LABEL_COLUMN], robust_oof)[0]
        print(f"\nRobust Ensemble Score: {robust_score:.4f}")

        submission_robust = submission_df.copy()
        submission_robust["prediction"] = robust_test
        submission_robust.to_csv("submission_robust_ensemble.csv", index=False)
        all_submissions["robust_ensemble"] = robust_score

    # 3. Regularized methods ensemble
    regularized_methods = ["lasso", "elasticnet"]
    reg_oof_list = []
    reg_test_list = []

    for method in regularized_methods:
        if method in oof_preds:
            method_oof = np.mean(list(oof_preds[method].values()), axis=0)
            method_test = np.mean(list(test_preds[method].values()), axis=0)
            method_score = pearsonr(train_df[Config.LABEL_COLUMN], method_oof)[0]
            print(f"{method.upper()} Score: {method_score:.4f}")

            if not np.isnan(method_score):
                reg_oof_list.append(method_oof)
                reg_test_list.append(method_test)

    if reg_oof_list:
        reg_oof = np.mean(reg_oof_list, axis=0)
        reg_test = np.mean(reg_test_list, axis=0)
        reg_score = pearsonr(train_df[Config.LABEL_COLUMN], reg_oof)[0]
        print(f"\nRegularized Ensemble Score: {reg_score:.4f}")

        submission_reg = submission_df.copy()
        submission_reg["prediction"] = reg_test
        submission_reg.to_csv("submission_regularized_ensemble.csv", index=False)
        all_submissions["regularized_ensemble"] = reg_score

    # 4. Full ensemble (weighted by performance)
    all_oof_scores = {}
    all_oof_preds = {}
    all_test_preds = {}

    for learner_name in oof_preds:
        learner_oof = np.mean(list(oof_preds[learner_name].values()), axis=0)
        learner_test = np.mean(list(test_preds[learner_name].values()), axis=0)
        score = pearsonr(train_df[Config.LABEL_COLUMN], learner_oof)[0]

        if not np.isnan(score) and score > 0:  # Only include positive correlations
            all_oof_scores[learner_name] = score
            all_oof_preds[learner_name] = learner_oof
            all_test_preds[learner_name] = learner_test

    # Weighted ensemble
    if all_oof_scores:
        # Each learner's weight is its share of the summed OOF scores.
        total_score = sum(all_oof_scores.values())
        weights = {k: v/total_score for k, v in all_oof_scores.items()}

        weighted_oof = sum(weights[k] * all_oof_preds[k] for k in weights)
        weighted_test = sum(weights[k] * all_test_preds[k] for k in weights)
        weighted_score = pearsonr(train_df[Config.LABEL_COLUMN], weighted_oof)[0]

        print(f"\nWeighted Full Ensemble Score: {weighted_score:.4f}")
        print("Weights:", {k: f"{v:.3f}" for k, v in weights.items()})

        submission_weighted = submission_df.copy()
        submission_weighted["prediction"] = weighted_test
        submission_weighted.to_csv("submission_weighted_ensemble.csv", index=False)
        all_submissions["weighted_ensemble"] = weighted_score

    # 6. Simple average of all valid models
    # NOTE(review): if every learner was filtered out above, np.mean over an
    # empty list raises here — confirm at least one learner always survives.
    simple_oof = np.mean(list(all_oof_preds.values()), axis=0)
    simple_test = np.mean(list(all_test_preds.values()), axis=0)
    simple_score = pearsonr(train_df[Config.LABEL_COLUMN], simple_oof)[0]

    print(f"\nSimple Full Ensemble Score: {simple_score:.4f}")

    submission_simple = submission_df.copy()
    submission_simple["prediction"] = simple_test
    submission_simple.to_csv("submission_simple_ensemble.csv", index=False)
    all_submissions["simple_ensemble"] = simple_score

    # Print summary
    print("\n" + "="*50)
    print("SUBMISSION SUMMARY:")
    print("="*50)
    # Best-scoring strategies first.
    for name, score in sorted(all_submissions.items(), key=lambda x: x[1], reverse=True):
        print(f"{name:25s}: {score:.4f}")

    return all_submissions
350
+
351
+ # ===== Main Execution =====
352
# Script entry point: run the full pipeline (load -> train -> submit).
if __name__ == "__main__":
    print("Loading data...")
    train_df, test_df, submission_df = load_data()

    print("\nTraining models...")
    oof_preds, test_preds, model_slices = train_and_evaluate(train_df, test_df)

    print("\nCreating submissions...")
    submission_scores = create_submissions(train_df, oof_preds, test_preds, submission_df)

    print("\nAll submissions created successfully!")
    print("Files created:")
    for created_file in (
        "- submission_xgb_baseline.csv (original baseline)",
        "- submission_robust_ensemble.csv (Huber + RANSAC + TheilSen)",
        "- submission_regularized_ensemble.csv (Lasso + ElasticNet)",
        "- submission_weighted_ensemble.csv (weighted by performance)",
        "- submission_simple_ensemble.csv (simple average)",
    ):
        print(created_file)
LYY/baseline1/submission_regularized_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ede4ae52eb080351a94bdc12853cfb623b6ca566b87bbe415b43cfe1880e87d
3
+ size 14588439
LYY/baseline1/submission_robust_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29523d3694e9647df3c1d13925d05145c7a4c75d9ff9979f91d69ab5c6598630
3
+ size 14317671
LYY/baseline1/submission_simple_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d011eb51bc219fe4b5d61a713decd9c4d9735a6d68a1effbad01361ddbab41e
3
+ size 14477893
LYY/baseline1/submission_weighted_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:649ec6fa9bb33fef5219b422b243a8e4caa240c2f66d8d4501991a84f4fe5bca
3
+ size 14602456
LYY/baseline1/submission_xgb_baseline.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d55ff5345659500437b59bc653be57981d49964394f1863789263e9983ba3040
3
+ size 14487926
LYY/pipeline.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pandas as pd
3
+ import numpy as np
4
+ from sklearn.model_selection import KFold
5
+ from xgboost import XGBRegressor
6
+ from sklearn.linear_model import (
7
+ HuberRegressor, RANSACRegressor, TheilSenRegressor,
8
+ Lasso, ElasticNet, Ridge
9
+ )
10
+ from sklearn.cross_decomposition import PLSRegression
11
+ from sklearn.preprocessing import StandardScaler, RobustScaler
12
+ from sklearn.ensemble import RandomForestRegressor
13
+ from scipy.stats import pearsonr
14
+ import warnings
15
+ warnings.filterwarnings('ignore')
16
+
17
+ # ===== Feature Engineering =====
18
def feature_engineering(df):
    """Add order-flow / liquidity features, then clean non-finite values.

    Args:
        df: frame with at least 'buy_qty', 'sell_qty', 'volume', 'bid_qty',
            'ask_qty' columns.  New columns are added to it in place.

    Returns:
        A cleaned frame in which +/-inf has been replaced and NaNs are
        imputed with each column's median (0 for all-NaN columns).
    """
    eps = 1e-8  # guards every ratio against division by zero

    # Original features
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + eps)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + eps)
    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + eps)

    # New robust features
    df['log_volume'] = np.log1p(df['volume'])
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + eps)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + eps)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + eps)

    # Handle infinities and NaN: map +/-inf to NaN, then impute every column
    # with its median (robust to outliers) in one vectorized pass instead of
    # the previous per-column Python loop.  Columns whose median is itself
    # NaN (all-NaN columns) fall back to 0, matching the old behavior.
    df = df.replace([np.inf, -np.inf], np.nan)
    medians = df.median().fillna(0)
    return df.fillna(medians)
42
+
43
+ # ===== Configuration =====
44
class Config:
    """Central configuration: data paths, feature list, and CV settings."""
    # Absolute paths to the competition data files.
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"
    SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/sample_submission.csv"

    # Original features plus additional market features
    # (anonymous X-columns plus raw order-book/trade quantities).
    # NOTE: load_data() later extends this list with engineered features.
    FEATURES = [
        "X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
        "X345", "X855", "X302", "X178", "X168", "X612", "sell_qty",
        "bid_qty", "ask_qty", "buy_qty", "volume"]

    LABEL_COLUMN = "label"  # regression target column
    N_FOLDS = 3             # KFold splits (unshuffled: data is chronological)
    RANDOM_STATE = 42       # seed shared by all stochastic estimators
58
+
59
+ # ===== Model Parameters =====
60
+ # Original XGBoost parameters
61
# Pre-tuned XGBoost hyperparameters — presumably from an Optuna search
# (see xgb_hyper_search.py); confirm before re-tuning.
XGB_PARAMS = {
    "tree_method": "hist",
    "device": "gpu",                 # trains on GPU; switch to "cpu" if none available
    "colsample_bylevel": 0.4778,
    "colsample_bynode": 0.3628,
    "colsample_bytree": 0.7107,
    "gamma": 1.7095,                 # min loss reduction required to split
    "learning_rate": 0.02213,
    "max_depth": 20,
    "max_leaves": 12,                # low leaf cap effectively dominates max_depth
    "min_child_weight": 16,
    "n_estimators": 1667,
    "subsample": 0.06567,            # very aggressive per-tree row subsampling
    "reg_alpha": 39.3524,            # L1 regularisation
    "reg_lambda": 75.4484,           # L2 regularisation
    "verbosity": 0,
    "random_state": Config.RANDOM_STATE,
    "n_jobs": -1
}
80
+
81
+ # Define all learners
82
# All ensemble members.  "need_scale" marks estimators fit on
# RobustScaler-transformed features (the linear models); XGBoost is fit on
# the raw feature matrix.
LEARNERS = [
    {"name": "xgb_baseline", "Estimator": XGBRegressor, "params": XGB_PARAMS, "need_scale": False},
    {"name": "huber", "Estimator": HuberRegressor, "params": {"epsilon": 1.5, "alpha": 0.01, "max_iter": 500}, "need_scale": True},
    {"name": "ransac", "Estimator": RANSACRegressor, "params": {"min_samples": 0.7, "max_trials": 100, "random_state": Config.RANDOM_STATE}, "need_scale": True},
    {"name": "theilsen", "Estimator": TheilSenRegressor, "params": {"max_subpopulation": 10000, "random_state": Config.RANDOM_STATE}, "need_scale": True},
    {"name": "lasso", "Estimator": Lasso, "params": {"alpha": 0.001, "max_iter": 1000}, "need_scale": True},
    {"name": "elasticnet", "Estimator": ElasticNet, "params": {"alpha": 0.001, "l1_ratio": 0.5, "max_iter": 1000}, "need_scale": True},
    # NOTE(review): n_components=50 exceeds the feature count; PLS is likely
    # to fail and be skipped by the try/except in the training loop — verify.
    {"name": "pls", "Estimator": PLSRegression, "params": {"n_components": 50}, "need_scale": True},
]
91
+
92
+ # ===== Data Loading =====
93
def create_time_decay_weights(n: int, decay: float = 0.9) -> np.ndarray:
    """Create time decay weights for more recent data importance.

    Weights follow decay ** (1 - t/(n-1)) over positions t = 0..n-1 (oldest
    first), so the newest sample gets weight ~1 and the oldest ~decay, then
    are rescaled to sum to n (mean weight 1.0).

    Args:
        n: number of chronologically ordered samples.
        decay: base decay in (0, 1]; smaller values down-weight old samples
            more strongly.

    Returns:
        np.ndarray of shape (n,) summing to n.  Returns an empty array for
        n <= 0 and [1.0] for n == 1 (the original formula divided by zero).
    """
    if n <= 0:
        return np.zeros(0)
    if n == 1:
        # Avoid the n - 1 == 0 division below.
        return np.ones(1)
    positions = np.arange(n)
    normalized = positions / (n - 1)
    weights = decay ** (1.0 - normalized)
    return weights * n / weights.sum()
99
+
100
def load_data():
    """Load and preprocess data.

    Reads the train/test parquet files and the sample submission, applies
    feature_engineering() to both frames, and returns them with reset
    indices.

    Side effect: extends Config.FEATURES with the engineered feature names.
    """
    train_df = pd.read_parquet(Config.TRAIN_PATH, columns=Config.FEATURES + [Config.LABEL_COLUMN])
    test_df = pd.read_parquet(Config.TEST_PATH, columns=Config.FEATURES)
    submission_df = pd.read_csv(Config.SUBMISSION_PATH)

    # Apply feature engineering
    train_df = feature_engineering(train_df)
    test_df = feature_engineering(test_df)

    # Update features list with engineered features
    engineered_features = [
        "volume_weighted_sell", "buy_sell_ratio", "selling_pressure",
        "effective_spread_proxy", "log_volume", "bid_ask_imbalance",
        "order_flow_imbalance", "liquidity_ratio"
    ]
    # NOTE(review): set() deduplicates but does not preserve a stable feature
    # order — confirm downstream code never relies on column ordering.
    Config.FEATURES = list(set(Config.FEATURES + engineered_features))

    print(f"Loaded data - Train: {train_df.shape}, Test: {test_df.shape}, Submission: {submission_df.shape}")
    print(f"Total features: {len(Config.FEATURES)}")

    return train_df.reset_index(drop=True), test_df.reset_index(drop=True), submission_df
122
+
123
+ # ===== Model Training =====
124
def get_model_slices(n_samples: int):
    """Return the training-window definitions used for model slicing.

    Each slice drops the oldest `cutoff` rows, letting models also train on
    only the most recent portion of the data.
    """
    windows = (("full_data", 0.0), ("last_75pct", 0.25), ("last_50pct", 0.50))
    return [
        {"name": label, "cutoff": int(fraction * n_samples)}
        for label, fraction in windows
    ]
131
+
132
def train_single_model(X_train, y_train, X_valid, y_valid, X_test, learner, sample_weights=None):
    """Fit one learner (scaling inputs when required) and predict.

    Returns a (valid_pred, test_pred) pair of prediction vectors.
    """
    if learner["need_scale"]:
        # RobustScaler is less sensitive to outliers than StandardScaler.
        scaler = RobustScaler()
        tr = scaler.fit_transform(X_train)
        va = scaler.transform(X_valid)
        te = scaler.transform(X_test)
    else:
        tr, va, te = X_train, X_valid, X_test

    model = learner["Estimator"](**learner["params"])
    name = learner["name"]

    if name == "xgb_baseline":
        # XGBoost additionally takes an eval set for monitoring.
        model.fit(tr, y_train, sample_weight=sample_weights,
                  eval_set=[(va, y_valid)], verbose=False)
    elif name in ("huber", "lasso", "elasticnet"):
        model.fit(tr, y_train, sample_weight=sample_weights)
    else:
        # RANSAC, TheilSen and PLS do not accept sample_weight in fit().
        model.fit(tr, y_train)

    return model.predict(va), model.predict(te)
160
+
161
def train_and_evaluate(train_df, test_df):
    """Run K-fold training of every learner on every data slice.

    Args:
        train_df: chronologically ordered training frame containing
            Config.FEATURES and Config.LABEL_COLUMN.
        test_df: test frame containing Config.FEATURES.

    Returns:
        (oof_preds, test_preds, model_slices) where the prediction dicts are
        keyed by learner name, then slice name.  Test predictions are
        averaged over folds.
    """
    n_samples = len(train_df)
    model_slices = get_model_slices(n_samples)

    # learner name -> slice name -> prediction vector
    oof_preds = {
        learner["name"]: {s["name"]: np.zeros(n_samples) for s in model_slices}
        for learner in LEARNERS
    }
    test_preds = {
        learner["name"]: {s["name"]: np.zeros(len(test_df)) for s in model_slices}
        for learner in LEARNERS
    }

    full_weights = create_time_decay_weights(n_samples)
    # shuffle=False keeps folds chronologically contiguous (time-series data).
    kf = KFold(n_splits=Config.N_FOLDS, shuffle=False)

    for fold, (train_idx, valid_idx) in enumerate(kf.split(train_df), start=1):
        print(f"\n--- Fold {fold}/{Config.N_FOLDS} ---")
        X_valid = train_df.iloc[valid_idx][Config.FEATURES]
        y_valid = train_df.iloc[valid_idx][Config.LABEL_COLUMN]
        X_test = test_df[Config.FEATURES]

        # model_slices lists "full_data" first, so its OOF predictions exist
        # before later slices borrow them for pre-cutoff rows (see below).
        for s in model_slices:
            cutoff = s["cutoff"]
            slice_name = s["name"]
            subset = train_df.iloc[cutoff:].reset_index(drop=True)
            # Positions, relative to the slice, of this fold's training rows.
            rel_idx = train_idx[train_idx >= cutoff] - cutoff

            if len(rel_idx) == 0:
                continue

            X_train = subset.iloc[rel_idx][Config.FEATURES]
            y_train = subset.iloc[rel_idx][Config.LABEL_COLUMN]
            # Time-decay weights are computed on the slice, not the full data,
            # so the decay profile matches the slice's own time span.
            sw = create_time_decay_weights(len(subset))[rel_idx] if cutoff > 0 else full_weights[train_idx]

            print(f"  Training slice: {slice_name}, samples: {len(X_train)}")

            for learner in LEARNERS:
                try:
                    valid_pred, test_pred = train_single_model(
                        X_train, y_train, X_valid, y_valid, X_test, learner, sw
                    )
                    # Some estimators (e.g. PLSRegression) predict (n, 1)
                    # arrays; flatten so the 1-D assignments below work.
                    valid_pred = np.ravel(valid_pred)
                    test_pred = np.ravel(test_pred)

                    # Store OOF predictions for validation rows inside the
                    # slice.  `valid_pred` already covers all of X_valid, so
                    # slice it directly — the previous version re-fitted a
                    # second scaler and model here, doubling training cost
                    # for an identical result.
                    mask = valid_idx >= cutoff
                    if mask.any():
                        oof_preds[learner["name"]][slice_name][valid_idx[mask]] = valid_pred[mask]

                    # Rows before the cutoff are outside this slice; reuse
                    # the full-data model's OOF prediction for them.
                    if cutoff > 0 and (~mask).any():
                        oof_preds[learner["name"]][slice_name][valid_idx[~mask]] = \
                            oof_preds[learner["name"]]["full_data"][valid_idx[~mask]]

                    test_preds[learner["name"]][slice_name] += test_pred

                except Exception as e:
                    print(f"    Error training {learner['name']}: {str(e)}")
                    continue

    # Test predictions were accumulated once per fold; average them.
    for learner_name in test_preds:
        for slice_name in test_preds[learner_name]:
            test_preds[learner_name][slice_name] /= Config.N_FOLDS

    return oof_preds, test_preds, model_slices
237
+
238
+ # ===== Ensemble and Submission =====
239
def create_submissions(train_df, oof_preds, test_preds, submission_df):
    """Create multiple submission files for different strategies"""
    # strategy name -> OOF Pearson score, used for the final summary print.
    all_submissions = {}

    # 1. Original baseline (XGBoost only)
    if "xgb_baseline" in oof_preds:
        # Average the per-slice prediction vectors into a single vector.
        xgb_oof = np.mean(list(oof_preds["xgb_baseline"].values()), axis=0)
        xgb_test = np.mean(list(test_preds["xgb_baseline"].values()), axis=0)
        # Pearson correlation of OOF predictions vs labels is the local
        # proxy for the competition metric.
        xgb_score = pearsonr(train_df[Config.LABEL_COLUMN], xgb_oof)[0]
        print(f"\nXGBoost Baseline Score: {xgb_score:.4f}")

        submission_xgb = submission_df.copy()
        submission_xgb["prediction"] = xgb_test
        submission_xgb.to_csv("submission_xgb_baseline.csv", index=False)
        all_submissions["xgb_baseline"] = xgb_score

    # 2. Robust methods ensemble
    robust_methods = ["huber", "ransac", "theilsen"]
    robust_oof_list = []
    robust_test_list = []

    for method in robust_methods:
        if method in oof_preds:
            method_oof = np.mean(list(oof_preds[method].values()), axis=0)
            method_test = np.mean(list(test_preds[method].values()), axis=0)
            method_score = pearsonr(train_df[Config.LABEL_COLUMN], method_oof)[0]
            print(f"{method.upper()} Score: {method_score:.4f}")

            # A NaN score means the model produced no usable OOF predictions
            # (e.g. it failed in every fold); exclude it from the ensemble.
            if not np.isnan(method_score):
                robust_oof_list.append(method_oof)
                robust_test_list.append(method_test)

    if robust_oof_list:
        robust_oof = np.mean(robust_oof_list, axis=0)
        robust_test = np.mean(robust_test_list, axis=0)
        robust_score = pearsonr(train_df[Config.LABEL_COLUMN], robust_oof)[0]
        print(f"\nRobust Ensemble Score: {robust_score:.4f}")

        submission_robust = submission_df.copy()
        submission_robust["prediction"] = robust_test
        submission_robust.to_csv("submission_robust_ensemble.csv", index=False)
        all_submissions["robust_ensemble"] = robust_score

    # 3. Regularized methods ensemble
    regularized_methods = ["lasso", "elasticnet"]
    reg_oof_list = []
    reg_test_list = []

    for method in regularized_methods:
        if method in oof_preds:
            method_oof = np.mean(list(oof_preds[method].values()), axis=0)
            method_test = np.mean(list(test_preds[method].values()), axis=0)
            method_score = pearsonr(train_df[Config.LABEL_COLUMN], method_oof)[0]
            print(f"{method.upper()} Score: {method_score:.4f}")

            if not np.isnan(method_score):
                reg_oof_list.append(method_oof)
                reg_test_list.append(method_test)

    if reg_oof_list:
        reg_oof = np.mean(reg_oof_list, axis=0)
        reg_test = np.mean(reg_test_list, axis=0)
        reg_score = pearsonr(train_df[Config.LABEL_COLUMN], reg_oof)[0]
        print(f"\nRegularized Ensemble Score: {reg_score:.4f}")

        submission_reg = submission_df.copy()
        submission_reg["prediction"] = reg_test
        submission_reg.to_csv("submission_regularized_ensemble.csv", index=False)
        all_submissions["regularized_ensemble"] = reg_score

    # 4. Full ensemble (weighted by performance)
    all_oof_scores = {}
    all_oof_preds = {}
    all_test_preds = {}

    for learner_name in oof_preds:
        learner_oof = np.mean(list(oof_preds[learner_name].values()), axis=0)
        learner_test = np.mean(list(test_preds[learner_name].values()), axis=0)
        score = pearsonr(train_df[Config.LABEL_COLUMN], learner_oof)[0]

        if not np.isnan(score) and score > 0:  # Only include positive correlations
            all_oof_scores[learner_name] = score
            all_oof_preds[learner_name] = learner_oof
            all_test_preds[learner_name] = learner_test

    # Weighted ensemble
    if all_oof_scores:
        # Each learner's weight is its share of the summed OOF scores.
        total_score = sum(all_oof_scores.values())
        weights = {k: v/total_score for k, v in all_oof_scores.items()}

        weighted_oof = sum(weights[k] * all_oof_preds[k] for k in weights)
        weighted_test = sum(weights[k] * all_test_preds[k] for k in weights)
        weighted_score = pearsonr(train_df[Config.LABEL_COLUMN], weighted_oof)[0]

        print(f"\nWeighted Full Ensemble Score: {weighted_score:.4f}")
        print("Weights:", {k: f"{v:.3f}" for k, v in weights.items()})

        submission_weighted = submission_df.copy()
        submission_weighted["prediction"] = weighted_test
        submission_weighted.to_csv("submission_weighted_ensemble.csv", index=False)
        all_submissions["weighted_ensemble"] = weighted_score

    # 6. Simple average of all valid models
    # NOTE(review): if every learner was filtered out above, np.mean over an
    # empty list raises here — confirm at least one learner always survives.
    simple_oof = np.mean(list(all_oof_preds.values()), axis=0)
    simple_test = np.mean(list(all_test_preds.values()), axis=0)
    simple_score = pearsonr(train_df[Config.LABEL_COLUMN], simple_oof)[0]

    print(f"\nSimple Full Ensemble Score: {simple_score:.4f}")

    submission_simple = submission_df.copy()
    submission_simple["prediction"] = simple_test
    submission_simple.to_csv("submission_simple_ensemble.csv", index=False)
    all_submissions["simple_ensemble"] = simple_score

    # Print summary
    print("\n" + "="*50)
    print("SUBMISSION SUMMARY:")
    print("="*50)
    # Best-scoring strategies first.
    for name, score in sorted(all_submissions.items(), key=lambda x: x[1], reverse=True):
        print(f"{name:25s}: {score:.4f}")

    return all_submissions
361
+
362
+ # ===== Main Execution =====
363
# Script entry point: load data, train every model, then write submissions.
if __name__ == "__main__":
    print("Loading data...")
    train_df, test_df, submission_df = load_data()

    print("\nTraining models...")
    oof_preds, test_preds, model_slices = train_and_evaluate(train_df, test_df)

    print("\nCreating submissions...")
    submission_scores = create_submissions(train_df, oof_preds, test_preds, submission_df)

    print("\nAll submissions created successfully!")
    print("Files created:")
    created_files = [
        "- submission_xgb_baseline.csv (original baseline)",
        "- submission_robust_ensemble.csv (Huber + RANSAC + TheilSen)",
        "- submission_regularized_ensemble.csv (Lasso + ElasticNet)",
        "- submission_weighted_ensemble.csv (weighted by performance)",
        "- submission_simple_ensemble.csv (simple average)",
    ]
    for entry in created_files:
        print(entry)
LYY/submission_regularized_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5654d21c2c4fd4437c26500d11e96cede3215ca5f84f143d39c5e6288a72aed9
3
+ size 14587831
LYY/submission_robust_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7610b14ed05a3de9a9114e4b467fea0848e37de1df94cb5d4bddb5377ff9cc55
3
+ size 14323736
LYY/submission_simple_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f7e6f77a0c3fb28182580d121e8e2ceb29c5c6f796eb8a4e4f7163f2ef1f39b
3
+ size 14490859
LYY/submission_weighted_ensemble.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7fa1c9268a667051893b05ff71e7c79e84d917e15f46854a1412fd7381f1ad16
3
+ size 14619255
LYY/submission_xgb_baseline.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:140b5e22b6b87632d5a0496f60f6db851b91c3e5da1d4942d926fb9754661849
3
+ size 14564582
LYY/xgb_hyper_search.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import optuna
2
+ import pandas as pd
3
+ import numpy as np
4
+ from xgboost import XGBRegressor
5
+ from sklearn.model_selection import KFold, cross_val_score
6
+ from scipy.stats import pearsonr
7
+
8
# Configuration
class Config:
    """Paths, feature list and CV settings for the hyperparameter search."""
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    # Hand-picked anonymous X-features plus raw order-book/trade quantities.
    FEATURES = [
        "X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
        "X345", "X855", "X302", "X178", "X168", "X612", "sell_qty",
        "bid_qty", "ask_qty", "buy_qty", "volume"
    ]
    LABEL_COLUMN = "label"  # regression target column
    N_FOLDS = 3             # KFold splits used inside the objective
    RANDOM_STATE = 42       # seed for KFold shuffling and XGBoost
19
+
20
def pearson_scorer(y_true, y_pred):
    """Return the Pearson correlation between targets and predictions."""
    correlation, _pvalue = pearsonr(y_true, y_pred)
    return correlation
22
+
23
# Cached training frame: Optuna calls the objective once per trial, and the
# previous implementation re-read the parquet file on every call.
_TRAIN_DF = None


def _load_train_df():
    """Load the training data once and reuse it across trials."""
    global _TRAIN_DF
    if _TRAIN_DF is None:
        _TRAIN_DF = pd.read_parquet(
            Config.TRAIN_PATH, columns=Config.FEATURES + [Config.LABEL_COLUMN]
        )
    return _TRAIN_DF


def objective(trial):
    """Optuna objective: mean K-fold R^2 for one sampled XGBoost config.

    Args:
        trial: optuna.Trial used to sample the hyperparameters.

    Returns:
        Mean CV R^2, except that scores above 0.25 are zeroed out — the
        author treats them as a sign of overfitting.
    """
    train_df = _load_train_df()
    X = train_df[Config.FEATURES]
    y = train_df[Config.LABEL_COLUMN]

    params = {
        "tree_method": "hist",
        "device": "gpu",
        "colsample_bylevel": trial.suggest_float("colsample_bylevel", 0.2, 1.0),
        "colsample_bynode": trial.suggest_float("colsample_bynode", 0.2, 1.0),
        "colsample_bytree": trial.suggest_float("colsample_bytree", 0.2, 1.0),
        "gamma": trial.suggest_float("gamma", 0, 5),
        "learning_rate": trial.suggest_float("learning_rate", 0.01, 0.05, log=True),
        "max_depth": trial.suggest_int("max_depth", 3, 24),
        "max_leaves": trial.suggest_int("max_leaves", 4, 32),
        "min_child_weight": trial.suggest_int("min_child_weight", 1, 32),
        "n_estimators": trial.suggest_int("n_estimators", 300, 2000),
        "subsample": trial.suggest_float("subsample", 0.05, 1.0),
        "reg_alpha": trial.suggest_float("reg_alpha", 0, 50),
        "reg_lambda": trial.suggest_float("reg_lambda", 0, 100),
        "verbosity": 0,
        "random_state": Config.RANDOM_STATE,
        "n_jobs": -1
    }

    model = XGBRegressor(**params)
    # NOTE(review): shuffle=True mixes time periods across folds; for this
    # time-series label a chronological split may give more honest scores.
    kf = KFold(n_splits=Config.N_FOLDS, shuffle=True, random_state=Config.RANDOM_STATE)
    scores = cross_val_score(model, X, y, cv=kf, scoring="r2", n_jobs=-1)
    mean_score = np.mean(scores)
    # Cap the score to guard against overfitting: implausibly high CV scores
    # are discarded so the search does not chase them.
    if mean_score > 0.25:
        return 0
    return mean_score
56
+
57
# Entry point: run a short Optuna search and report the best configuration.
if __name__ == "__main__":
    study = optuna.create_study(direction="maximize")
    # Adjust n_trials according to the available compute budget.
    study.optimize(objective, n_trials=15)
    print("最优参数:", study.best_params)
    print("最优得分:", study.best_value)
ZMJ/alpha_mixed.py ADDED
@@ -0,0 +1,950 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pandas as pd
3
+ import numpy as np
4
+ from sklearn.model_selection import KFold
5
+ from xgboost import XGBRegressor
6
+ from lightgbm import LGBMRegressor
7
+ from sklearn.linear_model import (
8
+ HuberRegressor, RANSACRegressor, TheilSenRegressor,
9
+ Lasso, ElasticNet, Ridge
10
+ )
11
+ from sklearn.cross_decomposition import PLSRegression
12
+ from sklearn.preprocessing import StandardScaler, RobustScaler
13
+ from sklearn.ensemble import RandomForestRegressor
14
+ from scipy.stats import pearsonr
15
+ import warnings
16
+ import torch
17
+ import matplotlib.pyplot as plt
18
+ import seaborn as sns
19
+ from concurrent.futures import ThreadPoolExecutor, as_completed
20
+ from itertools import combinations
21
+ import time
22
+ warnings.filterwarnings('ignore')
23
+
24
# Configure matplotlib for Chinese labels: prefer the SimHei CJK font and
# keep minus signs rendering correctly when a CJK font is active.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False
27
+
28
+ # ===== Configuration =====
29
class Config:
    """Configuration for the mixed-alpha experiment: data paths, CV settings,
    correlation-analysis options, and performance-tuning switches."""
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/test.parquet"
    # SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/sample_submission_zmj.csv"

    # Original features plus additional market features
    # FEATURES = [
    #     "X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
    #     "X415", "X345", "X855", "X174", "X302", "X178", "X168", "X612",
    #     "buy_qty", "sell_qty", "volume", "X888", "X421", "X333",
    #     "bid_qty", "ask_qty"
    # ]

    LABEL_COLUMN = "label"  # regression target column
    N_FOLDS = 3             # KFold splits
    RANDOM_STATE = 42       # shared random seed

    # Correlation-analysis settings
    CORRELATION_THRESHOLD = 0.8  # factors with correlation above this are aggregated
    IC_WEIGHT_METHOD = "abs"  # IC weighting method: "abs", "square", or "rank"
    SAVE_RESULTS = True  # persist analysis results to disk
    CREATE_VISUALIZATIONS = True  # produce diagnostic charts
    REMOVE_ORIGINAL_FEATURES = True  # drop the raw features after aggregation

    # Performance-tuning settings
    MAX_WORKERS = 4  # worker threads for parallel computation
    USE_SAMPLING = False  # subsample large datasets before computing
    SAMPLE_SIZE = 10000  # rows to draw when sampling is enabled
    USE_GPU = True  # GPU acceleration (requires PyTorch)
    USE_MATRIX_MULTIPLICATION = True  # use matrix-multiplication optimisation
59
+
60
+ # ===== Feature Engineering =====
61
def feature_engineering(df):
    """Augment *df* with order-flow / liquidity features and impute missing values.

    Adds eight derived columns (volume-weighted sell size, buy/sell ratio,
    selling pressure, an effective-spread proxy, log volume, bid/ask and
    order-flow imbalances, and a liquidity ratio), then maps any +/-inf
    produced by the ratios to NaN and fills every NaN with the column
    median (or 0 when the median itself is NaN).

    Returns the mutated DataFrame.
    """
    eps = 1e-8  # guards every ratio against division by zero

    # Flow / pressure features.
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + eps)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + eps)
    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + eps)

    # Robust transforms and imbalance measures.
    df['log_volume'] = np.log1p(df['volume'])
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + eps)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + eps)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + eps)

    # Ratios can still overflow to +/-inf; treat those as missing.
    df = df.replace([np.inf, -np.inf], np.nan)

    # Median imputation per column (fall back to 0 for all-NaN columns).
    for column in df.columns:
        if not df[column].isna().any():
            continue
        fill_value = df[column].median()
        df[column] = df[column].fillna(0 if pd.isna(fill_value) else fill_value)

    return df
85
+
86
# NOTE(review): these module-level loads execute on *import* (and the data is
# loaded again inside the __main__ block below), so the parquet files are read
# twice when the script runs directly, and any `import` of this module triggers
# heavy I/O. Consider moving this into the main block; kept as-is here.
train_df = pd.read_parquet(Config.TRAIN_PATH)
test_df = pd.read_parquet(Config.TEST_PATH)

train_df = feature_engineering(train_df)
90
+
91
+ # ===== 相关系数矩阵计算和因子聚合 =====
92
def calculate_correlation_matrix_and_ic(train_df, features, label_col='label', correlation_threshold=0.8, max_workers=4, test_df=None):
    """
    Compute the feature correlation matrix and each feature's IC against the
    label (optimized version). Features with a negative IC are sign-flipped
    first, so every IC is positive afterwards.

    NOTE(review): the sign flip mutates `train_df` (and `test_df`) in place in
    addition to returning them — callers must not assume the inputs are left
    untouched.

    Returns (corr_matrix, ic_values, feature_groups, train_df, test_df).
    """
    # Keep only the requested features that actually exist in the frame.
    available_features = [f for f in features if f in train_df.columns]
    print(f"可用特征数量: {len(available_features)}")

    # 1. Initial IC pass.
    ic_values = fast_ic_calculation(train_df, available_features, label_col, max_workers=max_workers)
    print("初始IC统计:")
    print(ic_values.describe())

    # 2. Negate every feature whose IC is negative — on both splits, so the
    #    transformation stays consistent at inference time.
    neg_ic_features = ic_values[ic_values < 0].index.tolist()
    print(f"IC为负的特征数量: {len(neg_ic_features)}")
    for f in neg_ic_features:
        train_df[f] = -train_df[f]
        if test_df is not None and f in test_df.columns:
            test_df[f] = -test_df[f]

    # 3. Recompute ICs; all are non-negative now.
    ic_values = fast_ic_calculation(train_df, available_features, label_col, max_workers=max_workers)
    print("IC取正后统计:")
    print(ic_values.describe())

    # 4. Pairwise feature correlation matrix.
    corr_matrix = fast_correlation_matrix(train_df, available_features, method='pearson', max_workers=max_workers)

    # 5. Group highly-correlated features.
    feature_groups = aggregate_correlated_features(
        corr_matrix, ic_values, correlation_threshold
    )

    return corr_matrix, ic_values, feature_groups, train_df, test_df
128
+
129
def aggregate_correlated_features(corr_matrix, ic_values, threshold=0.8):
    """
    Greedily group highly-correlated factors, using IC to pick representatives.

    Parameters
    ----------
    corr_matrix : pd.DataFrame
        Feature correlation matrix.
    ic_values : pd.Series
        IC value per feature.
    threshold : float
        Absolute-correlation threshold above which features are grouped.

    Returns
    -------
    feature_groups : list of dict
        Each dict holds the group's 'features', its IC-based 'weights', the
        'representative' feature (highest |IC| visited first), and the mean
        group IC ('group_ic').
    """

    features = list(corr_matrix.columns)
    used_features = set()
    feature_groups = []

    # Visit features in descending |IC| so high-IC features become group
    # representatives and absorb their correlated neighbours first.
    ic_abs = ic_values.abs().sort_values(ascending=False)

    for feature in ic_abs.index:
        if feature in used_features:
            continue

        # Collect the not-yet-assigned features strongly correlated with it.
        correlated_features = []
        for other_feature in features:
            if other_feature != feature and other_feature not in used_features:
                corr_value = abs(corr_matrix.loc[feature, other_feature])
                if corr_value > threshold:
                    correlated_features.append(other_feature)

        if correlated_features:
            # Build the group: representative first, then its neighbours.
            group_features = [feature] + correlated_features
            used_features.update(group_features)

            # IC-based weights for the weighted composite.
            group_ic_values = ic_values[group_features]
            weights = calculate_ic_weighted_weights(group_ic_values, Config.IC_WEIGHT_METHOD)

            feature_groups.append({
                'features': group_features,
                'weights': weights,
                'representative': feature,
                'group_ic': group_ic_values.mean()
            })

            print(f"特征组 {len(feature_groups)}: {feature} (IC={ic_values[feature]:.4f}) "
                  f"与 {len(correlated_features)} 个特征聚合")
            print(f"组{len(feature_groups) - 1} 权重: {weights}, 特征: {group_features}")
        else:
            # No strong neighbour: the feature forms a singleton group.
            used_features.add(feature)
            feature_groups.append({
                'features': [feature],
                'weights': [1.0],
                'representative': feature,
                'group_ic': ic_values[feature]
            })

    return feature_groups
199
+
200
def calculate_ic_weighted_weights(ic_values, method="abs"):
    """
    Derive normalized feature weights from IC values.

    Parameters
    ----------
    ic_values : pd.Series
        IC value per feature.
    method : str
        Weighting scheme:
          "abs"    -> weight proportional to |IC|
          "square" -> weight proportional to IC**2
          "rank"   -> weight proportional to the rank of |IC|
                      (higher |IC| => larger weight)

    Returns
    -------
    list of float
        Weights normalized to sum to 1.

    Raises
    ------
    ValueError
        If `method` is not one of the supported schemes.
    """
    if method == "abs":
        # |IC| as the weight base.
        weights_base = ic_values.abs()
    elif method == "square":
        # IC squared as the weight base.
        weights_base = ic_values ** 2
    elif method == "rank":
        # BUGFIX: the original used rank(ascending=False), which assigns the
        # *smallest* rank (hence the smallest weight) to the strongest
        # feature — inverting the intended ordering. Ranking ascending gives
        # the largest weight to the feature with the highest |IC|.
        weights_base = ic_values.abs().rank(ascending=True)
    else:
        raise ValueError(f"不支持的权重计算方法: {method}")

    # Small epsilon keeps zero-IC features from receiving exactly zero weight.
    weights_base = weights_base + 1e-8

    # Normalize to a probability-like vector.
    weights = weights_base / weights_base.sum()

    return weights.tolist()
235
+
236
def calculate_optimal_ic_weights(df, features, label_col):
    """
    Compute the IC-maximizing blend weights for a feature group.

    Standardizes the feature matrix Z and solves cov(Z, Z) @ w = cov(Z, r),
    i.e. the weights maximizing the correlation of the weighted composite
    with the label, then L1-normalizes them.

    Parameters
    ----------
    df : pd.DataFrame containing the features and the label
    features : list of feature names
    label_col : label column name

    Returns
    -------
    list of float — L1-normalized weights, one per feature.
    """
    if len(features) == 1:
        return [1.0]

    raw = df[features].values
    # z-score each column; epsilon guards constant columns.
    standardized = (raw - raw.mean(axis=0)) / (raw.std(axis=0) + 1e-8)
    target = df[label_col].values.reshape(-1, 1)

    feat_cov = np.cov(standardized, rowvar=False)
    cross_cov = np.cov(standardized, target, rowvar=False)[:-1, -1]

    # Tiny ridge term keeps the covariance matrix invertible.
    feat_cov += np.eye(feat_cov.shape[0]) * 1e-6

    try:
        weights = np.linalg.solve(feat_cov, cross_cov)
    except np.linalg.LinAlgError:
        weights = np.linalg.lstsq(feat_cov, cross_cov, rcond=None)[0]

    # L1-normalize; degenerate (all-zero) solutions fall back to equal weights.
    l1_norm = np.sum(np.abs(weights))
    if l1_norm > 1e-8:
        weights = weights / l1_norm
    else:
        weights = np.ones_like(weights) / len(weights)
    return weights.tolist()
267
+
268
def create_aggregated_features(df, feature_groups, remove_original=True, label_col=None):
    """
    Build one aggregated column per feature group using IC-maximizing weights
    (the plain |IC|-weighted alternative is computed and printed for
    comparison only). Optionally drops the original member columns.

    NOTE(review): weights are re-derived from `df` itself, so calling this on
    train and test separately yields *different* weights per split — confirm
    this is intended.
    """
    aggregated_df = df.copy()
    aggregated_original_features = set()
    if label_col is None:
        label_col = Config.LABEL_COLUMN
    for i, group in enumerate(feature_groups):
        features = group['features']
        representative = group['representative']
        # Skip groups whose members are missing from this frame.
        missing_features = [f for f in features if f not in df.columns]
        if missing_features:
            print(f"警告: 特征组 {i} 中缺少特征: {missing_features}")
            continue
        # IC-maximizing composite weights.
        weights = calculate_optimal_ic_weights(df, features, label_col)
        # Plain |IC| weights: per-feature Pearson with the label, for comparison.
        ic_vec = []
        for f in features:
            try:
                ic = np.corrcoef(df[f], df[label_col])[0, 1]
            except Exception:
                ic = 0.0
            ic_vec.append(ic)
        ic_weights = calculate_ic_weighted_weights(pd.Series(ic_vec, index=features), method='abs')
        print(f"组{i} features: {features}")
        print(f" 最大化IC权重: {weights}")
        print(f" IC加权权重: {ic_weights}")
        if len(features) == 1:
            agg_feature = df[features[0]] * weights[0]
        else:
            agg_feature = sum(df[features[j]] * weights[j] for j in range(len(features)))
        agg_feature_name = f"agg_group_{i}_{representative}"
        aggregated_df[agg_feature_name] = agg_feature
        print(f"创建聚合特征: {agg_feature_name} (包含 {len(features)} 个原始特征)")
        aggregated_original_features.update(features)
    # Drop the raw member columns once all composites exist.
    if remove_original:
        features_to_remove = [f for f in aggregated_original_features if f in aggregated_df.columns]
        if features_to_remove:
            aggregated_df = aggregated_df.drop(columns=features_to_remove)
            print(f"删除了 {len(features_to_remove)} 个原始特征: {features_to_remove}")
        else:
            print("没有需要删除的原始特征")
    return aggregated_df
315
+
316
+ # ===== 可视化函数 =====
317
def visualize_correlation_and_ic(corr_matrix, ic_values, feature_groups, save_plots=True):
    """
    Visualize correlation matrix, IC distribution, and feature aggregation
    results on a 2x2 figure.

    Parameters
    ----------
    corr_matrix : pd.DataFrame
        Feature correlation matrix.
    ic_values : pd.Series
        IC value per feature.
    feature_groups : list of dict
        Output of aggregate_correlated_features.
    save_plots : bool
        When True the figure is written to ./max_IC_mixed/feature_analysis.png.
    """
    import os  # local import: only needed for the save path below

    fig, axes = plt.subplots(2, 2, figsize=(20, 16))
    fig.suptitle('Feature Correlation Analysis and IC Distribution', fontsize=16, fontweight='bold')

    # 1. Correlation matrix heatmap (upper triangle masked — it is symmetric).
    mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
    sns.heatmap(corr_matrix, mask=mask, annot=False, cmap='RdBu_r', center=0,
                square=True, linewidths=0.5, cbar_kws={"shrink": .8}, ax=axes[0,0])
    axes[0,0].set_title('Feature Correlation Matrix', fontsize=14, fontweight='bold')

    # 2. IC distribution histogram with mean and zero reference lines.
    axes[0,1].hist(ic_values.values, bins=30, alpha=0.7, color='skyblue', edgecolor='black')
    axes[0,1].axvline(ic_values.mean(), color='red', linestyle='--',
                      label=f'Mean: {ic_values.mean():.4f}')
    axes[0,1].axvline(0, color='green', linestyle='-', alpha=0.5, label='IC=0')
    axes[0,1].set_xlabel('IC Value')
    axes[0,1].set_ylabel('Frequency')
    axes[0,1].set_title('Feature IC Value Distribution', fontsize=14, fontweight='bold')
    axes[0,1].legend()
    axes[0,1].grid(True, alpha=0.3)

    # 3. Top 20 features by |IC| (bars colored red when the raw IC is negative).
    top_ic_features = ic_values.abs().sort_values(ascending=False).head(20)
    colors = ['red' if ic_values[feature] < 0 else 'blue' for feature in top_ic_features.index]
    axes[1,0].barh(range(len(top_ic_features)), top_ic_features.values, color=colors, alpha=0.7)
    axes[1,0].set_yticks(range(len(top_ic_features)))
    axes[1,0].set_yticklabels(top_ic_features.index, fontsize=8)
    axes[1,0].set_xlabel('|IC Value|')
    axes[1,0].set_title('Top 20 |IC Value| Features', fontsize=14, fontweight='bold')
    axes[1,0].grid(True, alpha=0.3)

    # 4. Group size vs. group mean IC — singleton vs. aggregated groups.
    group_sizes = [len(group['features']) for group in feature_groups]
    group_ics = [group['group_ic'] for group in feature_groups]
    single_features = [i for i, size in enumerate(group_sizes) if size == 1]
    grouped_features = [i for i, size in enumerate(group_sizes) if size > 1]
    if single_features:
        axes[1,1].scatter([group_sizes[i] for i in single_features],
                          [group_ics[i] for i in single_features],
                          alpha=0.6, label='Single Feature', s=50)
    if grouped_features:
        axes[1,1].scatter([group_sizes[i] for i in grouped_features],
                          [group_ics[i] for i in grouped_features],
                          alpha=0.8, label='Aggregated Feature', s=100, color='red')
    axes[1,1].set_xlabel('Feature Group Size')
    axes[1,1].set_ylabel('Group Mean IC Value')
    axes[1,1].set_title('Feature Aggregation Result', fontsize=14, fontweight='bold')
    axes[1,1].legend()
    axes[1,1].grid(True, alpha=0.3)
    plt.tight_layout()
    if save_plots:
        # BUGFIX: savefig raises FileNotFoundError when the output directory
        # does not exist yet; create it first.
        os.makedirs('./max_IC_mixed', exist_ok=True)
        plt.savefig('./max_IC_mixed/feature_analysis.png', dpi=300, bbox_inches='tight')
        print("Saved feature analysis image: feature_analysis.png")
    plt.show()
374
+
375
def create_feature_summary_report(corr_matrix, ic_values, feature_groups):
    """
    Write (and print) a plain-text feature analysis report.

    Sections: basic IC statistics, high-correlation-pair counts, aggregation
    summary, top-10 |IC| features, and per-group aggregation details. The
    report is saved to ./max_IC_mixed/feature_analysis_report.txt.

    Parameters
    ----------
    corr_matrix : pd.DataFrame
        Feature correlation matrix.
    ic_values : pd.Series
        IC value per feature.
    feature_groups : list of dict
        Output of aggregate_correlated_features.
    """
    import os  # local import: only needed for the save path below

    report = []
    report.append("=" * 60)
    report.append("Feature Analysis Report")
    report.append("=" * 60)

    # 1. Basic IC statistics.
    report.append(f"\n1. Basic Statistical Information:")
    report.append(f" Total Feature Count: {len(ic_values)}")
    report.append(f" Average IC Value: {ic_values.mean():.4f}")
    report.append(f" IC Value Standard Deviation: {ic_values.std():.4f}")
    report.append(f" Maximum IC Value: {ic_values.max():.4f}")
    report.append(f" Minimum IC Value: {ic_values.min():.4f}")
    report.append(f" Positive IC Value Feature Count: {(ic_values > 0).sum()}")
    report.append(f" Negative IC Value Feature Count: {(ic_values < 0).sum()}")

    # 2. Count upper-triangle pairs above the configured threshold.
    high_corr_count = 0
    for i in range(len(corr_matrix.columns)):
        for j in range(i+1, len(corr_matrix.columns)):
            if abs(corr_matrix.iloc[i, j]) > Config.CORRELATION_THRESHOLD:
                high_corr_count += 1

    report.append(f"\n2. High Correlation Analysis (|Correlation| > {Config.CORRELATION_THRESHOLD}):")
    report.append(f" High Correlation Feature Pair Count: {high_corr_count}")
    report.append(f" Correlation Matrix Density: {high_corr_count / (len(corr_matrix) * (len(corr_matrix) - 1) / 2):.4f}")

    # 3. Aggregation summary.
    report.append(f"\n3. Feature Aggregation Results:")
    report.append(f" Feature Group Count: {len(feature_groups)}")

    single_features = [g for g in feature_groups if len(g['features']) == 1]
    grouped_features = [g for g in feature_groups if len(g['features']) > 1]

    report.append(f" Single Feature Group Count: {len(single_features)}")
    report.append(f" Aggregated Feature Group Count: {len(grouped_features)}")

    if grouped_features:
        avg_group_size = np.mean([len(g['features']) for g in grouped_features])
        report.append(f" Average Aggregated Group Size: {avg_group_size:.2f}")

    # 4. Top 10 features by |IC|.
    report.append(f"\n4. Top 10 Highest IC Value Features:")
    top_ic = ic_values.abs().sort_values(ascending=False).head(10)
    for i, (feature, ic_abs) in enumerate(top_ic.items(), 1):
        ic_original = ic_values[feature]
        report.append(f" {i:2d}. {feature:20s} |IC|={ic_abs:.4f} (IC={ic_original:.4f})")

    # 5. Per-group aggregation details (multi-feature groups only).
    report.append(f"\n5. Feature Aggregation Details:")
    for i, group in enumerate(grouped_features, 1):
        report.append(f" Group {i}: {group['representative']} (IC={group['group_ic']:.4f})")
        report.append(f" Contains Features: {', '.join(group['features'])}")
        report.append(f" Weights: {[f'{w:.3f}' for w in group['weights']]}")

    # BUGFIX: open(..., 'w') raises FileNotFoundError when the output
    # directory does not exist yet; create it first.
    os.makedirs('./max_IC_mixed', exist_ok=True)
    with open('./max_IC_mixed/feature_analysis_report.txt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(report))

    print('\n'.join(report))
    print(f"\nReport Saved to: feature_analysis_report.txt")
449
+
450
+ # ===== 优化的相关系数计算 =====
451
def fast_correlation_matrix(df, features, method='pearson', max_workers=4):
    """
    Fast correlation-matrix computation with several optimization strategies.

    Strategy order:
      1. matrix-multiplication shortcut (GPU via torch if available, else
         NumPy) — Pearson only, gated by Config.USE_MATRIX_MULTIPLICATION;
      2. row sampling for very large frames (Config.USE_SAMPLING) followed by
         plain pandas .corr();
      3. thread-parallel pairwise computation otherwise.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame.
    features : list of str
        Feature columns to correlate.
    method : str
        'pearson' or 'spearman'.
    max_workers : int
        Thread count for the parallel fallback.

    Returns
    -------
    corr_matrix : pd.DataFrame
        The feature-by-feature correlation matrix.
    """

    print(f"开始计算相关系数矩阵 (特征数量: {len(features)}, 方法: {method})")
    start_time = time.time()

    # Strategy 1: matrix-product shortcut (fastest; Pearson only).
    if method == 'pearson' and Config.USE_MATRIX_MULTIPLICATION:
        if Config.USE_GPU and torch.cuda.is_available():
            corr_matrix = torch_correlation(df, features, use_gpu=True)
            print(f"GPU矩阵乘法优化耗时: {time.time() - start_time:.2f}秒")
        else:
            corr_matrix = matrix_correlation(df, features)
            print(f"CPU矩阵乘法优化耗时: {time.time() - start_time:.2f}秒")
        return corr_matrix

    # Strategy 2: subsample large datasets, then plain pandas corr().
    if Config.USE_SAMPLING and len(df) > Config.SAMPLE_SIZE:
        print(f"数据量较大,使用采样计算 (采样大小: {Config.SAMPLE_SIZE})...")
        sample_size = min(Config.SAMPLE_SIZE, len(df))
        sample_df = df.sample(n=sample_size, random_state=42)
        feature_data = sample_df[features]
        corr_matrix = feature_data.corr(method=method)
        print(f"采样计算耗时: {time.time() - start_time:.2f}秒")
        return corr_matrix

    # Strategy 3: thread-parallel pairwise computation (medium-size data).
    else:
        print(f"使用并行计算 (线程数: {max_workers})...")
        return parallel_correlation_matrix(df, features, method, max_workers)
499
+
500
def matrix_correlation(df, features):
    """
    Compute the Pearson correlation matrix via one matrix product.

    Standardizes each column to z-scores and evaluates Z.T @ Z / (n - 1).

    BUGFIX: the original standardized with the *population* std (ddof=0) but
    divided the product by n - 1, inflating every off-diagonal entry by
    n / (n - 1) (values could exceed 1). Using the sample std (ddof=1) makes
    the result match np.corrcoef / pandas .corr().

    Parameters
    ----------
    df : pd.DataFrame
        Data frame.
    features : list of str
        Feature columns to correlate.

    Returns
    -------
    corr_matrix : pd.DataFrame
        Correlation matrix indexed by `features`.
    """

    # Extract the raw feature matrix.
    feature_data = df[features].values

    # z-score with the sample std so Z.T @ Z / (n-1) is exactly Pearson r.
    feature_data_std = (feature_data - feature_data.mean(axis=0)) / feature_data.std(axis=0, ddof=1)

    # Constant columns produce 0/0 = NaN above; treat their correlation with
    # everything as 0.
    feature_data_std = np.nan_to_num(feature_data_std, nan=0.0)

    # Correlation matrix: (Z.T @ Z) / (n - 1).
    n = feature_data_std.shape[0]
    corr_matrix_np = np.dot(feature_data_std.T, feature_data_std) / (n - 1)

    # Self-correlation is 1 by definition (also fixes constant columns' 0).
    np.fill_diagonal(corr_matrix_np, 1.0)

    corr_matrix = pd.DataFrame(corr_matrix_np, index=features, columns=features)

    return corr_matrix
537
+
538
def torch_correlation(df, features, use_gpu=False):
    """
    Pearson correlation matrix via a single tensor product, optionally on GPU.

    Columns are standardized to z-scores (sample std, guarded against
    zero-variance columns), then the matrix is X_norm.T @ X_norm / (n - 1).

    Parameters
    ----------
    df : pd.DataFrame
        Data frame.
    features : list of str
        Feature columns to correlate.
    use_gpu : bool
        Move the computation to CUDA when available.

    Returns
    -------
    pd.DataFrame
        Correlation matrix indexed by `features`.
    """

    values = df[features].values

    # Pick the compute device.
    if use_gpu and torch.cuda.is_available():
        device = torch.device('cuda')
        print("使用GPU加速计算...")
    else:
        device = torch.device('cpu')
        print("使用CPU计算...")

    X = torch.tensor(values, dtype=torch.float32, device=device)

    # Standardize each column; replace zero stds with 1 to avoid division by zero.
    col_mean = torch.mean(X, dim=0, keepdim=True)
    col_std = torch.std(X, dim=0, keepdim=True, unbiased=True)
    col_std = torch.where(col_std == 0, torch.ones_like(col_std), col_std)
    X_norm = (X - col_mean) / col_std

    # Any remaining NaNs are treated as zero signal.
    X_norm = torch.nan_to_num(X_norm, nan=0.0)

    # Correlation matrix: (X_norm.T @ X_norm) / (n - 1).
    rows = X_norm.shape[0]
    corr_tensor = torch.mm(X_norm.T, X_norm) / (rows - 1)

    # Self-correlation is exactly 1.
    torch.diagonal(corr_tensor)[:] = 1.0

    # Back to host memory and into a labelled frame.
    return pd.DataFrame(corr_tensor.cpu().numpy(), index=features, columns=features)
594
+
595
def parallel_correlation_matrix(df, features, method='pearson', max_workers=4):
    """
    Compute the correlation matrix pairwise on a thread pool.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame.
    features : list of str
        Feature columns to correlate.
    method : str
        'pearson' (scipy.stats.pearsonr); anything else falls through to
        pandas' spearman correlation.
    max_workers : int
        Thread count.

    Returns
    -------
    corr_matrix : pd.DataFrame
        Symmetric correlation matrix. NOTE(review): it is built cell-by-cell
        via .loc on an empty frame, so columns end up with object dtype —
        callers needing numeric dtype should .astype(float).
    """

    def calculate_correlation_pair(pair):
        """Correlation for a single (feat1, feat2) pair."""
        feat1, feat2 = pair
        if method == 'pearson':
            corr, _ = pearsonr(df[feat1], df[feat2])
        else:  # spearman
            corr = df[feat1].corr(df[feat2], method='spearman')
        return (feat1, feat2, corr)

    # Every unordered pair is computed once and mirrored afterwards.
    feature_pairs = list(combinations(features, 2))
    print(f"需要计算 {len(feature_pairs)} 个特征对的相关系数")

    # Fan the pairs out over the thread pool; progress is logged every 100.
    results = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_pair = {executor.submit(calculate_correlation_pair, pair): pair for pair in feature_pairs}

        completed = 0
        for future in as_completed(future_to_pair):
            feat1, feat2, corr = future.result()
            results[(feat1, feat2)] = corr
            results[(feat2, feat1)] = corr  # symmetric matrix
            completed += 1

            if completed % 100 == 0:
                print(f"已完成: {completed}/{len(feature_pairs)} ({completed/len(feature_pairs)*100:.1f}%)")

    # Assemble the full matrix; the diagonal is 1 by definition.
    corr_matrix = pd.DataFrame(index=features, columns=features)
    for feat1 in features:
        for feat2 in features:
            if feat1 == feat2:
                corr_matrix.loc[feat1, feat2] = 1.0
            else:
                corr_matrix.loc[feat1, feat2] = results.get((feat1, feat2), 0.0)

    return corr_matrix
654
+
655
def fast_ic_calculation(df, features, label_col, max_workers=4):
    """
    Compute each feature's IC (Pearson correlation with the label) on a
    thread pool.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame.
    features : list of str
        Feature columns.
    label_col : str
        Label column name.
    max_workers : int
        Thread count.

    Returns
    -------
    ic_values : pd.Series
        IC value per feature; 0.0 when the correlation cannot be computed.
    """

    print(f"开始计算特征IC值 (特征数量: {len(features)})")
    start_time = time.time()

    def calculate_ic(feature):
        """IC for a single feature; 0.0 on failure."""
        try:
            ic, _ = pearsonr(df[feature], df[label_col])
            return feature, ic
        # BUGFIX: was a bare `except:`, which also swallows KeyboardInterrupt
        # and SystemExit; only ordinary exceptions should map to a 0.0 IC.
        except Exception:
            return feature, 0.0

    # Fan out over the thread pool; progress is logged every 50 features.
    ic_dict = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_feature = {executor.submit(calculate_ic, feature): feature for feature in features}

        completed = 0
        for future in as_completed(future_to_feature):
            feature, ic = future.result()
            ic_dict[feature] = ic
            completed += 1

            if completed % 50 == 0:
                print(f"IC计算进度: {completed}/{len(features)} ({completed/len(features)*100:.1f}%)")

    ic_values = pd.Series(ic_dict)
    print(f"IC值计算耗时: {time.time() - start_time:.2f}秒")

    return ic_values
705
+
706
def benchmark_correlation_methods(df, features, sample_size=1000):
    """
    Benchmark the available correlation-matrix implementations against each
    other on a subsample of the data.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame.
    features : list of str
        Feature columns (capped at 50 for the benchmark).
    sample_size : int
        Rows used for the benchmark.

    Returns
    -------
    results : dict
        Per-method {'time', 'success'[, 'error']} info.
    """

    print("=" * 60)
    print("相关系数计算方法性能比较")
    print("=" * 60)

    # Subsample rows so the benchmark stays fast.
    if len(df) > sample_size:
        test_df = df.sample(n=sample_size, random_state=42)
    else:
        test_df = df

    test_features = features[:min(50, len(features))]  # cap feature count for the test
    print(f"测试数据: {len(test_df)} 行, {len(test_features)} 个特征")

    results = {}

    # Method 1: pandas corr().
    print("\n1. 测试 pandas corr() 方法...")
    start_time = time.time()
    try:
        feature_data = test_df[test_features]
        corr_pandas = feature_data.corr()
        pandas_time = time.time() - start_time
        results['pandas_corr'] = {'time': pandas_time, 'success': True}
        print(f" 耗时: {pandas_time:.3f}秒")
    except Exception as e:
        results['pandas_corr'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f" 失败: {e}")

    # Method 2: matrix multiplication on CPU.
    print("\n2. 测试矩阵乘法 (CPU)...")
    start_time = time.time()
    try:
        corr_matrix = matrix_correlation(test_df, test_features)
        matrix_time = time.time() - start_time
        results['matrix_cpu'] = {'time': matrix_time, 'success': True}
        print(f" 耗时: {matrix_time:.3f}秒")
    except Exception as e:
        results['matrix_cpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f" 失败: {e}")

    # Method 3: PyTorch on CPU.
    print("\n3. 测试 PyTorch (CPU)...")
    start_time = time.time()
    try:
        corr_torch_cpu = torch_correlation(test_df, test_features, use_gpu=False)
        torch_cpu_time = time.time() - start_time
        results['torch_cpu'] = {'time': torch_cpu_time, 'success': True}
        print(f" 耗时: {torch_cpu_time:.3f}秒")
    except Exception as e:
        results['torch_cpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f" 失败: {e}")

    # Method 4: PyTorch on GPU (skipped when CUDA is unavailable).
    if torch.cuda.is_available():
        print("\n4. 测试 PyTorch (GPU)...")
        start_time = time.time()
        try:
            corr_torch_gpu = torch_correlation(test_df, test_features, use_gpu=True)
            torch_gpu_time = time.time() - start_time
            results['torch_gpu'] = {'time': torch_gpu_time, 'success': True}
            print(f" 耗时: {torch_gpu_time:.3f}秒")
        except Exception as e:
            results['torch_gpu'] = {'time': float('inf'), 'success': False, 'error': str(e)}
            print(f" 失败: {e}")
    else:
        print("\n4. GPU不可用,跳过GPU测试")
        results['torch_gpu'] = {'time': float('inf'), 'success': False, 'error': 'GPU not available'}

    # Method 5: thread-parallel pairwise computation.
    print("\n5. 测试并行计算...")
    start_time = time.time()
    try:
        corr_parallel = parallel_correlation_matrix(test_df, test_features, method='pearson', max_workers=4)
        parallel_time = time.time() - start_time
        results['parallel'] = {'time': parallel_time, 'success': True}
        print(f" 耗时: {parallel_time:.3f}秒")
    except Exception as e:
        results['parallel'] = {'time': float('inf'), 'success': False, 'error': str(e)}
        print(f" 失败: {e}")

    # Summary: rank the successful methods by wall time.
    print(f"\n=== 性能比较结果 ===")
    successful_methods = {k: v for k, v in results.items() if v['success']}

    if successful_methods:
        fastest_method = min(successful_methods.items(), key=lambda x: x[1]['time'])
        print(f"最快方法: {fastest_method[0]} ({fastest_method[1]['time']:.3f}秒)")

        print(f"\n详细结果:")
        for method, result in sorted(successful_methods.items(), key=lambda x: x[1]['time']):
            # NOTE(review): this ratio is fastest/current, so slower methods
            # show values below 1 despite the "relative speed" label — confirm
            # which direction is intended.
            speedup = fastest_method[1]['time'] / result['time']
            print(f" {method:12s}: {result['time']:6.3f}秒 (相对速度: {speedup:.2f}x)")

    # Report the failures too.
    failed_methods = {k: v for k, v in results.items() if not v['success']}
    if failed_methods:
        print(f"\n失败的方法:")
        for method, result in failed_methods.items():
            print(f" {method}: {result.get('error', 'Unknown error')}")

    return results
826
+
827
if __name__ == "__main__":
    # ===== Main pipeline =====

    # Optional benchmark mode: `python <script> --benchmark` times the
    # correlation implementations and exits.
    if len(sys.argv) > 1 and sys.argv[1] == '--benchmark':
        print("=" * 60)
        print("运行相关系数计算方法性能测试")
        print("=" * 60)

        # Load training data for the benchmark.
        train_df = pd.read_parquet(Config.TRAIN_PATH)
        all_features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]

        benchmark_correlation_methods(train_df, all_features)
        sys.exit(0)

    print("=" * 60)
    print("开始特征相关性分析和因子聚合")
    print("=" * 60)

    # 1. Load data (rebinds the module-level frames loaded at import time).
    print("\n1. 加载数据...")
    train_df = pd.read_parquet(Config.TRAIN_PATH)
    test_df = pd.read_parquet(Config.TEST_PATH)
    print(f"训练数据形状: {train_df.shape}")
    print(f"测试数据形状: {test_df.shape}")

    # 2. Feature engineering on both splits.
    print("\n2. 执行特征工程...")
    train_df = feature_engineering(train_df)
    test_df = feature_engineering(test_df)
    print(f"特征工程后训练数据形状: {train_df.shape}")
    print(f"特征工程后测试数据形状: {test_df.shape}")

    # 2.5 Drop zero-variance columns — they carry no signal and break z-scoring.
    print("\n2.5 Remove constant features...")
    feature_cols = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    constant_features = [col for col in feature_cols if train_df[col].std() == 0]
    if constant_features:
        print(f"Remove {len(constant_features)} constant features: {constant_features}")
        train_df = train_df.drop(columns=constant_features)
        test_df = test_df.drop(columns=[col for col in constant_features if col in test_df.columns])
    else:
        print("No constant features found.")

    # 3. Remaining feature columns.
    all_features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    print(f"\n特征数量: {len(all_features)}")

    # 4. Correlation matrix + ICs (negative-IC features are sign-flipped inside).
    print(f"\n3. 计算相关系数矩阵 (阈值: {Config.CORRELATION_THRESHOLD})...")
    corr_matrix, ic_values, feature_groups, train_df, test_df = calculate_correlation_matrix_and_ic(
        train_df, all_features, Config.LABEL_COLUMN, Config.CORRELATION_THRESHOLD, Config.MAX_WORKERS, test_df
    )

    # 5. Basic statistics.
    print(f"\n4. 基本统计信息:")
    print(f" 相关系数矩阵形状: {corr_matrix.shape}")
    print(f" 平均IC值: {ic_values.mean():.4f}")
    print(f" 最大IC值: {ic_values.max():.4f}")
    print(f" 最小IC值: {ic_values.min():.4f}")
    print(f" IC值标准差: {ic_values.std():.4f}")

    # 6. Aggregation summary.
    print(f"\n5. 特征聚合结果:")
    print(f" 特征组数量: {len(feature_groups)}")

    single_features = [g for g in feature_groups if len(g['features']) == 1]
    grouped_features = [g for g in feature_groups if len(g['features']) > 1]

    print(f" 单独特征组: {len(single_features)}")
    print(f" 聚合特征组: {len(grouped_features)}")

    # 7. Build the aggregated columns for both splits. NOTE(review): the test
    # split has no label column, so the weights derived inside differ from the
    # train split — confirm this is intended.
    print(f"\n6. 创建聚合特征...")
    train_df_aggregated = create_aggregated_features(train_df, feature_groups, Config.REMOVE_ORIGINAL_FEATURES)
    test_df_aggregated = create_aggregated_features(test_df, feature_groups, Config.REMOVE_ORIGINAL_FEATURES)

    print(f" 聚合前训练特征数量: {len(all_features)}")
    print(f" 聚合后训练特征数量: {len([col for col in train_df_aggregated.columns if col != Config.LABEL_COLUMN])}")
    print(f" 聚合后测试特征数量: {len([col for col in test_df_aggregated.columns])}")

    # 8. Persist artifacts. NOTE(review): assumes ./max_IC_mixed already
    # exists — these writes fail otherwise; consider os.makedirs first.
    if Config.SAVE_RESULTS:
        print(f"\n7. 保存结果...")
        corr_matrix.to_csv('./max_IC_mixed/correlation_matrix.csv')
        ic_values.to_csv('./max_IC_mixed/ic_values.csv')
        train_df_aggregated.to_parquet('./max_IC_mixed/train_aggregated.parquet')
        test_df_aggregated.to_parquet('./max_IC_mixed/test_aggregated.parquet')
        print(" 相关系数矩阵已保存: correlation_matrix.csv")
        print(" 特征IC值已保存: ic_values.csv")
        print(" 聚合后训练数据已保存: train_aggregated.parquet")
        print(" 聚合后测试数据已保存: test_aggregated.parquet")

    # 9. Strongest features by |IC|.
    print(f"\n8. Top 10 highest IC features:")
    print(ic_values.abs().sort_values(ascending=False).head(10))

    # 10. Strongest correlated pairs (upper triangle only).
    print(f"\n9. Highly correlated feature pairs (|correlation| > {Config.CORRELATION_THRESHOLD}):")
    high_corr_pairs = []
    for i in range(len(corr_matrix.columns)):
        for j in range(i+1, len(corr_matrix.columns)):
            corr_val = corr_matrix.iloc[i, j]
            if abs(corr_val) > Config.CORRELATION_THRESHOLD:
                high_corr_pairs.append((corr_matrix.columns[i], corr_matrix.columns[j], corr_val))

    for pair in sorted(high_corr_pairs, key=lambda x: abs(x[2]), reverse=True)[:10]:
        print(f" {pair[0]} <-> {pair[1]}: {pair[2]:.4f}")

    # 11. Plots.
    if Config.CREATE_VISUALIZATIONS:
        print(f"\n10. Generate visualization...")
        visualize_correlation_and_ic(corr_matrix, ic_values, feature_groups, Config.SAVE_RESULTS)

    # 12. Text report.
    if Config.SAVE_RESULTS:
        print(f"\n11. Generate feature analysis report...")
        create_feature_summary_report(corr_matrix, ic_values, feature_groups)

    print(f"\n" + "=" * 60)
    print("Feature correlation analysis and factor aggregation completed!")
    print("=" * 60)
ZMJ/analyze.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import numpy as np
3
+ from scipy.stats import pearsonr
4
+ import warnings
5
+ from concurrent.futures import ThreadPoolExecutor, as_completed
6
+ import time
7
+ warnings.filterwarnings('ignore')
8
+
9
# ===== Configuration =====
class Config:
    """Static configuration for the IC-analysis script (paths, parallelism, output)."""

    # Data path configuration (raw parquet inputs)
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"

    # Paths used when working with pre-aggregated factor data
    AGGREGATED_TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/train_aggregated.parquet"
    AGGREGATED_TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/test_aggregated.parquet"

    LABEL_COLUMN = "label"

    # Performance configuration
    MAX_WORKERS = 4  # number of worker threads for the parallel IC computation
    USE_AGGREGATED_DATA = True  # prefer the aggregated dataset; falls back to raw if missing

    # Output configuration
    OUTPUT_DIR = "./ic_analysis_results"
    SAVE_DETAILED_RESULTS = True  # also write the detailed per-feature report CSV
28
+
29
def fast_ic_calculation(df, features, label_col, max_workers=4):
    """
    Compute each feature's IC (Pearson correlation with the label) in parallel.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame containing the features and the label column.
    features : list of str
        Feature column names to evaluate.
    label_col : str
        Label column name.
    max_workers : int
        Number of worker threads for the thread pool.

    Returns
    -------
    (ic_values, p_values) : tuple of pd.Series
        Pearson correlation and its p-value per feature, indexed by feature
        name. Features that fail to compute (or have constant values) yield
        IC=0.0, p=1.0.
    """

    print(f"开始计算特征IC值 (特征数量: {len(features)})")
    start_time = time.time()

    def calculate_ic(feature):
        """Compute one feature's IC; failures map to the neutral (0.0, 1.0)."""
        try:
            ic, p_value = pearsonr(df[feature], df[label_col])
            # pearsonr returns NaN (without raising) for constant input;
            # normalize that to the same neutral result as the error path
            # instead of letting NaN propagate silently.
            if np.isnan(ic):
                return feature, 0.0, 1.0
            return feature, ic, p_value
        except Exception as e:
            print(f"计算特征 {feature} 的IC值时出错: {e}")
            return feature, 0.0, 1.0

    # Fan the per-feature computations out over a thread pool.
    ic_dict = {}
    p_value_dict = {}

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_feature = {executor.submit(calculate_ic, feature): feature for feature in features}

        completed = 0
        for future in as_completed(future_to_feature):
            feature, ic, p_value = future.result()
            ic_dict[feature] = ic
            p_value_dict[feature] = p_value
            completed += 1

            # periodic progress report (every 50 features)
            if completed % 50 == 0:
                print(f"IC计算进度: {completed}/{len(features)} ({completed/len(features)*100:.1f}%)")

    ic_values = pd.Series(ic_dict)
    p_values = pd.Series(p_value_dict)

    print(f"IC值计算耗时: {time.time() - start_time:.2f}秒")

    return ic_values, p_values
85
+
86
def calculate_feature_statistics(df, features, label_col):
    """
    Collect per-feature summary statistics.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame containing the features and the label column.
    features : list of str
        Feature column names to summarize.
    label_col : str
        Label column name (looked up so a missing label is reported per feature).

    Returns
    -------
    pd.DataFrame
        One row per feature with mean/std/min/max plus missing-, zero- and
        outlier-counts and ratios; a feature that fails yields an all-NaN row.
    """

    print("计算特征统计信息...")

    nan_keys = ('mean', 'std', 'min', 'max',
                'missing_count', 'missing_ratio',
                'zero_count', 'zero_ratio',
                'outlier_count', 'outlier_ratio')
    records = []

    for feat in features:
        try:
            col = df[feat]
            _ = df[label_col]  # keep the label lookup inside the try, matching per-feature error reporting

            mu = col.mean()
            sigma = col.std()
            n = len(col)

            n_missing = col.isna().sum()
            n_zero = (col == 0).sum()
            # outliers: values more than 3 standard deviations from the mean
            n_outlier = ((col - mu).abs() > 3 * sigma).sum()

            record = {
                'feature': feat,
                'mean': mu,
                'std': sigma,
                'min': col.min(),
                'max': col.max(),
                'missing_count': n_missing,
                'missing_ratio': n_missing / n,
                'zero_count': n_zero,
                'zero_ratio': n_zero / n,
                'outlier_count': n_outlier,
                'outlier_ratio': n_outlier / n,
            }
        except Exception as e:
            print(f"计算特征 {feat} 统计信息时出错: {e}")
            record = {'feature': feat}
            record.update({key: np.nan for key in nan_keys})
        records.append(record)

    return pd.DataFrame(records)
162
+
163
def create_ic_analysis_report(ic_values, p_values, stats_df, output_dir, save_detailed=None):
    """
    Build and persist the IC analysis report.

    Parameters
    ----------
    ic_values : pd.Series
        IC per feature (index = feature name).
    p_values : pd.Series
        P-value per feature, same ordering as ic_values.
    stats_df : pd.DataFrame
        Per-feature statistics with a 'feature' column (merged into the report).
    output_dir : str
        Directory the CSV outputs are written to (created if absent).
    save_detailed : bool or None
        Whether to also write the detailed per-feature CSV. None (the default,
        preserving the old behavior) falls back to Config.SAVE_DETAILED_RESULTS.

    Returns
    -------
    pd.DataFrame
        The merged report, sorted by |IC| descending, with an 'ic_rank' column.
    """

    print("创建IC分析报告...")

    if save_detailed is None:
        save_detailed = Config.SAVE_DETAILED_RESULTS

    # create output directory
    import os
    os.makedirs(output_dir, exist_ok=True)

    # 1. Merge all information. Use plain arrays throughout: mixing a pandas
    # Series (which carries the feature-name index) with positional arrays in
    # the constructor would make row alignment depend on the Series index
    # instead of position — use .values so every column is positional.
    report_df = pd.DataFrame({
        'feature': ic_values.index,
        'ic_value': ic_values.values,
        'ic_abs': ic_values.abs().values,
        'p_value': p_values.values,
        'is_significant': (p_values < 0.05).values
    })

    # attach the per-feature statistics
    report_df = report_df.merge(stats_df, on='feature', how='left')

    # 2. sort by absolute IC
    report_df = report_df.sort_values('ic_abs', ascending=False)

    # 3. rank (1 = strongest IC; ties share the lowest rank)
    report_df['ic_rank'] = report_df['ic_abs'].rank(ascending=False, method='min')

    # 4. detailed report (optional)
    if save_detailed:
        detailed_path = os.path.join(output_dir, 'detailed_ic_analysis.csv')
        report_df.to_csv(detailed_path, index=False)
        print(f"详细IC分析报告已保存: {detailed_path}")

    # 5. compact summary (key columns only)
    simple_df = report_df[['feature', 'ic_value', 'ic_abs', 'ic_rank', 'p_value', 'is_significant']].copy()
    simple_path = os.path.join(output_dir, 'ic_analysis_summary.csv')
    simple_df.to_csv(simple_path, index=False)
    print(f"IC分析摘要已保存: {simple_path}")

    # 6. raw feature statistics
    stats_path = os.path.join(output_dir, 'feature_statistics.csv')
    stats_df.to_csv(stats_path, index=False)
    print(f"特征统计信息已保存: {stats_path}")

    # 7. console summary
    print("\n" + "="*60)
    print("IC分析摘要")
    print("="*60)
    print(f"总特征数量: {len(ic_values)}")
    print(f"平均IC值: {ic_values.mean():.4f}")
    print(f"IC值标准差: {ic_values.std():.4f}")
    print(f"最大IC值: {ic_values.max():.4f}")
    print(f"最小IC值: {ic_values.min():.4f}")
    print(f"显著特征数量 (p < 0.05): {(p_values < 0.05).sum()}")
    print(f"正IC值特征数量: {(ic_values > 0).sum()}")
    print(f"负IC值特征数量: {(ic_values < 0).sum()}")

    print(f"\nTop 10 最高IC值特征:")
    top_10 = report_df.head(10)
    for _, row in top_10.iterrows():
        significance = "***" if row['is_significant'] else ""
        print(f"  {row['ic_rank']:2.0f}. {row['feature']:20s} IC={row['ic_value']:6.4f} (p={row['p_value']:.4f}) {significance}")

    print(f"\nBottom 10 最低IC值特征:")
    bottom_10 = report_df.tail(10)
    for _, row in bottom_10.iterrows():
        significance = "***" if row['is_significant'] else ""
        print(f"  {row['ic_rank']:2.0f}. {row['feature']:20s} IC={row['ic_value']:6.4f} (p={row['p_value']:.4f}) {significance}")

    return report_df
246
+
247
def main():
    """Entry point: load data, preprocess, compute per-feature ICs, and write all reports."""

    def _fill_with_median(frame, columns):
        # Replace NaNs per column with that column's median,
        # falling back to 0 when the median itself is NaN.
        for column in columns:
            if frame[column].isna().any():
                med = frame[column].median()
                frame[column] = frame[column].fillna(med if not pd.isna(med) else 0)

    print("="*60)
    print("开始IC值分析")
    print("="*60)

    # ---- 1. load data (aggregated if configured, raw otherwise) ----
    print("\n1. 加载数据...")
    if Config.USE_AGGREGATED_DATA:
        try:
            train_df = pd.read_parquet(Config.AGGREGATED_TRAIN_PATH)
            print(f"使用聚合后的训练数据: {train_df.shape}")
        except FileNotFoundError:
            print("聚合数据文件不存在,使用原始数据...")
            train_df = pd.read_parquet(Config.TRAIN_PATH)
            print(f"使用原始训练数据: {train_df.shape}")
    else:
        train_df = pd.read_parquet(Config.TRAIN_PATH)
        print(f"使用原始训练数据: {train_df.shape}")

    # ---- 2. feature list (everything except the label) ----
    print("\n2. 获取特征列表...")
    features = [c for c in train_df.columns if c != Config.LABEL_COLUMN]
    print(f"特征数量: {len(features)}")

    # ---- 3. preprocessing ----
    print("\n3. 数据预处理...")
    all_columns = features + [Config.LABEL_COLUMN]
    _fill_with_median(train_df, all_columns)                 # fill NaNs first
    train_df = train_df.replace([np.inf, -np.inf], np.nan)   # neutralize infinities...
    _fill_with_median(train_df, all_columns)                 # ...and fill the NaNs they became
    print(f"预处理后数据形状: {train_df.shape}")

    # ---- 4. IC values ----
    print("\n4. 计算IC值...")
    ic_values, p_values = fast_ic_calculation(train_df, features, Config.LABEL_COLUMN, Config.MAX_WORKERS)

    # ---- 5. feature statistics ----
    print("\n5. 计算特征统计信息...")
    stats_df = calculate_feature_statistics(train_df, features, Config.LABEL_COLUMN)

    # ---- 6. analysis report ----
    print("\n6. 创建分析报告...")
    create_ic_analysis_report(ic_values, p_values, stats_df, Config.OUTPUT_DIR)

    # ---- 7. raw IC values ----
    print("\n7. 保存原始IC值...")
    ic_df = pd.DataFrame({
        'feature': ic_values.index,
        'ic_value': ic_values.values,
        'p_value': p_values.values
    })
    ic_path = f"{Config.OUTPUT_DIR}/ic_values.csv"
    ic_df.to_csv(ic_path, index=False)
    print(f"IC值已保存: {ic_path}")

    print("\n" + "="*60)
    print("IC值分析完成!")
    print("="*60)
    print(f"所有结果已保存到目录: {Config.OUTPUT_DIR}")
    print("生成的文件:")
    print("- ic_values.csv: 原始IC值")
    print("- ic_analysis_summary.csv: IC分析摘要")
    print("- detailed_ic_analysis.csv: 详细IC分析报告")
    print("- feature_statistics.csv: 特征统计信息")
321
+
322
# Run the full IC analysis when executed as a script.
if __name__ == "__main__":
    main()
ZMJ/data_processed/correlation_matrix.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ec7569d99f29e69f69f43f36ffd87ec5a807ce6b9c6b170def77a1257561bb4
3
+ size 16050677
ZMJ/data_processed/feature_analysis.png ADDED

Git LFS Details

  • SHA256: 86dbba6ef56122dfea0b8667305e8aa3101e103c060de860d61b4c028afd2456
  • Pointer size: 131 Bytes
  • Size of remote file: 801 kB
ZMJ/data_processed/feature_analysis_report.txt ADDED
@@ -0,0 +1,576 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ============================================================
2
+ 特征分析报告
3
+ ============================================================
4
+
5
+ 1. 基本统计信息:
6
+ 总特征数量: 903
7
+ 平均IC值: 0.0181
8
+ IC值标准差: 0.0139
9
+ 最大IC值: 0.0694
10
+ 最小IC值: 0.0000
11
+ 正IC值特征数量: 876
12
+ 负IC值特征数量: 0
13
+
14
+ 2. 高相关性分析 (|相关系数| > 0.8):
15
+ 高相关特征对数量: 2283
16
+ 相关系数矩阵密度: 0.0056
17
+
18
+ 3. 特征聚合结果:
19
+ 特征组数量: 364
20
+ 单独特征组: 184
21
+ 聚合特征组: 180
22
+ 平均聚合组大小: 3.99
23
+
24
+ 4. 前10个最高IC值特征:
25
+ 1. X21 |IC|=0.0694 (IC=0.0694)
26
+ 2. X20 |IC|=0.0677 (IC=0.0677)
27
+ 3. X28 |IC|=0.0641 (IC=0.0641)
28
+ 4. X863 |IC|=0.0641 (IC=0.0641)
29
+ 5. X29 |IC|=0.0623 (IC=0.0623)
30
+ 6. X19 |IC|=0.0623 (IC=0.0623)
31
+ 7. X27 |IC|=0.0623 (IC=0.0623)
32
+ 8. X22 |IC|=0.0577 (IC=0.0577)
33
+ 9. X858 |IC|=0.0573 (IC=0.0573)
34
+ 10. X219 |IC|=0.0567 (IC=0.0567)
35
+
36
+ 5. 特征聚合详情:
37
+ 组 1: X21 (IC=0.0643)
38
+ 包含特征: X21, X19, X20, X22
39
+ 权重: ['0.270', '0.242', '0.263', '0.224']
40
+ 组 2: X28 (IC=0.0629)
41
+ 包含特征: X28, X27, X29
42
+ 权重: ['0.340', '0.330', '0.330']
43
+ 组 3: X219 (IC=0.0493)
44
+ 包含特征: X219, X217, X218, X225, X226
45
+ 权重: ['0.230', '0.187', '0.218', '0.183', '0.183']
46
+ 组 4: X531 (IC=0.0484)
47
+ 包含特征: X531, X524, X538
48
+ 权重: ['0.387', '0.331', '0.282']
49
+ 组 5: X287 (IC=0.0470)
50
+ 包含特征: X287, X264, X280, X281, X282, X283, X284, X285, X286, X288, X289, X290, X291, X292, X293, X294, X295, X296, X297, X432, X435, X438, X868
51
+ 权重: ['0.052', '0.027', '0.037', '0.044', '0.038', '0.046', '0.041', '0.049', '0.045', '0.045', '0.052', '0.045', '0.051', '0.048', '0.051', '0.047', '0.051', '0.047', '0.049', '0.035', '0.034', '0.031', '0.035']
52
+ 组 6: X298 (IC=0.0505)
53
+ 包含特征: X298, X272, X299, X300, X301, X302, X303
54
+ 权重: ['0.150', '0.120', '0.148', '0.149', '0.145', '0.146', '0.141']
55
+ 组 7: X30 (IC=0.0409)
56
+ 包含特征: X30, X31, X32
57
+ 权重: ['0.404', '0.333', '0.263']
58
+ 组 8: X465 (IC=0.0426)
59
+ 包含特征: X465, X444, X464, X466, X471, X472, X473
60
+ 权重: ['0.166', '0.132', '0.135', '0.165', '0.124', '0.142', '0.135']
61
+ 组 9: X181 (IC=0.0398)
62
+ 包含特征: X181, X95, X131, X137, X139, X169, X173, X175, X179
63
+ 权重: ['0.137', '0.120', '0.113', '0.124', '0.085', '0.107', '0.089', '0.128', '0.096']
64
+ 组 10: X861 (IC=0.0314)
65
+ 包含特征: X861, X525, X532, X539
66
+ 权重: ['0.362', '0.158', '0.244', '0.236']
67
+ 组 11: X23 (IC=0.0394)
68
+ 包含特征: X23, X24
69
+ 权重: ['0.571', '0.429']
70
+ 组 12: X198 (IC=0.0354)
71
+ 包含特征: X198, X196, X197, X204, X205, X211, X212
72
+ 权重: ['0.173', '0.139', '0.162', '0.143', '0.149', '0.116', '0.117']
73
+ 组 13: X277 (IC=0.0389)
74
+ 包含特征: X277, X276, X278, X279
75
+ 权重: ['0.271', '0.212', '0.260', '0.257']
76
+ 组 14: X580 (IC=0.0375)
77
+ 包含特征: X580, X573, X587
78
+ 权重: ['0.371', '0.309', '0.320']
79
+ 组 15: X224 (IC=0.0331)
80
+ 包含特征: X224, X223
81
+ 权重: ['0.629', '0.371']
82
+ 组 16: X758 (IC=0.0389)
83
+ 包含特征: X758, X750, X754
84
+ 权重: ['0.355', '0.293', '0.353']
85
+ 组 17: X612 (IC=0.0342)
86
+ 包含特征: X612, X610, X611
87
+ 权重: ['0.395', '0.270', '0.335']
88
+ 组 18: X89 (IC=0.0315)
89
+ 包含特征: X89, X83, X125, X167, X336, X728
90
+ 权重: ['0.215', '0.189', '0.173', '0.135', '0.164', '0.123']
91
+ 组 19: X269 (IC=0.0377)
92
+ 包含特征: X269, X268, X270, X271
93
+ 权重: ['0.263', '0.225', '0.257', '0.255']
94
+ 组 20: X731 (IC=0.0318)
95
+ 包含特征: X731, X727, X729, X730
96
+ 权重: ['0.309', '0.212', '0.271', '0.208']
97
+ 组 21: X445 (IC=0.0304)
98
+ 包含特征: X445, X443, X450, X451, X452, X457, X458, X459
99
+ 权重: ['0.158', '0.130', '0.118', '0.145', '0.141', '0.089', '0.111', '0.107']
100
+ 组 22: X373 (IC=0.0380)
101
+ 包含特征: X373, X367, X379, X385
102
+ 权重: ['0.251', '0.248', '0.250', '0.251']
103
+ 组 23: X504 (IC=0.0371)
104
+ 包含特征: X504, X511
105
+ 权重: ['0.504', '0.496']
106
+ 组 24: X540 (IC=0.0373)
107
+ 包含特征: X540, X533
108
+ 权重: ['0.502', '0.498']
109
+ 组 25: X186 (IC=0.0346)
110
+ 包含特征: X186, X183, X189
111
+ 权重: ['0.359', '0.305', '0.335']
112
+ 组 26: X361 (IC=0.0365)
113
+ 包含特征: X361, X355
114
+ 权重: ['0.509', '0.491']
115
+ 组 27: X331 (IC=0.0326)
116
+ 包含特征: X331, X325, X337, X343
117
+ 权重: ['0.279', '0.263', '0.246', '0.213']
118
+ 组 28: X517 (IC=0.0341)
119
+ 包含特征: X517, X510
120
+ 权重: ['0.530', '0.470']
121
+ 组 29: X814 (IC=0.0322)
122
+ 包含特征: X814, X806, X810
123
+ 权重: ['0.370', '0.283', '0.347']
124
+ 组 30: X519 (IC=0.0328)
125
+ 包含特征: X519, X512
126
+ 权重: ['0.526', '0.474']
127
+ 组 31: X266 (IC=0.0327)
128
+ 包含特征: X266, X265, X267
129
+ 权重: ['0.348', '0.314', '0.338']
130
+ 组 32: X588 (IC=0.0330)
131
+ 包含特征: X588, X581
132
+ 权重: ['0.515', '0.485']
133
+ 组 33: X44 (IC=0.0295)
134
+ 包含特征: X44, X38, X39, X40, X41, X42, X43, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54
135
+ 权重: ['0.066', '0.056', '0.055', '0.065', '0.057', '0.065', '0.059', '0.056', '0.066', '0.056', '0.065', '0.056', '0.064', '0.050', '0.057', '0.050', '0.056']
136
+ 组 34: X111 (IC=0.0321)
137
+ 包含特征: X111, X105, X117, X153
138
+ 权重: ['0.258', '0.238', '0.248', '0.257']
139
+ 组 35: X77 (IC=0.0239)
140
+ 包含特征: X77, X65, X71, X113, X119, X161
141
+ 权重: ['0.230', '0.176', '0.213', '0.110', '0.152', '0.119']
142
+ 组 36: X238 (IC=0.0260)
143
+ 包含特征: X238, X239, X245, X246, X485, X492
144
+ 权重: ['0.211', '0.197', '0.206', '0.202', '0.092', '0.092']
145
+ 组 37: X773 (IC=0.0244)
146
+ 包含特征: X773, X765, X768, X769, X772, X776, X777, X859
147
+ 权重: ['0.167', '0.118', '0.122', '0.150', '0.134', '0.082', '0.124', '0.103']
148
+ 组 38: X384 (IC=0.0276)
149
+ 包含特征: X384, X342, X372, X378, X420, X426
150
+ 权重: ['0.194', '0.180', '0.151', '0.181', '0.133', '0.161']
151
+ 组 39: X319 (IC=0.0314)
152
+ 包含特征: X319, X313
153
+ 权重: ['0.510', '0.490']
154
+ 组 40: X690 (IC=0.0252)
155
+ 包含特征: X690, X678, X684, X696
156
+ 权重: ['0.316', '0.183', '0.185', '0.316']
157
+ 组 41: X203 (IC=0.0216)
158
+ 包含特征: X203, X195, X202, X209, X210
159
+ 权重: ['0.292', '0.168', '0.159', '0.136', '0.244']
160
+ 组 42: X147 (IC=0.0281)
161
+ 包含特征: X147, X141, X159
162
+ 权重: ['0.374', '0.274', '0.352']
163
+ 组 43: X604 (IC=0.0308)
164
+ 包含特征: X604, X603, X605
165
+ 权重: ['0.340', '0.323', '0.337']
166
+ 组 44: X94 (IC=0.0221)
167
+ 包含特征: X94, X82, X88, X130, X136, X178
168
+ 权重: ['0.236', '0.155', '0.199', '0.145', '0.168', '0.097']
169
+ 组 45: X133 (IC=0.0297)
170
+ 包含特征: X133, X127
171
+ 权重: ['0.519', '0.481']
172
+ 组 46: X274 (IC=0.0300)
173
+ 包含特征: X274, X273, X275
174
+ 权重: ['0.338', '0.331', '0.331']
175
+ 组 47: X163 (IC=0.0270)
176
+ 包含特征: X163, X121, X151, X157
177
+ 权重: ['0.280', '0.259', '0.214', '0.247']
178
+ 组 48: X431 (IC=0.0251)
179
+ 包含特征: X431, X430, X433, X434, X436, X437
180
+ 权重: ['0.200', '0.146', '0.162', '0.185', '0.153', '0.155']
181
+ 组 49: X842 (IC=0.0211)
182
+ 包含特征: X842, X834, X838
183
+ 权重: ['0.451', '0.186', '0.363']
184
+ 组 50: X553 (IC=0.0274)
185
+ 包含特征: X553, X560
186
+ 权重: ['0.513', '0.487']
187
+ 组 51: X330 (IC=0.0250)
188
+ 包含特征: X330, X324
189
+ 权重: ['0.557', '0.443']
190
+ 组 52: X233 (IC=0.0268)
191
+ 包含特征: X233, X232
192
+ 权重: ['0.510', '0.490']
193
+ 组 53: X829 (IC=0.0213)
194
+ 包含特征: X829, X821, X824, X825, X828, X832, X833
195
+ 权重: ['0.183', '0.116', '0.136', '0.152', '0.156', '0.107', '0.151']
196
+ 组 54: X880 (IC=0.0230)
197
+ 包含特征: X880, X878, X879, X881
198
+ 权重: ['0.297', '0.203', '0.266', '0.234']
199
+ 组 55: X56 (IC=0.0257)
200
+ 包含特征: X56, X55
201
+ 权重: ['0.527', '0.473']
202
+ 组 56: X187 (IC=0.0231)
203
+ 包含特征: X187, X33, X34, X35, X184, X185, X188, X190, X191
204
+ 权重: ['0.129', '0.084', '0.115', '0.093', '0.128', '0.099', '0.109', '0.126', '0.117']
205
+ 组 57: X566 (IC=0.0255)
206
+ 包含特征: X566, X559
207
+ 权重: ['0.522', '0.478']
208
+ 组 58: X115 (IC=0.0242)
209
+ 包含特征: X115, X109
210
+ 权重: ['0.542', '0.458']
211
+ 组 59: X237 (IC=0.0192)
212
+ 包含特征: X237, X236, X243, X244, X484, X491
213
+ 权重: ['0.222', '0.175', '0.158', '0.203', '0.126', '0.117']
214
+ 组 60: X36 (IC=0.0242)
215
+ 包含特征: X36, X37
216
+ 权重: ['0.526', '0.474']
217
+ 组 61: X579 (IC=0.0201)
218
+ 包含特征: X579, X572, X586
219
+ 权重: ['0.412', '0.373', '0.215']
220
+ 组 62: X123 (IC=0.0162)
221
+ 包含特征: X123, X129, X135, X165, X171
222
+ 权重: ['0.306', '0.202', '0.094', '0.244', '0.154']
223
+ 组 63: X506 (IC=0.0237)
224
+ 包含特征: X506, X508
225
+ 权重: ['0.519', '0.481']
226
+ 组 64: X247 (IC=0.0151)
227
+ 包含特征: X247, X240, X487, X494
228
+ 权重: ['0.400', '0.365', '0.109', '0.126']
229
+ 组 65: X686 (IC=0.0179)
230
+ 包含特征: X686, X662, X668, X674, X680, X683, X692, X695
231
+ 权重: ['0.169', '0.097', '0.063', '0.148', '0.105', '0.145', '0.123', '0.149']
232
+ 组 66: X602 (IC=0.0203)
233
+ 包含特征: X602, X600, X601
234
+ 权重: ['0.394', '0.279', '0.326']
235
+ 组 67: X216 (IC=0.0157)
236
+ 包含特征: X216, X215, X222
237
+ 权重: ['0.508', '0.229', '0.263']
238
+ 组 68: X79 (IC=0.0215)
239
+ 包含特征: X79, X67, X73, X85
240
+ 权重: ['0.273', '0.233', '0.266', '0.228']
241
+ 组 69: X479 (IC=0.0217)
242
+ 包含特征: X479, X478, X480
243
+ 权重: ['0.360', '0.302', '0.337']
244
+ 组 70: X63 (IC=0.0206)
245
+ 包含特征: X63, X57, X69, X75
246
+ 权重: ['0.276', '0.221', '0.271', '0.232']
247
+ 组 71: X865 (IC=0.0213)
248
+ 包含特征: X865, X17, X25
249
+ 权重: ['0.350', '0.302', '0.349']
250
+ 组 72: X231 (IC=0.0178)
251
+ 包含特征: X231, X230
252
+ 权重: ['0.625', '0.375']
253
+ 组 73: X609 (IC=0.0207)
254
+ 包含特征: X609, X607, X608
255
+ 权重: ['0.356', '0.309', '0.335']
256
+ 组 74: X513 (IC=0.0217)
257
+ 包含特征: X513, X515
258
+ 权重: ['0.503', '0.497']
259
+ 组 75: X653 (IC=0.0181)
260
+ 包含特征: X653, X641, X659, X665
261
+ 权重: ['0.284', '0.276', '0.230', '0.210']
262
+ 组 76: X10 (IC=0.0198)
263
+ 包含特征: X10, X11
264
+ 权重: ['0.513', '0.487']
265
+ 组 77: X366 (IC=0.0160)
266
+ 包含特征: X366, X354, X360, X408
267
+ 权重: ['0.317', '0.224', '0.279', '0.179']
268
+ 组 78: X318 (IC=0.0189)
269
+ 包含特征: X318, X312
270
+ 权重: ['0.523', '0.477']
271
+ 组 79: X428 (IC=0.0140)
272
+ 包含特征: X428, X380, X386, X416, X422
273
+ 权重: ['0.276', '0.198', '0.128', '0.158', '0.240']
274
+ 组 80: X786 (IC=0.0131)
275
+ 包含特征: X786, X778, X782
276
+ 权重: ['0.489', '0.146', '0.365']
277
+ 组 81: X14 (IC=0.0142)
278
+ 包含特征: X14, X9, X12, X13, X15, X16
279
+ 权重: ['0.226', '0.003', '0.195', '0.186', '0.212', '0.177']
280
+ 组 82: X394 (IC=0.0172)
281
+ 包含特征: X394, X388, X400, X406
282
+ 权重: ['0.279', '0.226', '0.268', '0.227']
283
+ 组 83: X764 (IC=0.0147)
284
+ 包含特征: X764, X760, X761
285
+ 权重: ['0.428', '0.262', '0.310']
286
+ 组 84: X677 (IC=0.0176)
287
+ 包含特征: X677, X671, X689
288
+ 权重: ['0.357', '0.288', '0.356']
289
+ 组 85: X352 (IC=0.0157)
290
+ 包含特征: X352, X346, X358, X364
291
+ 权重: ['0.282', '0.264', '0.252', '0.202']
292
+ 组 86: X499 (IC=0.0156)
293
+ 包含特征: X499, X501
294
+ 权重: ['0.565', '0.435']
295
+ 组 87: X726 (IC=0.0132)
296
+ 包含特征: X726, X724, X725
297
+ 权重: ['0.437', '0.192', '0.371']
298
+ 组 88: X145 (IC=0.0158)
299
+ 包含特征: X145, X103
300
+ 权重: ['0.548', '0.452']
301
+ 组 89: X362 (IC=0.0132)
302
+ 包含特征: X362, X356, X368, X398, X404, X410
303
+ 权重: ['0.216', '0.209', '0.201', '0.133', '0.128', '0.113']
304
+ 组 90: X340 (IC=0.0147)
305
+ 包含特征: X340, X328, X334
306
+ 权重: ['0.385', '0.260', '0.355']
307
+ 组 91: X418 (IC=0.0163)
308
+ 包含特征: X418, X412, X424
309
+ 权重: ['0.346', '0.322', '0.332']
310
+ 组 92: X470 (IC=0.0107)
311
+ 包含特征: X470, X442, X449, X456, X463, X469, X476, X477
312
+ 权重: ['0.196', '0.166', '0.162', '0.120', '0.150', '0.090', '0.029', '0.086']
313
+ 组 93: X382 (IC=0.0154)
314
+ 包含特征: X382, X370, X376
315
+ 权重: ['0.360', '0.293', '0.346']
316
+ 组 94: X882 (IC=0.0149)
317
+ 包含特征: X882, X883
318
+ 权重: ['0.557', '0.443']
319
+ 组 95: X124 (IC=0.0106)
320
+ 包含特征: X124, X118, X166, X172
321
+ 权重: ['0.384', '0.192', '0.187', '0.236']
322
+ 组 96: X310 (IC=0.0138)
323
+ 包含特征: X310, X304, X316, X322
324
+ 权重: ['0.296', '0.274', '0.250', '0.180']
325
+ 组 97: X650 (IC=0.0133)
326
+ 包含特征: X650, X638, X656
327
+ 权重: ['0.406', '0.384', '0.209']
328
+ 组 98: X820 (IC=0.0129)
329
+ 包含特征: X820, X816, X817
330
+ 权重: ['0.417', '0.288', '0.295']
331
+ 组 99: X81 (IC=0.0134)
332
+ 包含特征: X81, X87, X93
333
+ 权重: ['0.396', '0.342', '0.262']
334
+ 组 100: X162 (IC=0.0154)
335
+ 包含特征: X162, X150, X156, X168
336
+ 权重: ['0.257', '0.250', '0.253', '0.240']
337
+ 组 101: X2 (IC=0.0115)
338
+ 包含特征: X2, X3, X4
339
+ 权重: ['0.456', '0.355', '0.189']
340
+ 组 102: X746 (IC=0.0079)
341
+ 包含特征: X746, X738, X742
342
+ 权重: ['0.662', '0.055', '0.283']
343
+ 组 103: X688 (IC=0.0146)
344
+ 包含特征: X688, X664, X676, X682, X694
345
+ 权重: ['0.214', '0.170', '0.211', '0.198', '0.206']
346
+ 组 104: X565 (IC=0.0114)
347
+ 包含特征: X565, X558
348
+ 权重: ['0.683', '0.317']
349
+ 组 105: X629 (IC=0.0123)
350
+ 包含特征: X629, X617
351
+ 权重: ['0.628', '0.372']
352
+ 组 106: X520 (IC=0.0139)
353
+ 包含特征: X520, X522
354
+ 权重: ['0.552', '0.448']
355
+ 组 107: X781 (IC=0.0083)
356
+ 包含特征: X781, X780, X784, X785
357
+ 权重: ['0.461', '0.223', '0.069', '0.248']
358
+ 组 108: X837 (IC=0.0094)
359
+ 包含特征: X837, X836, X840, X841
360
+ 权重: ['0.404', '0.218', '0.072', '0.306']
361
+ 组 109: X802 (IC=0.0096)
362
+ 包含特征: X802, X794, X798
363
+ 权重: ['0.518', '0.180', '0.303']
364
+ 组 110: X235 (IC=0.0118)
365
+ 包含特征: X235, X242, X482, X489
366
+ 权重: ['0.316', '0.285', '0.200', '0.198']
367
+ 组 111: X649 (IC=0.0135)
368
+ 包含特征: X649, X625, X637, X643, X655, X661, X667
369
+ 权重: ['0.156', '0.122', '0.152', '0.143', '0.145', '0.148', '0.134']
370
+ 组 112: X427 (IC=0.0109)
371
+ 包含特征: X427, X415, X421
372
+ 权重: ['0.421', '0.253', '0.326']
373
+ 组 113: X673 (IC=0.0122)
374
+ 包含特征: X673, X679, X685, X691
375
+ 权重: ['0.281', '0.258', '0.236', '0.225']
376
+ 组 114: X60 (IC=0.0112)
377
+ 包含特征: X60, X66
378
+ 权重: ['0.594', '0.406']
379
+ 组 115: X110 (IC=0.0113)
380
+ 包含特征: X110, X62, X68, X74, X80, X104, X116, X122, X146, X152, X158, X164, X309, X315, X321, X351, X357, X363, X393, X399, X405
381
+ 权重: ['0.056', '0.054', '0.056', '0.051', '0.034', '0.054', '0.051', '0.034', '0.054', '0.056', '0.051', '0.034', '0.052', '0.048', '0.039', '0.052', '0.048', '0.039', '0.052', '0.048', '0.039']
382
+ 组 116: X91 (IC=0.0085)
383
+ 包含特征: X91, X97
384
+ 权重: ['0.777', '0.223']
385
+ 组 117: X262 (IC=0.0092)
386
+ 包含特征: X262, X256, X260, X261, X263
387
+ 权重: ['0.288', '0.017', '0.148', '0.278', '0.269']
388
+ 组 118: X890 (IC=0.0061)
389
+ 包含特征: X890, X888, X889
390
+ 权重: ['0.714', '0.041', '0.245']
391
+ 组 119: X350 (IC=0.0108)
392
+ 包含特征: X350, X392
393
+ 权重: ['0.602', '0.398']
394
+ 组 120: X766 (IC=0.0103)
395
+ 包含特征: X766, X762, X770, X774
396
+ 权重: ['0.306', '0.264', '0.272', '0.158']
397
+ 组 121: X483 (IC=0.0119)
398
+ 包含特征: X483, X490
399
+ 权重: ['0.514', '0.486']
400
+ 组 122: X155 (IC=0.0094)
401
+ 包含特征: X155, X107, X149
402
+ 权重: ['0.430', '0.342', '0.228']
403
+ 组 123: X460 (IC=0.0048)
404
+ 包含特征: X460, X439, X461, X467
405
+ 权重: ['0.615', '0.035', '0.319', '0.031']
406
+ 组 124: X853 (IC=0.0089)
407
+ 包含特征: X853, X854
408
+ 权重: ['0.656', '0.344']
409
+ 组 125: X353 (IC=0.0085)
410
+ 包含特征: X353, X311, X317, X347, X359, X365, X395, X401
411
+ 权重: ['0.172', '0.152', '0.157', '0.149', '0.165', '0.121', '0.048', '0.037']
412
+ 组 126: X102 (IC=0.0104)
413
+ 包含特征: X102, X108
414
+ 权重: ['0.558', '0.442']
415
+ 组 127: X345 (IC=0.0068)
416
+ 包含特征: X345, X92, X98, X134, X140, X176, X182, X333, X339, X375, X381, X387, X417, X423, X429
417
+ 权重: ['0.111', '0.053', '0.099', '0.053', '0.099', '0.053', '0.099', '0.010', '0.059', '0.010', '0.059', '0.111', '0.010', '0.059', '0.111']
418
+ 组 128: X90 (IC=0.0074)
419
+ 包含特征: X90, X78, X84, X96
420
+ 权重: ['0.384', '0.032', '0.289', '0.295']
421
+ 组 129: X76 (IC=0.0090)
422
+ 包含特征: X76, X64, X70, X112, X160
423
+ 权重: ['0.254', '0.230', '0.231', '0.138', '0.148']
424
+ 组 130: X670 (IC=0.0097)
425
+ 包含特征: X670, X652, X658
426
+ 权重: ['0.388', '0.320', '0.291']
427
+ 组 131: X257 (IC=0.0092)
428
+ 包含特征: X257, X258
429
+ 权重: ['0.610', '0.390']
430
+ 组 132: sell_qty (IC=0.0095)
431
+ 包含特征: sell_qty, buy_qty, volume, X594, X596
432
+ 权重: ['0.235', '0.118', '0.185', '0.232', '0.230']
433
+ 组 133: X326 (IC=0.0094)
434
+ 包含特征: X326, X314, X320, X332
435
+ 权重: ['0.293', '0.196', '0.251', '0.260']
436
+ 组 134: X493 (IC=0.0109)
437
+ 包含特征: X493, X486
438
+ 权重: ['0.505', '0.495']
439
+ 组 135: X631 (IC=0.0080)
440
+ 包含特征: X631, X613, X619
441
+ 权重: ['0.454', '0.285', '0.261']
442
+ 组 136: X174 (IC=0.0079)
443
+ 包含特征: X174, X180
444
+ 权重: ['0.673', '0.327']
445
+ 组 137: X741 (IC=0.0091)
446
+ 包含特征: X741, X736, X737, X740, X744, X745
447
+ 权重: ['0.194', '0.143', '0.163', '0.174', '0.151', '0.174']
448
+ 组 138: X845 (IC=0.0044)
449
+ 包含特征: X845, X214, X722, X723
450
+ 权重: ['0.548', '0.121', '0.024', '0.308']
451
+ 组 139: X718 (IC=0.0060)
452
+ 包含特征: X718, X719, X720, X721
453
+ 权重: ['0.394', '0.317', '0.212', '0.078']
454
+ 组 140: X516 (IC=0.0082)
455
+ 包含特征: X516, X509
456
+ 权重: ['0.573', '0.427']
457
+ 组 141: X448 (IC=0.0057)
458
+ 包含特征: X448, X440, X441, X447, X454, X455, X468, X475
459
+ 权重: ['0.205', '0.116', '0.186', '0.151', '0.114', '0.154', '0.074', '0.001']
460
+ 组 142: X344 (IC=0.0085)
461
+ 包含特征: X344, X338
462
+ 权重: ['0.536', '0.464']
463
+ 组 143: X687 (IC=0.0050)
464
+ 包含特征: X687, X675, X681, X693
465
+ 权重: ['0.444', '0.126', '0.060', '0.370']
466
+ 组 144: X402 (IC=0.0066)
467
+ 包含特征: X402, X396
468
+ 权重: ['0.671', '0.329']
469
+ 组 145: X323 (IC=0.0048)
470
+ 包含特征: X323, X329
471
+ 权重: ['0.904', '0.096']
472
+ 组 146: X248 (IC=0.0079)
473
+ 包含特征: X248, X253, X254, X255
474
+ 权重: ['0.274', '0.216', '0.260', '0.251']
475
+ 组 147: X523 (IC=0.0064)
476
+ 包含特征: X523, X530
477
+ 权重: ['0.673', '0.327']
478
+ 组 148: X801 (IC=0.0059)
479
+ 包含特征: X801, X793, X796, X797, X800, X804, X805
480
+ 权重: ['0.207', '0.093', '0.134', '0.173', '0.158', '0.118', '0.116']
481
+ 组 149: X192 (IC=0.0058)
482
+ 包含特征: X192, X193, X199, X200, X206, X220, X227
483
+ 权重: ['0.205', '0.071', '0.193', '0.073', '0.196', '0.084', '0.178']
484
+ 组 150: X640 (IC=0.0067)
485
+ 包含特征: X640, X628, X634, X646
486
+ 权重: ['0.301', '0.215', '0.201', '0.283']
487
+ 组 151: X818 (IC=0.0072)
488
+ 包含特征: X818, X822
489
+ 权重: ['0.555', '0.445']
490
+ 组 152: X249 (IC=0.0042)
491
+ 包含特征: X249, X250, X251
492
+ 权重: ['0.623', '0.223', '0.154']
493
+ 组 153: X148 (IC=0.0063)
494
+ 包含特征: X148, X106, X142, X154
495
+ 权重: ['0.300', '0.233', '0.167', '0.299']
496
+ 组 154: X58 (IC=0.0055)
497
+ 包含特征: X58, X100
498
+ 权重: ['0.690', '0.310']
499
+ 组 155: X752 (IC=0.0038)
500
+ 包含特征: X752, X748, X749, X753, X756, X757
501
+ 权重: ['0.322', '0.155', '0.009', '0.109', '0.313', '0.092']
502
+ 组 156: X114 (IC=0.0037)
503
+ 包含特征: X114, X120, X126
504
+ 权重: ['0.588', '0.244', '0.168']
505
+ 组 157: order_flow_imbalance (IC=0.0060)
506
+ 包含特征: order_flow_imbalance, selling_pressure
507
+ 权重: ['0.504', '0.496']
508
+ 组 158: X229 (IC=0.0038)
509
+ 包含特征: X229, X228
510
+ 权重: ['0.783', '0.217']
511
+ 组 159: X1 (IC=0.0029)
512
+ 包含特征: X1, X5, X6, X7, X8
513
+ 权重: ['0.411', '0.071', '0.106', '0.177', '0.235']
514
+ 组 160: X409 (IC=0.0045)
515
+ 包含特征: X409, X397, X403
516
+ 权重: ['0.440', '0.283', '0.277']
517
+ 组 161: X733 (IC=0.0055)
518
+ 包含特征: X733, X732
519
+ 权重: ['0.534', '0.466']
520
+ 组 162: X194 (IC=0.0045)
521
+ 包含特征: X194, X201, X207, X208, X221
522
+ 权重: ['0.255', '0.242', '0.159', '0.191', '0.152']
523
+ 组 163: X666 (IC=0.0037)
524
+ 包含特征: X666, X654, X660, X672
525
+ 权重: ['0.384', '0.064', '0.216', '0.336']
526
+ 组 164: X411 (IC=0.0025)
527
+ 包含特征: X411, X86, X128, X170, X327, X369
528
+ 权重: ['0.325', '0.008', '0.008', '0.008', '0.325', '0.325']
529
+ 组 165: X826 (IC=0.0033)
530
+ 包含特征: X826, X830
531
+ 权重: ['0.749', '0.251']
532
+ 组 166: X234 (IC=0.0040)
533
+ 包含特征: X234, X241, X481, X488
534
+ 权重: ['0.298', '0.253', '0.236', '0.213']
535
+ 组 167: X597 (IC=0.0040)
536
+ 包含特征: X597, X595
537
+ 权重: ['0.542', '0.458']
538
+ 组 168: X657 (IC=0.0016)
539
+ 包含特征: X657, X639, X651, X663, X669
540
+ 权重: ['0.520', '0.068', '0.060', '0.108', '0.243']
541
+ 组 169: X413 (IC=0.0017)
542
+ 包含特征: X413, X371, X377, X407, X419, X425
543
+ 权重: ['0.386', '0.025', '0.161', '0.003', '0.210', '0.216']
544
+ 组 170: X808 (IC=0.0016)
545
+ 包含特征: X808, X809, X812, X813
546
+ 权重: ['0.510', '0.373', '0.062', '0.055']
547
+ 组 171: X138 (IC=0.0019)
548
+ 包含特征: X138, X132
549
+ 权重: ['0.830', '0.170']
550
+ 组 172: X446 (IC=0.0022)
551
+ 包含特征: X446, X453, X474
552
+ 权重: ['0.457', '0.320', '0.223']
553
+ 组 173: X792 (IC=0.0013)
554
+ 包含特征: X792, X788, X789
555
+ 权重: ['0.740', '0.118', '0.142']
556
+ 组 174: X624 (IC=0.0026)
557
+ 包含特征: X624, X618
558
+ 权重: ['0.565', '0.435']
559
+ 组 175: X621 (IC=0.0024)
560
+ 包含特征: X621, X615
561
+ 权重: ['0.613', '0.387']
562
+ 组 176: X636 (IC=0.0023)
563
+ 包含特征: X636, X630
564
+ 权重: ['0.577', '0.423']
565
+ 组 177: X616 (IC=0.0024)
566
+ 包含特征: X616, X622
567
+ 权重: ['0.553', '0.447']
568
+ 组 178: X885 (IC=0.0017)
569
+ 包含特征: X885, X884, X886, X887
570
+ 权重: ['0.371', '0.334', '0.242', '0.053']
571
+ 组 179: X874 (IC=0.0019)
572
+ 包含特征: X874, X873
573
+ 权重: ['0.503', '0.497']
574
+ 组 180: X335 (IC=0.0007)
575
+ 包含特征: X335, X341, X383
576
+ 权重: ['0.452', '0.231', '0.318']
ZMJ/data_processed/ic_values.csv ADDED
@@ -0,0 +1,904 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ,0
2
+ sell_qty,0.011165790013440683
3
+ ask_qty,0.01576156636733563
4
+ buy_qty,0.005617629417558399
5
+ X1,0.006009626297946501
6
+ volume,0.00880928321686586
7
+ X2,0.015751598790973508
8
+ bid_qty,0.013220012317315321
9
+ X3,0.012269485842255193
10
+ X4,0.00654239449422849
11
+ X5,0.0010348928441024636
12
+ X7,0.0025944308821192327
13
+ X8,0.0034300916332591818
14
+ X10,0.020332801489482785
15
+ X9,0.00028580908342140166
16
+ X11,0.01929229926777724
17
+ X6,0.001548050610163923
18
+ X12,0.0165588715634825
19
+ X13,0.015827167242547596
20
+ X14,0.019205324757393307
21
+ X16,0.015038233692562328
22
+ X18,0.0495389286089903
23
+ X19,0.06229317282289419
24
+ X17,0.019308888760557745
25
+ X20,0.06766745905539914
26
+ X15,0.01801380256953214
27
+ X21,0.06940129957714401
28
+ X22,0.057675788985933445
29
+ X26,0.051697030980441185
30
+ X23,0.04495868378089756
31
+ X25,0.02232073770118373
32
+ X27,0.062251351915023165
33
+ X28,0.0640924668700726
34
+ X24,0.033804815625741495
35
+ X29,0.06233881415688355
36
+ X30,0.04959420617388743
37
+ X33,0.017364207888062616
38
+ X31,0.04083370346268456
39
+ X35,0.01935431667141867
40
+ X36,0.025447235242806194
41
+ X34,0.02398556527445634
42
+ X32,0.032325233211784425
43
+ X37,0.022956211669709656
44
+ X38,0.02795916333831952
45
+ X39,0.027772217360705318
46
+ X41,0.028453309524691398
47
+ X42,0.032786534146259996
48
+ X43,0.029359757138521316
49
+ X44,0.03309283242317208
50
+ X40,0.03246273431140262
51
+ X45,0.028187752253932954
52
+ X46,0.032909379715677894
53
+ X47,0.02819078295406666
54
+ X49,0.028018660517079203
55
+ X51,0.025232368496274946
56
+ X50,0.032185682871376534
57
+ X48,0.03270084271421543
58
+ X52,0.028611374031482027
59
+ X53,0.02489357290431081
60
+ X54,0.02805955207026172
61
+ X56,0.02708418198354691
62
+ X57,0.01818910773647039
63
+ X58,0.007577917684088424
64
+ X59,0.01534699169057386
65
+ X60,0.013268220237658888
66
+ X55,0.024265722686556607
67
+ X61,0.012208632097557221
68
+ X63,0.022774806280935023
69
+ X62,0.012693382069210677
70
+ X67,0.020068242180993203
71
+ X65,0.025288687433257757
72
+ X66,0.00907155581211408
73
+ X68,0.01325047827893222
74
+ X64,0.010306872323105442
75
+ X70,0.010362017605785458
76
+ X69,0.022313902534032794
77
+ X73,0.022930617113019382
78
+ X71,0.030534110676999775
79
+ X74,0.012165448248978339
80
+ X77,0.03303197883423314
81
+ X75,0.01914283426334949
82
+ X79,0.023489078739195295
83
+ X78,0.0009420697457429854
84
+ X76,0.011379908702989303
85
+ X72,0.0049679650800335635
86
+ X80,0.008046271001358036
87
+ X81,0.01593323128158368
88
+ X82,0.020462738410646823
89
+ X84,0.008592577364157427
90
+ X86,0.0001202324781391542
91
+ X85,0.0195864031220606
92
+ X83,0.03572965303857348
93
+ X87,0.013768460470153149
94
+ X88,0.026294949922472194
95
+ X89,0.04052014893258882
96
+ X90,0.011400905431695038
97
+ X91,0.013215487591812916
98
+ X92,0.005490030279697772
99
+ X94,0.031305769536799324
100
+ X93,0.010539851904178537
101
+ X95,0.04294833761960832
102
+ X96,0.008760860343302914
103
+ X97,0.0037947174551185435
104
+ X98,0.0101981901114295
105
+ X99,0.02268229774279479
106
+ X100,0.0034113535917924104
107
+ X101,0.0011613177208819278
108
+ X102,0.011608067662744059
109
+ X103,0.014275807197569743
110
+ X104,0.012693382069210677
111
+ X105,0.030543003747560023
112
+ X107,0.00960969580439658
113
+ X108,0.00918454102101371
114
+ X106,0.005899379718680667
115
+ X109,0.022215766640047225
116
+ X111,0.03308689336173107
117
+ X110,0.01325047827893222
118
+ X112,0.006185143977872367
119
+ X114,0.006492917419956656
120
+ X115,0.026277660981014236
121
+ X113,0.015842023978840797
122
+ X116,0.012165448248978339
123
+ X117,0.03181311109313795
124
+ X119,0.021755400323134073
125
+ X118,0.008184800114662124
126
+ X120,0.002694578376939187
127
+ X121,0.02802883003942629
128
+ X122,0.008046271001358036
129
+ X123,0.024687315905733184
130
+ X125,0.03270885085007737
131
+ X124,0.016338076417826544
132
+ X126,0.001858994544980292
133
+ X127,0.028608367876344284
134
+ X128,0.0001202324781391542
135
+ X129,0.016308131485285052
136
+ X131,0.040582262726368695
137
+ X130,0.019223099577685185
138
+ X132,0.0006543664529083002
139
+ X133,0.030838448531144202
140
+ X134,0.005490030279697772
141
+ X135,0.007609332364634602
142
+ X136,0.022301552247880465
143
+ X138,0.0031921652780032457
144
+ X139,0.030552188737768198
145
+ X137,0.04429001859855588
146
+ X140,0.0101981901114295
147
+ X141,0.023042845128059892
148
+ X142,0.004235982613046026
149
+ X145,0.017292792022015235
150
+ X143,0.0019933391640393327
151
+ X144,0.01363650076175092
152
+ X146,0.012693382069210677
153
+ X147,0.031495573785063145
154
+ X150,0.015340435037503288
155
+ X149,0.006417382791704757
156
+ X148,0.007607130695117981
157
+ X152,0.01325047827893222
158
+ X153,0.03298982703124171
159
+ X151,0.023104291599572186
160
+ X154,0.007572777762347976
161
+ X155,0.012104282733329634
162
+ X157,0.026754581806821814
163
+ X156,0.015561773693076462
164
+ X159,0.029654594230513594
165
+ X160,0.006645180613341763
166
+ X162,0.015826905453806918
167
+ X158,0.012165448248978339
168
+ X161,0.01699996064135944
169
+ X164,0.008046271001358036
170
+ X165,0.01969959131962092
171
+ X167,0.025575270174227914
172
+ X166,0.00797484520088552
173
+ X163,0.03029886933487298
174
+ X169,0.03847601071389171
175
+ X170,0.0001202324781391542
176
+ X168,0.014749713182852075
177
+ X171,0.012466512005420378
178
+ X172,0.010055361507981147
179
+ X174,0.010663218662580406
180
+ X176,0.005490030279697772
181
+ X173,0.031776339675040594
182
+ X175,0.04581511374937557
183
+ X177,0.006842485571387934
184
+ X178,0.012840398293201938
185
+ X179,0.034397211073118716
186
+ X180,0.005172808602252609
187
+ X181,0.04916403319775866
188
+ X182,0.0101981901114295
189
+ X185,0.020489574294610912
190
+ X186,0.03734676725079289
191
+ X183,0.031730441381844804
192
+ X184,0.026552091041314445
193
+ X187,0.026794552458108262
194
+ X188,0.022605215393942033
195
+ X191,0.02436859374316991
196
+ X189,0.03485990908002203
197
+ X190,0.02621294178041448
198
+ X192,0.008331880770956143
199
+ X193,0.002882603579195492
200
+ X196,0.03453819898752205
201
+ X194,0.005797419666617228
202
+ X195,0.01813785360358937
203
+ X197,0.040228207552348766
204
+ X198,0.04286448308122376
205
+ X200,0.002956859849027475
206
+ X199,0.00781712350453114
207
+ X201,0.005498581860449145
208
+ X202,0.01717770044299138
209
+ X203,0.03151333070055543
210
+ X205,0.0368172839832081
211
+ X204,0.03554017155796463
212
+ X206,0.00794135047740499
213
+ X208,0.0043338452833440715
214
+ X207,0.003616688561134941
215
+ X209,0.01468297489158659
216
+ X210,0.026244140127870824
217
+ X211,0.028824214825896293
218
+ X213,0.006027340837544057
219
+ X212,0.02893725318341518
220
+ X214,0.0021162782904006455
221
+ X215,0.010741882431978537
222
+ X216,0.02385776743387177
223
+ X217,0.04609159741312273
224
+ X218,0.05367577723545755
225
+ X220,0.003427212211532864
226
+ X221,0.0034532696338179466
227
+ X219,0.05671258349737499
228
+ X223,0.02456730457636394
229
+ X222,0.012361959439243814
230
+ X224,0.04170717675273872
231
+ X225,0.04502749481163463
232
+ X226,0.045167778630829346
233
+ X227,0.007226328108337774
234
+ X229,0.006013905941975985
235
+ X228,0.0016677272943551738
236
+ X231,0.0222024274440164
237
+ X233,0.027319134855787686
238
+ X232,0.026220575462856287
239
+ X230,0.01332799184085372
240
+ X234,0.004802599279103908
241
+ X235,0.014915328427032113
242
+ X236,0.02016651376292459
243
+ X237,0.025646871398892806
244
+ X239,0.030652976130057196
245
+ X238,0.03288083499821081
246
+ X241,0.004080853463394878
247
+ X240,0.022108300720443737
248
+ X242,0.013477383770803065
249
+ X243,0.01821433852582617
250
+ X244,0.023394550818409618
251
+ X245,0.0320808658876768
252
+ X249,0.00781607971112136
253
+ X246,0.031458217776739385
254
+ X247,0.024210763067772034
255
+ X248,0.00863641572532207
256
+ X250,0.0027962684704145946
257
+ X251,0.0019318570757931224
258
+ X252,0.00515208968271676
259
+ X255,0.00789104052422053
260
+ X253,0.006788801858526989
261
+ X254,0.00817527909654802
262
+ X256,0.0007592684874167758
263
+ X257,0.011270578272792906
264
+ X258,0.0071947205377904
265
+ X260,0.0068110323303332086
266
+ X259,0.0053586658138714445
267
+ X261,0.012756228867117175
268
+ X263,0.012351675221826626
269
+ X264,0.029664898637745345
270
+ X262,0.01320555850144599
271
+ X265,0.03083543653798079
272
+ X267,0.033153158810700765
273
+ X266,0.03411762779330201
274
+ X268,0.03388656581803048
275
+ X269,0.03962107379275058
276
+ X271,0.03848886311034982
277
+ X270,0.03875202640359222
278
+ X272,0.04253566247716398
279
+ X274,0.03045878258773289
280
+ X275,0.029810274240332505
281
+ X273,0.029779784480246364
282
+ X276,0.032973727817054405
283
+ X277,0.042090016419938744
284
+ X278,0.04043904861919386
285
+ X279,0.0399028729101604
286
+ X280,0.03952704679369435
287
+ X281,0.04786508179780874
288
+ X282,0.041294235420860345
289
+ X283,0.04983782500741813
290
+ X284,0.04376298279116218
291
+ X285,0.052865461293543504
292
+ X286,0.04866142874455701
293
+ X288,0.04889133105146001
294
+ X287,0.055892868911022124
295
+ X289,0.05589197750170329
296
+ X290,0.048879292680438294
297
+ X293,0.05529117296384762
298
+ X292,0.05150887683057305
299
+ X294,0.05130548343745833
300
+ X291,0.055435097195834934
301
+ X295,0.05463728050716738
302
+ X296,0.05072460123909159
303
+ X297,0.05328322542690132
304
+ X298,0.05310381859226198
305
+ X299,0.05236608777359549
306
+ X301,0.051464941364311
307
+ X300,0.052631468067681736
308
+ X302,0.051736262039276
309
+ X303,0.04988449271900805
310
+ X304,0.015115450772781404
311
+ X306,0.013953873767093956
312
+ X305,0.009297012223438934
313
+ X307,0.026854817464545794
314
+ X308,0.00534034257442587
315
+ X309,0.012227045059450103
316
+ X311,0.010286769745277096
317
+ X310,0.016328941631575146
318
+ X312,0.018049055700084506
319
+ X314,0.007370533776718553
320
+ X313,0.030764854544905008
321
+ X315,0.011343486452670895
322
+ X317,0.010653294584636291
323
+ X316,0.013834108405729954
324
+ X318,0.019755274146099105
325
+ X319,0.03200075360083825
326
+ X320,0.009451915423723607
327
+ X321,0.009330154366034476
328
+ X322,0.009955897654046046
329
+ X324,0.022153922628416256
330
+ X323,0.008713511333356804
331
+ X325,0.03422793548485072
332
+ X326,0.011020616977703792
333
+ X327,0.004964570848726991
334
+ X329,0.0009253743928405479
335
+ X328,0.011480873944642924
336
+ X330,0.027857545963232713
337
+ X331,0.03631603617736357
338
+ X332,0.009786741972240599
339
+ X334,0.015661324167114778
340
+ X333,0.0010543713990520094
341
+ X335,0.000976978174210585
342
+ X336,0.031051559424361136
343
+ X337,0.03201458707345958
344
+ X340,0.016961942354104798
345
+ X339,0.006045303318496352
346
+ X338,0.007851404022343118
347
+ X341,0.000498957878007569
348
+ X343,0.027815697857855277
349
+ X344,0.009060565920824132
350
+ X342,0.02985719161300178
351
+ X346,0.0166218637161054
352
+ X345,0.011438585908311288
353
+ X347,0.010129776444548867
354
+ X348,0.0074992830624918405
355
+ X349,0.029479656398089846
356
+ X350,0.012993672635989183
357
+ X351,0.012227045059450103
358
+ X352,0.017752772506635595
359
+ X353,0.011630740768134407
360
+ X354,0.014360202438445706
361
+ X355,0.035867191399074995
362
+ X356,0.016560404242590143
363
+ X357,0.011343486452670895
364
+ X360,0.01786664297047221
365
+ X358,0.015879653363168204
366
+ X359,0.011193245588155543
367
+ X361,0.037180487197274216
368
+ X362,0.01715178345613917
369
+ X363,0.009330154366034476
370
+ X364,0.01272459215799313
371
+ X365,0.008200552598189624
372
+ X366,0.020283749882732665
373
+ X367,0.03770372684000478
374
+ X369,0.004964570848726991
375
+ X368,0.015954528082885708
376
+ X370,0.013577251202526976
377
+ X371,0.0002580360578592943
378
+ X374,0.014137738874577517
379
+ X372,0.024979062081955748
380
+ X375,0.0010543713990520094
381
+ X373,0.03813565139723776
382
+ X376,0.016019059486055507
383
+ X377,0.0016664007538047315
384
+ X378,0.02996770273994749
385
+ X379,0.03794460211066158
386
+ X380,0.013864904214159049
387
+ X381,0.006045303318496352
388
+ X382,0.016674784452924366
389
+ X383,0.0006869604635523245
390
+ X384,0.032097530928572406
391
+ X385,0.03809464465373621
392
+ X386,0.00892984272760343
393
+ X388,0.015516466610183714
394
+ X387,0.011438585908311288
395
+ X389,0.003239395154835977
396
+ X390,0.0023214346148472
397
+ X391,0.004887673001145214
398
+ X393,0.012227045059450103
399
+ X394,0.019201784813083028
400
+ X392,0.008577188750002959
401
+ X396,0.004336040919130043
402
+ X395,0.00323401260909416
403
+ X397,0.003782817687480835
404
+ X398,0.010554168956545937
405
+ X399,0.011343486452670895
406
+ X400,0.018426861383463364
407
+ X401,0.0024775595714782665
408
+ X403,0.003709282191637838
409
+ X404,0.010157359953035084
410
+ X402,0.008835946809441777
411
+ X406,0.01558222908644609
412
+ X405,0.009330154366034476
413
+ X407,2.792132455082131e-05
414
+ X408,0.01148717057281973
415
+ X409,0.005875428372046327
416
+ X411,0.004964570848726991
417
+ X410,0.008990675307716989
418
+ X412,0.015701086156328908
419
+ X413,0.00398114170937874
420
+ X414,0.01587301353942877
421
+ X415,0.00828652411850645
422
+ X416,0.011032771196386537
423
+ X418,0.016880404757385194
424
+ X417,0.0010543713990520094
425
+ X419,0.0021659062855295045
426
+ X420,0.022010904826082346
427
+ X421,0.010708317086460846
428
+ X423,0.006045303318496352
429
+ X422,0.016800665513200676
430
+ X425,0.0022252818616368296
431
+ X424,0.016176302313515873
432
+ X427,0.013807009745532546
433
+ X426,0.026630274349096702
434
+ X428,0.01927169677538422
435
+ X430,0.021898633029896225
436
+ X429,0.011438585908311288
437
+ X431,0.030079767071827412
438
+ X432,0.03734260767135224
439
+ X433,0.024408452050988606
440
+ X435,0.036952683486087004
441
+ X434,0.027752935329207834
442
+ X436,0.022994963513477556
443
+ X438,0.03311542406361313
444
+ X437,0.0232416623268677
445
+ X439,0.0006674076273566988
446
+ X440,0.0053046727900784435
447
+ X442,0.014225618925895599
448
+ X441,0.008517663726876432
449
+ X443,0.03160692993429311
450
+ X444,0.03945923552438475
451
+ X445,0.03847565190003187
452
+ X446,0.0030565442473429166
453
+ X447,0.006925826553175941
454
+ X448,0.009391731108401406
455
+ X449,0.013879886828492375
456
+ X450,0.028654784568285553
457
+ X452,0.0342146409757117
458
+ X451,0.03532866510146789
459
+ X453,0.0021416103257284294
460
+ X454,0.0052396304757588615
461
+ X456,0.010218912048981867
462
+ X455,0.007056477863367114
463
+ X457,0.02175995826852137
464
+ X458,0.027053100624689966
465
+ X460,0.01179206527007791
466
+ X459,0.02613119302137511
467
+ X461,0.00610544813647415
468
+ X462,0.00043370218338843573
469
+ X463,0.012848357209391894
470
+ X465,0.04953812582425797
471
+ X464,0.04033763404028344
472
+ X466,0.049228334452943945
473
+ X467,0.0005979717119699696
474
+ X468,0.0033881051369683193
475
+ X469,0.007685825483870779
476
+ X471,0.03707210771504421
477
+ X470,0.01673665198093028
478
+ X472,0.04246242914286378
479
+ X474,0.0014884753337954136
480
+ X473,0.04019964914248776
481
+ X475,6.298768554620273e-05
482
+ X476,0.0025105676696775357
483
+ X477,0.007376651961858709
484
+ X478,0.01966994682803971
485
+ X480,0.021931338665365896
486
+ X479,0.023444568662026655
487
+ X481,0.0037933191605447466
488
+ X482,0.009456081492703813
489
+ X483,0.012227628925595592
490
+ X484,0.014504507166841114
491
+ X485,0.014401739806055185
492
+ X486,0.010803910374854175
493
+ X487,0.006573198322241548
494
+ X489,0.00935766397058297
495
+ X488,0.0034270507400633054
496
+ X490,0.011559619845561928
497
+ X492,0.014357170304625369
498
+ X491,0.013454130753045596
499
+ X493,0.011001477913856656
500
+ X495,0.006353651469653068
501
+ X496,0.017572029862777082
502
+ X497,0.02886487059569618
503
+ X494,0.007621631433950686
504
+ X499,0.017589309325062075
505
+ X498,0.022737458238146543
506
+ X500,0.014795334814436397
507
+ X501,0.013516682096938844
508
+ X502,0.007102224662157954
509
+ X503,0.027664784124541267
510
+ X504,0.037431152300813474
511
+ X505,0.02808419401296176
512
+ X506,0.02459729559049703
513
+ X507,0.019694315336865942
514
+ X508,0.022755646248770425
515
+ X509,0.006993292839321513
516
+ X510,0.0320748269109876
517
+ X511,0.0367726500006249
518
+ X512,0.031114046611546257
519
+ X514,0.02168095568693507
520
+ X515,0.021584258837756914
521
+ X513,0.021806087315372404
522
+ X516,0.009398230414242616
523
+ X517,0.03614880523691168
524
+ X518,0.03187091234297883
525
+ X519,0.034542211530895456
526
+ X520,0.015370205031353938
527
+ X521,0.027395803529844
528
+ X522,0.012461414592708657
529
+ X524,0.04802013567400222
530
+ X523,0.00856697789299001
531
+ X525,0.0198212932022529
532
+ X526,0.02925843985586276
533
+ X527,0.005158103286663351
534
+ X529,0.010665814227123746
535
+ X528,0.02498965092507624
536
+ X530,0.004165433245888023
537
+ X531,0.05618361361403625
538
+ X532,0.030601998655923682
539
+ X533,0.03710041831258064
540
+ X534,0.0013293427861043247
541
+ X536,0.012308813330731722
542
+ X535,0.02560570170433261
543
+ X537,0.004721675034502699
544
+ X538,0.04099174076600001
545
+ X540,0.03742205579551493
546
+ X539,0.029653227351439312
547
+ X542,0.03122523701731007
548
+ X541,0.00027286465365741665
549
+ X543,0.010987778309195007
550
+ X544,4.211922161061338e-05
551
+ X546,0.01977980375587412
552
+ X547,0.017076700050764844
553
+ X545,0.012760868988627628
554
+ X548,0.012706293941809875
555
+ X549,0.017839577925667812
556
+ X550,0.013534716268347046
557
+ X551,0.003331454741458525
558
+ X552,0.020890445206327966
559
+ X554,0.029353831462913954
560
+ X553,0.028138617010427358
561
+ X555,0.013475953016055225
562
+ X556,0.026573257956355137
563
+ X559,0.024388436421656685
564
+ X558,0.007205396733588569
565
+ X557,0.010738686134352483
566
+ X560,0.02673470328124766
567
+ X563,0.01813375553299066
568
+ X561,0.026699020419041257
569
+ X562,0.015624952478745171
570
+ X564,0.008757526385577578
571
+ X565,0.015515157101454246
572
+ X566,0.02663574146165673
573
+ X568,0.02508164707594524
574
+ X567,0.021091635071447155
575
+ X569,0.011058995430675529
576
+ X570,0.013973782068801005
577
+ X571,0.004565121121850348
578
+ X572,0.022527184368686552
579
+ X574,0.019503402991749963
580
+ X573,0.03472478182667333
581
+ X575,0.00035972505129474425
582
+ X576,0.03281278968326802
583
+ X578,0.018259220972135323
584
+ X577,0.002515018470042229
585
+ X579,0.024823312205901754
586
+ X580,0.04172458904654195
587
+ X581,0.03201246659649179
588
+ X582,0.012573595321147376
589
+ X583,0.03412015486713785
590
+ X584,0.0027885537822756237
591
+ X585,0.022471794514023598
592
+ X586,0.012969158411920038
593
+ X587,0.035917611827566094
594
+ X588,0.03403859606170242
595
+ X589,0.016362968742996944
596
+ X590,0.02722336239576063
597
+ X591,0.005202253581104186
598
+ X592,0.007565904948598441
599
+ X593,0.01285899740499138
600
+ X594,0.011059213565168187
601
+ X595,0.003707164552688389
602
+ X596,0.01093572782830869
603
+ X597,0.004387906538154478
604
+ X598,0.054527755682121384
605
+ X599,0.01476557221811526
606
+ X600,0.01703685476113392
607
+ X601,0.019903325690859338
608
+ X603,0.029827344475402928
609
+ X602,0.02406277096382
610
+ X604,0.031407057390806056
611
+ X605,0.03115843839837678
612
+ X606,0.01500296831777802
613
+ X608,0.020785439238087827
614
+ X607,0.019171039096985868
615
+ X609,0.022094209449879304
616
+ X610,0.027743705753122874
617
+ X611,0.03438629083472148
618
+ X612,0.04061425206068214
619
+ X613,0.006802600089049049
620
+ X614,0.006787142150752563
621
+ X616,0.0026093316854809328
622
+ X615,0.0018385886722819657
623
+ X617,0.009132872446091286
624
+ X618,0.0022625779022328683
625
+ X621,0.0029094414758438587
626
+ X619,0.006242624221814225
627
+ X622,0.0021052183440793595
628
+ X620,0.004915890948794204
629
+ X623,0.007695518495453411
630
+ X624,0.002941719943783813
631
+ X625,0.0114819783366659
632
+ X626,0.011645601197147457
633
+ X627,0.001535519450341619
634
+ X628,0.005801250491160117
635
+ X629,0.01543437270388279
636
+ X630,0.0019246140380028303
637
+ X631,0.010852816115796951
638
+ X632,0.007455340999907042
639
+ X633,0.0035188671566097297
640
+ X634,0.005431152652918018
641
+ X635,0.014415339705721125
642
+ X636,0.002623010529335238
643
+ X637,0.014338509407605409
644
+ X638,0.015334318974029149
645
+ X642,0.0017286660923145293
646
+ X639,0.0005366782536744723
647
+ X640,0.008108190421653306
648
+ X641,0.019955800139309348
649
+ X644,0.009292651437215617
650
+ X643,0.013462342146901913
651
+ X645,0.004458670578635313
652
+ X646,0.007620914409814536
653
+ X647,0.018207984979476167
654
+ X648,0.0035117669053162403
655
+ X649,0.014671413723246318
656
+ X650,0.016225304317315455
657
+ X651,0.000471530546651396
658
+ X653,0.0205321715801488
659
+ X652,0.009331525441641514
660
+ X654,0.0009390003331521383
661
+ X655,0.013655415543528067
662
+ X656,0.008361579660947844
663
+ X657,0.004085847590628451
664
+ X658,0.008493165881913007
665
+ X659,0.016637821981365535
666
+ X660,0.003166510434759787
667
+ X661,0.013895906541256924
668
+ X662,0.013920014448508287
669
+ X663,0.000851832770835647
670
+ X664,0.012444468000828799
671
+ X665,0.01516206998773945
672
+ X666,0.005635761266991576
673
+ X667,0.012645200876932175
674
+ X668,0.009064909103222833
675
+ X670,0.011322154910258092
676
+ X669,0.0019113035733135239
677
+ X671,0.015210649436339662
678
+ X672,0.004939087142730333
679
+ X673,0.013758620633175686
680
+ X674,0.02116310035929281
681
+ X675,0.002500286021268377
682
+ X676,0.015401799469385894
683
+ X677,0.01883827586871801
684
+ X678,0.01849662220976336
685
+ X679,0.012629660568329582
686
+ X680,0.015036355087687109
687
+ X681,0.001193663120383472
688
+ X683,0.02081787928886514
689
+ X682,0.014446721301616075
690
+ X684,0.01864308164695379
691
+ X687,0.008839154705720768
692
+ X685,0.011565541616818407
693
+ X686,0.024187496831472848
694
+ X688,0.015653600731238008
695
+ X689,0.0187912664629739
696
+ X691,0.011005725163480043
697
+ X690,0.031847808295059
698
+ X692,0.01753162897469928
699
+ X693,0.00736604628624425
700
+ X694,0.015068590870286398
701
+ X695,0.0213795219966739
702
+ X696,0.03182787755083162
703
+ X697,
704
+ X698,
705
+ X699,
706
+ X702,
707
+ X700,
708
+ X701,
709
+ X703,
710
+ X705,
711
+ X704,
712
+ X707,
713
+ X706,
714
+ X708,
715
+ X709,
716
+ X711,
717
+ X712,
718
+ X710,
719
+ X713,
720
+ X717,
721
+ X715,
722
+ X714,
723
+ X716,
724
+ X718,0.009457993754945004
725
+ X721,0.0018671778678267315
726
+ X719,0.007621036759886809
727
+ X720,0.00508266823165767
728
+ X722,0.00041963071776523645
729
+ X724,0.007627076843920589
730
+ X723,0.005400758027977596
731
+ X725,0.01468597088940295
732
+ X726,0.01731113297651062
733
+ X729,0.03451753412793433
734
+ X727,0.026971256166292753
735
+ X730,0.026503842571771456
736
+ X728,0.02321473532495592
737
+ X731,0.03937010303197312
738
+ X732,0.0051023999462277226
739
+ X733,0.005857356282121524
740
+ X734,0.003328231538286187
741
+ X735,0.00662426772030378
742
+ X736,0.007839198951606568
743
+ X737,0.008949104600815203
744
+ X738,0.0013047654972600077
745
+ X740,0.009530445151010395
746
+ X739,0.006731623350020346
747
+ X741,0.01062820202586949
748
+ X742,0.006724758140987537
749
+ X743,0.0054711058912251045
750
+ X745,0.009551997869648518
751
+ X744,0.008281386908726833
752
+ X746,0.015731320601958067
753
+ X747,0.0032029624219608805
754
+ X749,0.0001924354904351321
755
+ X748,0.00351326108957656
756
+ X750,0.03418492048302036
757
+ X751,0.006935654219009256
758
+ X753,0.0024697797572265133
759
+ X752,0.007280034335204959
760
+ X754,0.0411880116098168
761
+ X755,0.02333796146821493
762
+ X756,0.007080643929230414
763
+ X757,0.0020882138653961313
764
+ X758,0.04146452100721608
765
+ X759,0.02745285489486186
766
+ X761,0.013720913256680482
767
+ X760,0.011581769785129696
768
+ X762,0.010917849216945249
769
+ X763,0.011613157040094982
770
+ X765,0.022977757845687723
771
+ X764,0.018921111760867977
772
+ X766,0.012641816745130374
773
+ X770,0.011253824441069213
774
+ X767,0.012958050549466266
775
+ X769,0.029310569862042734
776
+ X768,0.023832472342753996
777
+ X771,0.014719337781706383
778
+ X772,0.026046609163355782
779
+ X773,0.032444108074526544
780
+ X774,0.006525316008111323
781
+ X775,0.013252320015948069
782
+ X776,0.01607020987355814
783
+ X777,0.02410233447444815
784
+ X781,0.01536369088361011
785
+ X780,0.00742579143440711
786
+ X778,0.005747357196470954
787
+ X779,0.004581127423623524
788
+ X782,0.014402362859179056
789
+ X783,0.0010980644199857091
790
+ X784,0.0022990384110193386
791
+ X785,0.008273111762437993
792
+ X786,0.019267153183417943
793
+ X788,0.000478488477540488
794
+ X787,0.0018770626506715718
795
+ X789,0.0005743574756732422
796
+ X792,0.0029899297529808137
797
+ X791,0.0018047978250534404
798
+ X790,0.0010510546274008245
799
+ X793,0.003849497350441939
800
+ X795,0.00018470213595348826
801
+ X794,0.005183910214476209
802
+ X796,0.005514841598927894
803
+ X797,0.007130879694779356
804
+ X801,0.008527292006270948
805
+ X799,0.0017605402318868956
806
+ X800,0.006514482261348192
807
+ X798,0.008742274213048712
808
+ X803,0.0027540251228215444
809
+ X804,0.0048600073997061375
810
+ X805,0.004774387162698313
811
+ X802,0.014937761268070907
812
+ X806,0.027319302825886068
813
+ X808,0.003330071712592306
814
+ X807,0.01826271695630487
815
+ X809,0.0024352638735362676
816
+ X810,0.03354016733145779
817
+ X812,0.00040372169697806045
818
+ X813,0.00035623228846241015
819
+ X814,0.0356869899094025
820
+ X811,0.025866141188072022
821
+ X815,0.023863156009089795
822
+ X817,0.011364233621504353
823
+ X818,0.007985945636623712
824
+ X816,0.011107842211523892
825
+ X819,0.009131988751408571
826
+ X820,0.016100971785304178
827
+ X821,0.017326093455108237
828
+ X823,0.013663647105019952
829
+ X824,0.020283531970469993
830
+ X825,0.02271730794521153
831
+ X826,0.0048985871849534296
832
+ X822,0.006394060379167369
833
+ X828,0.02323950245782004
834
+ X827,0.017393143284950712
835
+ X829,0.02726801505751245
836
+ X831,0.0210970101438473
837
+ X832,0.01603439414243064
838
+ X833,0.022529855772244575
839
+ X834,0.011763352992927667
840
+ X830,0.0016413187138757186
841
+ X835,0.014967848792296998
842
+ X836,0.00821785832133803
843
+ X838,0.022968273208048395
844
+ X839,0.012105941132643051
845
+ X840,0.002704768413188096
846
+ X837,0.015224769791920067
847
+ X841,0.011517471235324499
848
+ X842,0.028516300553349395
849
+ X843,0.0037902397742158797
850
+ X844,0.01449061205816509
851
+ X845,0.009612274245240948
852
+ X846,0.014575365760658883
853
+ X847,0.0030809994038687063
854
+ X848,0.009547639363191663
855
+ X850,0.02771470787289373
856
+ X852,0.04068629885231368
857
+ X851,0.007517190854361444
858
+ X854,0.006138833566092506
859
+ X853,0.01168317532100576
860
+ X849,0.011104578876167524
861
+ X855,0.037702173190174944
862
+ X856,0.04727261815748245
863
+ X857,0.05468596363733885
864
+ X858,0.05732029603524905
865
+ X859,0.02007516421342708
866
+ X860,0.056258822130222125
867
+ X863,0.06405738275143834
868
+ X861,0.045514811573915834
869
+ X862,0.040827642919763804
870
+ X864,
871
+ X865,0.022374691114163715
872
+ X866,0.028888661046387747
873
+ X868,0.03778984343758759
874
+ X871,
875
+ X870,
876
+ X869,
877
+ X867,
878
+ X873,0.001883405226228079
879
+ X874,0.0019070735822866018
880
+ X872,
881
+ X876,0.0105051309323369
882
+ X875,0.01662466300872184
883
+ X877,0.010606541305084056
884
+ X879,0.024398139029589098
885
+ X878,0.018658979685034462
886
+ X881,0.021525129940913657
887
+ X883,0.013195271643438242
888
+ X882,0.016577921420096075
889
+ X884,0.0022380002551989253
890
+ X880,0.027224954783997914
891
+ X885,0.0024919561501807416
892
+ X887,0.0003554317373171134
893
+ X886,0.0016227242980143418
894
+ X889,0.004510706868926807
895
+ X890,0.013163234885488786
896
+ volume_weighted_sell,0.000920180159340804
897
+ buy_sell_ratio,0.0038328322728427722
898
+ X888,0.0007600458032861104
899
+ selling_pressure,0.00598292992874158
900
+ log_volume,0.009152478116209888
901
+ effective_spread_proxy,0.00280868848193676
902
+ bid_ask_imbalance,0.011018573520529186
903
+ order_flow_imbalance,0.006074116379646094
904
+ liquidity_ratio,0.005108214037839909
ZMJ/data_processed/test_aggregated.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78849b15de7197ff26cc84104a045625f4b64308fa9b035883f5b547122a5c49
3
+ size 1254913844
ZMJ/data_processed/train_aggregated.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:616b9c4135e67380ba5b7649d69ec91d1d5e5cca2f47dac3ff6e365020c00222
3
+ size 1216012686
ZMJ/data_processed_7_16/alpha_selected.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
import xgboost as xgb
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.linear_model import (
    HuberRegressor, RANSACRegressor, TheilSenRegressor,
    Lasso, ElasticNet, Ridge
)
from sklearn.cross_decomposition import PLSRegression
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import pearsonr
import warnings
import torch
import matplotlib.pyplot as plt
import seaborn as sns
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import combinations
import time

# Pre-processed frames produced by data_processed.py (already clipped/scaled).
train_df = pd.read_pickle('train_df.pkl')
test_df = pd.read_pickle('test_df.pkl')

# NOTE(review): `length` and `df` are never referenced again in this script.
length = len(train_df)
df = pd.concat([train_df, test_df], axis=0)
LABEL_COLUMN = 'label'
feature_cols = [col for col in train_df.columns if col != LABEL_COLUMN]

X = train_df[feature_cols].values
y = train_df[LABEL_COLUMN].values

# Convert X to a numpy array, drop all-zero columns, and keep feature_cols in sync.
X_np = np.asarray(X)
nonzero_col_idx = np.where((X_np != 0).any(axis=0))[0]
X = X_np[:, nonzero_col_idx]
feature_cols = [feature_cols[i] for i in nonzero_col_idx]

X_np = np.asarray(X)
y_np = np.asarray(y)
# Per-feature Pearson correlation (IC) with the label.
corrs = np.array([np.corrcoef(X_np[:, i], y_np)[0, 1] for i in range(X_np.shape[1])])

# Flip the sign of negatively correlated features so every IC points the same way.
X_adj = X_np.copy()
neg_idx = np.where(corrs < 0)[0]
X_adj[:, neg_idx] = -X_adj[:, neg_idx]

# Keep only features whose |IC| exceeds 0.01.
selected_idx = np.where(np.abs(corrs) > 0.01)[0]

# Slice out the retained columns and their names.
X_selected = X_adj[:, selected_idx]
selected_features = [feature_cols[i] for i in selected_idx]
56
+
57
def max_ic_factor_selection(X, y, feature_cols, threshold=0.9):
    """Greedy de-correlation of the feature set.

    Features whose pairwise |correlation| exceeds *threshold* are grouped
    into a cluster anchored at the earliest free feature; from each cluster
    only the member with the largest |IC| (absolute Pearson correlation
    with *y*) is kept.

    Returns:
        (X_reduced, names_reduced): the surviving columns of X and the
        matching entries of feature_cols, in selection order.
    """
    X = np.asarray(X)
    corr = np.corrcoef(X, rowvar=False)
    n = X.shape[1]
    taken = set()   # indices already absorbed into some cluster
    keep = []       # one representative index per cluster

    for anchor in range(n):
        if anchor in taken:
            continue
        # The anchor plus every later, still-free feature highly correlated with it.
        cluster = [anchor] + [
            j for j in range(anchor + 1, n)
            if j not in taken and abs(corr[anchor, j]) > threshold
        ]
        if len(cluster) > 1:
            # Within the cluster, keep the feature with the strongest |IC|.
            scores = [abs(pearsonr(X[:, k], y)[0]) for k in cluster]
            keep.append(cluster[int(np.argmax(scores))])
        else:
            keep.append(anchor)
        taken.update(cluster)

    return X[:, keep], [feature_cols[i] for i in keep]
82
+
83
# Run greedy max-IC factor selection before training to reduce collinearity.
n_train = train_df.shape[0]
X_selected, selected_features = max_ic_factor_selection(X_selected, y[:n_train], selected_features, threshold=0.9)

X_train = X_selected
X_test = test_df[selected_features].values

y_train = y
# NOTE(review): assumes the test parquet carries a (possibly dummy) label
# column; y_test is not used below -- confirm before relying on it.
y_test = test_df[LABEL_COLUMN].values

# Fixed: removed a leftover `breakpoint()` here, which dropped into the
# debugger (or aborted) on every non-interactive run of this script.

kf = KFold(n_splits=5, shuffle=True, random_state=42)
95
+
96
+ import math
97
+
98
# Cosine-annealing learning-rate schedule.
def cosine_annealing(epoch, initial_lr=0.01, T_max=5000, eta_min=1e-4):
    """Decay from initial_lr at epoch 0 to eta_min at epoch T_max along half a cosine wave."""
    phase = math.pi * epoch / T_max
    span = initial_lr - eta_min
    return eta_min + 0.5 * span * (1.0 + math.cos(phase))
101
+
102
# XGBoost parameters: deeper trees + stronger regularization + early stopping.
xgb_params = {
    'n_estimators': 10000,        # many rounds; early stopping picks the best
    'learning_rate': 0.01,
    'max_depth': 10,              # deeper trees
    'subsample': 0.85,            # row sampling ratio
    'colsample_bytree': 0.85,     # feature sampling ratio
    'tree_method': 'hist',
    'device': 'gpu',
    'predictor': 'gpu_predictor',
    'random_state': 42,
    'reg_alpha': 5,               # stronger L1 regularization
    'reg_lambda': 10,             # stronger L2 regularization
    'min_child_weight': 5,        # min sum of instance weight per leaf
    'gamma': 0.2,                 # min loss reduction required to split
    # Fixed: the constructor argument is 'early_stopping_rounds' (plural).
    # The previous misspelled key 'early_stopping_round' was forwarded as an
    # unknown booster parameter, so early stopping never engaged.
    'early_stopping_rounds': 100,
    # Removed 'verbose_eval': it is an argument of xgb.train()/xgb.cv(), not
    # an XGBRegressor constructor parameter (use verbose= in fit instead).
    'eval_metric': 'rmse',
    'callbacks': [
        xgb.callback.LearningRateScheduler(cosine_annealing)
    ]
}
124
print("start training")
val_scores = []
test_preds = np.zeros(X_test.shape[0])

# 5-fold CV: average the test predictions across folds.
for train_idx, val_idx in kf.split(X_train):
    X_tr, X_val = X_train[train_idx], X_train[val_idx]
    y_tr, y_val = y_train[train_idx], y_train[val_idx]
    model = XGBRegressor(**xgb_params)
    model.fit(
        X_tr, y_tr,
        eval_set=[(X_val, y_val)],
        # eval_metric='rmse',
    )
    val_pred = model.predict(X_val)
    val_score = np.sqrt(np.mean((val_pred - y_val) ** 2))  # RMSE
    val_scores.append(val_score)
    test_preds += model.predict(X_test) / kf.n_splits

print(f"平均验证RMSE: {np.mean(val_scores):.6f}")

# Persist fold-averaged predictions as a Kaggle-style submission.
result_df = pd.DataFrame({
    'ID': np.arange(1, len(test_preds) + 1),
    'prediction': test_preds
})
result_df.to_csv('xgb_prediction-3.csv', index=False)
# Fixed: the confirmation message previously named 'xgb_prediction.csv'
# although the file is written to 'xgb_prediction-3.csv'.
print('预测结果已保存到 xgb_prediction-3.csv')
ZMJ/data_processed_7_16/data_processed.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pandas as pd
3
+ import numpy as np
4
+ from sklearn.model_selection import KFold
5
+ from xgboost import XGBRegressor
6
+ from lightgbm import LGBMRegressor
7
+ from sklearn.linear_model import (
8
+ HuberRegressor, RANSACRegressor, TheilSenRegressor,
9
+ Lasso, ElasticNet, Ridge
10
+ )
11
+ from sklearn.cross_decomposition import PLSRegression
12
+ from sklearn.preprocessing import StandardScaler, RobustScaler
13
+ from sklearn.ensemble import RandomForestRegressor
14
+ from scipy.stats import pearsonr
15
+ import warnings
16
+ import torch
17
+ import matplotlib.pyplot as plt
18
+ import seaborn as sns
19
+ from concurrent.futures import ThreadPoolExecutor, as_completed
20
+ from itertools import combinations
21
+ import time
22
+
23
# Absolute paths to the raw competition parquet files (cluster-specific).
TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/train.parquet"
TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/test.parquet"

train_df = pd.read_parquet(TRAIN_PATH)
test_df = pd.read_parquet(TEST_PATH)
28
+
29
+ # ===== Feature Engineering =====
30
def feature_engineering(df):
    """Derive liquidity/flow features and impute non-finite values.

    The engineered columns are added to *df* in place; the returned frame
    additionally has +/-inf converted to NaN and every NaN replaced by the
    column median (or 0 when the median itself is undefined).
    """
    eps = 1e-8  # guards every ratio against division by zero

    # Flow / pressure features from traded quantities.
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + eps)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + eps)
    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + eps)

    # Order-book / robustness features.
    df['log_volume'] = np.log1p(df['volume'])
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + eps)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + eps)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + eps)

    # Normalize infinities to NaN so a single imputation pass handles both.
    df = df.replace([np.inf, -np.inf], np.nan)

    # Median imputation per column; fall back to 0 for all-NaN columns.
    for col in df.columns:
        if not df[col].isna().any():
            continue
        fill = df[col].median()
        df[col] = df[col].fillna(0 if pd.isna(fill) else fill)

    return df
54
+
55
train_df = feature_engineering(train_df)
test_df = feature_engineering(test_df)
LABEL_COLUMN = 'label'
feature_cols = [col for col in train_df.columns if col != LABEL_COLUMN]
train_len = len(train_df)
# NOTE(review): df, X and y below are never referenced again in this script
# — candidates for removal.
df = pd.concat([train_df, test_df], axis=0)
X = train_df[feature_cols].values
y = train_df[LABEL_COLUMN].values
63
+
64
+ from sklearn.preprocessing import StandardScaler
65
+ import joblib
66
+
67
def clip_by_median_mad(df, n=3):
    """Winsorize every numeric column to median ± n * MAD.

    MAD here is the raw median absolute deviation (no 1.4826 scaling).
    Non-numeric columns pass through unchanged, appended after the clipped
    numeric block.
    """
    numeric = df.select_dtypes(include=[np.number])
    center = numeric.median()
    spread = (numeric - center).abs().median()
    clipped = numeric.clip(lower=center - n * spread,
                           upper=center + n * spread, axis=1)
    # Re-attach any non-numeric columns untouched.
    for col in df.columns:
        if col not in clipped.columns:
            clipped[col] = df[col]
    return clipped
79
+
80
# Winsorize, then standardize, features AND label together.
# NOTE(review): the label column is clipped/scaled alongside the features,
# the test frame is clipped by its own median/MAD, and the test parquet is
# assumed to carry a label column — confirm all three are intentional
# (the StandardScaler itself is fit on train only).
all_features = feature_cols + [LABEL_COLUMN]
train_df[all_features] = clip_by_median_mad(train_df[all_features])
test_df[all_features] = clip_by_median_mad(test_df[all_features])

scaler = StandardScaler()
train_df[all_features] = scaler.fit_transform(train_df[all_features])
test_df[all_features] = scaler.transform(test_df[all_features])

# Persist the fitted scaler so predictions can be inverse-transformed later.
joblib.dump(scaler, 'scaler.pkl')

train_df.to_pickle('train_df.pkl')
test_df.to_pickle('test_df.pkl')
92
+
ZMJ/data_processed_7_16/output.log ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Loading data...
2
+ Loaded data - Train: (525886, 270), Test: (538150, 270), Submission: (538150, 2)
3
+ Total features: 269
4
+
5
+ Training models...
6
+
7
+ --- Fold 1/5 ---
8
+ Training slice: full_data, samples: 420708
9
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
10
+ verbosity: Flag to print out detailed breakdown of runtime.
11
+ Training slice: last_75pct, samples: 394415
12
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
13
+ verbosity: Flag to print out detailed breakdown of runtime.
14
+ Training slice: last_50pct, samples: 262943
15
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
16
+ verbosity: Flag to print out detailed breakdown of runtime.
17
+
18
+ --- Fold 2/5 ---
19
+ Training slice: full_data, samples: 420709
20
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
21
+ verbosity: Flag to print out detailed breakdown of runtime.
22
+ Training slice: last_75pct, samples: 315531
23
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
24
+ verbosity: Flag to print out detailed breakdown of runtime.
25
+ Training slice: last_50pct, samples: 262943
26
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
27
+ verbosity: Flag to print out detailed breakdown of runtime.
28
+
29
+ --- Fold 3/5 ---
30
+ Training slice: full_data, samples: 420709
31
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
32
+ verbosity: Flag to print out detailed breakdown of runtime.
33
+ Training slice: last_75pct, samples: 289238
34
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
35
+ verbosity: Flag to print out detailed breakdown of runtime.
36
+ Training slice: last_50pct, samples: 210354
37
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
38
+ verbosity: Flag to print out detailed breakdown of runtime.
39
+
40
+ --- Fold 4/5 ---
41
+ Training slice: full_data, samples: 420709
42
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
43
+ verbosity: Flag to print out detailed breakdown of runtime.
44
+ Training slice: last_75pct, samples: 289238
45
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
46
+ verbosity: Flag to print out detailed breakdown of runtime.
47
+ Training slice: last_50pct, samples: 157766
48
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
49
+ verbosity: Flag to print out detailed breakdown of runtime.
50
+
51
+ --- Fold 5/5 ---
52
+ Training slice: full_data, samples: 420709
53
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
54
+ verbosity: Flag to print out detailed breakdown of runtime.
55
+ Training slice: last_75pct, samples: 289238
56
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
57
+ verbosity: Flag to print out detailed breakdown of runtime.
58
+ Training slice: last_50pct, samples: 157766
59
+ Error training xgb_baseline: value 100 for Parameter verbosity exceed bound [0,3]
60
+ verbosity: Flag to print out detailed breakdown of runtime.
61
+
62
+ Creating submissions...
63
+
64
+ XGBoost Baseline Score: nan
65
+
66
+ ==================================================
67
+ SUBMISSION SUMMARY:
68
+ ==================================================
69
+ xgb_baseline : nan
70
+
71
+ All submissions created successfully!
72
+ Files created:
73
+ - submission_xgb_baseline.csv (original baseline)
ZMJ/data_processed_7_16/scaler.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf51e9e850652ebdfbed065bc240ecccbe722816049ca6b76bb7f558aac04c1b
3
+ size 25495
ZMJ/data_processed_7_16/submission_xgb_baseline_59_pca.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31d3822df5d11aad9004c54ff1125d35e7f83b302da648e9a3b8dda0d27ceb9b
3
+ size 14177262
ZMJ/data_processed_7_16/test_df.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9aef44bd247a8a4f853979aaf0dd0c0961fff68d5a57b1fc2e2f6756ed3bd753
3
+ size 3418382946
ZMJ/data_processed_7_16/train.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pandas as pd
3
+ import numpy as np
4
+ from sklearn.model_selection import KFold
5
+ from xgboost import XGBRegressor
6
+ from lightgbm import LGBMRegressor
7
+ from sklearn.linear_model import (
8
+ HuberRegressor, RANSACRegressor, TheilSenRegressor,
9
+ Lasso, ElasticNet, Ridge
10
+ )
11
+ from sklearn.cross_decomposition import PLSRegression
12
+ from sklearn.preprocessing import StandardScaler, RobustScaler
13
+ from sklearn.ensemble import RandomForestRegressor
14
+ from scipy.stats import pearsonr
15
+ import warnings
16
+ from sklearn.decomposition import PCA
17
+ warnings.filterwarnings('ignore')
18
+
19
+ # ===== Feature Engineering =====
20
def feature_engineering(df):
    """Derive liquidity/flow features and impute non-finite values.

    The engineered columns are added to *df* in place; the returned frame
    additionally has +/-inf converted to NaN and every NaN replaced by the
    column median (or 0 when the median itself is undefined).
    """
    eps = 1e-8  # guards every ratio against division by zero

    # Flow / pressure features from traded quantities.
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + eps)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + eps)
    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + eps)

    # Order-book / robustness features.
    df['log_volume'] = np.log1p(df['volume'])
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + eps)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + eps)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + eps)

    # Normalize infinities to NaN so a single imputation pass handles both.
    df = df.replace([np.inf, -np.inf], np.nan)

    # Median imputation per column; fall back to 0 for all-NaN columns.
    for col in df.columns:
        if not df[col].isna().any():
            continue
        fill = df[col].median()
        df[col] = df[col].fillna(0 if pd.isna(fill) else fill)

    return df
+ return df
44
+
45
+ # ===== Configuration =====
46
class Config:
    """Central configuration: data paths, feature list, and CV settings."""

    # Raw competition data.
    ORIGIN_TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    ORIGIN_TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"
    # Aggregated (pre-processed) data actually used for training.
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/train_aggregated.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/test_aggregated.parquet"
    SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/threshold_6_29/sample_submission.csv"

    # Original features plus additional market features.
    # NOTE: load_data() overwrites this with every non-label aggregated column.
    FEATURES = [
        "X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
        "X415", "X345", "X855", "X174", "X302", "X178", "X168", "X612",
        "buy_qty", "sell_qty", "volume", "X888", "X421", "X333",
        "bid_qty", "ask_qty"
    ]

    MERGE = False
    LABEL_COLUMN = "label"
    N_FOLDS = 5
    RANDOM_STATE = 42
    # PCA-related settings.
    USE_PCA = False  # whether to apply PCA dimensionality reduction
    PCA_N_COMPONENTS = 20  # target number of components
68
+
69
def load_data():
    """Load the aggregated train/test parquet files and the sample submission.

    Side effect: Config.AGGREGATE_FEATURES is populated with every non-label
    column of the aggregated train set, and Config.FEATURES is overwritten
    with it — the hand-picked FEATURES list in Config is discarded.

    Returns:
        (train_df, test_df, submission_df) with fresh integer indices.
    """
    # NOTE(review): the two raw parquet frames are loaded but never used in
    # this function — consider dropping these reads.
    origin_train_df = pd.read_parquet(Config.ORIGIN_TRAIN_PATH)
    origin_test_df = pd.read_parquet(Config.ORIGIN_TEST_PATH)
    train_df = pd.read_parquet(Config.TRAIN_PATH)
    test_df = pd.read_parquet(Config.TEST_PATH)
    submission_df = pd.read_csv(Config.SUBMISSION_PATH)

    Config.AGGREGATE_FEATURES = [col for col in train_df.columns.tolist() if col != 'label']

    Config.FEATURES = Config.AGGREGATE_FEATURES
    merged_train_df = train_df
    merged_test_df = test_df

    print(f"Loaded data - Train: {merged_train_df.shape}, Test: {merged_test_df.shape}, Submission: {submission_df.shape}")
    print(f"Total features: {len(Config.FEATURES)}")

    return merged_train_df.reset_index(drop=True), merged_test_df.reset_index(drop=True), submission_df
87
+
88
# ===== Model Parameters =====
# Keep only the XGBoost parameters.
import math
import xgboost as xgb

# NOTE(review): this block runs at import time and loads the full dataset a
# second time (__main__ calls load_data() again); `dtrain` is only consumed
# by the commented-out xgb.cv experiment below — consider removing it.
train_data, _, _ = load_data()
X_train = train_data[Config.FEATURES].values
y_train = train_data[[Config.LABEL_COLUMN]].values
dtrain = xgb.DMatrix(X_train, label=y_train)
97
+
98
# Cosine-annealing learning-rate schedule.
def cosine_annealing(epoch, initial_lr=0.01, T_max=5000, eta_min=1e-4):
    """Decay from initial_lr at epoch 0 to eta_min at epoch T_max along half a cosine wave."""
    phase = math.pi * epoch / T_max
    span = initial_lr - eta_min
    return eta_min + 0.5 * span * (1.0 + math.cos(phase))
101
# XGBoost hyper-parameters. NOTE(review): the precise values (e.g. the very
# small subsample of 0.07567) presumably come from a hyper-parameter search
# — verify before reusing elsewhere.
XGB_PARAMS = {
    "objective": 'reg:squarederror',
    "tree_method": "hist",
    "device": "gpu",
    "colsample_bylevel": 0.4778,
    "colsample_bynode": 0.3628,
    "colsample_bytree": 0.7107,
    "gamma": 1.7095,
    # "learning_rate": 0.04426,
    "learning_rate": 0.2213,
    "max_depth": 20,
    "max_leaves": 12,
    "min_child_weight": 16,
    "n_estimators": 13508,
    "subsample": 0.07567,
    "reg_alpha": 19.3524,
    "reg_lambda": 35.4484,
    'predictor': 'gpu_predictor',
    'random_state': 42,
    'early_stopping_rounds': 50,  # stop slightly later
    'eval_metric': 'rmse',
    'verbosity': 1
}

# cv_results = xgb.cv(
#     XGB_PARAMS,
#     dtrain,
#     num_boost_round=20000,
#     nfold=5,
#     early_stopping_rounds=50,
#     verbose_eval=True,
#     as_pandas=True
# )
# breakpoint()

# XGBoost is the only learner kept.
LEARNERS = [
    {"name": "xgb_baseline", "Estimator": XGBRegressor, "params": XGB_PARAMS, "need_scale": False},
]
140
+
141
+ # ===== Data Loading =====
142
def create_time_decay_weights(n: int, decay: float = 0.9) -> np.ndarray:
    """Exponential time-decay sample weights, normalized to mean 1.

    The oldest sample gets weight ~decay and the newest ~1, so recent rows
    count more during training.

    Args:
        n: number of samples; must be >= 1.
        decay: base decay factor in (0, 1]; smaller means faster decay.

    Returns:
        Array of n positive weights whose sum equals n.

    Raises:
        ValueError: if n is not positive.
    """
    if n <= 0:
        raise ValueError("n must be a positive integer")
    if n == 1:
        # Fixed: previously `positions / (n - 1)` divided by zero for n == 1.
        return np.ones(1)
    positions = np.arange(n)
    normalized = positions / (n - 1)        # 0 for the oldest row, 1 for the newest
    weights = decay ** (1.0 - normalized)   # decay ... 1
    return weights * n / weights.sum()      # rescale so the mean weight is 1
148
+
149
+ # ===== Model Training =====
150
def get_model_slices(n_samples: int):
    """Training windows: full history plus the most recent 75% and 50%.

    Each slice is a dict {"name", "cutoff"} where cutoff is the index of the
    first row included (rows before it are dropped for that slice).
    """
    fractions = [
        ("full_data", 0.0),
        ("last_75pct", 0.25),
        ("last_50pct", 0.50),
    ]
    return [
        {"name": name, "cutoff": int(frac * n_samples)}
        for name, frac in fractions
    ]
157
+
158
def train_single_model(X_train, y_train, X_valid, y_valid, X_test, learner, sample_weights=None):
    """Train a single model with appropriate scaling if needed.

    Args:
        X_train, y_train: training split.
        X_valid, y_valid: validation split (eval set for XGBoost early stopping).
        X_test: features to predict on.
        learner: dict with keys "name", "Estimator", "params", "need_scale".
        sample_weights: optional per-row weights; forwarded only to XGBoost.

    Returns:
        (valid_pred, test_pred) prediction arrays.
    """
    if learner["need_scale"]:
        scaler = RobustScaler()  # More robust to outliers than StandardScaler
        X_train_scaled = scaler.fit_transform(X_train)
        X_valid_scaled = scaler.transform(X_valid)
        X_test_scaled = scaler.transform(X_test)
    else:
        X_train_scaled = X_train
        X_valid_scaled = X_valid
        X_test_scaled = X_test

    model = learner["Estimator"](**learner["params"])

    # Handle different model training approaches
    if learner["name"] in ["xgb_baseline"]:
        model.fit(X_train_scaled, y_train, sample_weight=sample_weights,
                  eval_set=[(X_valid_scaled, y_valid)],
                  # eval_metric='rmse',  # specify eval_metric directly in fit
                  # early_stopping_rounds=50,
                  verbose=True)
    else:
        # RANSAC, TheilSen, PLS don't support sample weights
        model.fit(X_train_scaled, y_train)

    valid_pred = model.predict(X_valid_scaled)
    test_pred = model.predict(X_test_scaled)

    return valid_pred, test_pred
187
+
188
def train_and_evaluate(train_df, test_df):
    """Cross-validated training of the XGBoost model over several time slices.

    For each KFold fold and each data slice (full history / last 75% /
    last 50%) one model is trained; out-of-fold predictions are stored per
    slice and test predictions are averaged over folds.

    Returns:
        (oof_preds, test_preds, model_slices) where the first two are
        {model_name: {slice_name: np.ndarray}} dicts.
    """
    n_samples = len(train_df)
    model_slices = get_model_slices(n_samples)

    # Per-slice prediction buffers.
    oof_preds = {
        "xgb_baseline": {s["name"]: np.zeros(n_samples) for s in model_slices}
    }
    test_preds = {
        "xgb_baseline": {s["name"]: np.zeros(len(test_df)) for s in model_slices}
    }

    full_weights = create_time_decay_weights(n_samples)
    # Fixed: seed the shuffled fold split with Config.RANDOM_STATE. It was
    # previously unseeded, so OOF scores were not reproducible even though
    # Config declares RANDOM_STATE = 42.
    kf = KFold(n_splits=Config.N_FOLDS, shuffle=True, random_state=Config.RANDOM_STATE)

    for fold, (train_idx, valid_idx) in enumerate(kf.split(train_df), start=1):
        print(f"\n--- Fold {fold}/{Config.N_FOLDS} ---")
        X_valid = train_df.iloc[valid_idx][Config.FEATURES]
        y_valid = train_df.iloc[valid_idx][Config.LABEL_COLUMN]
        X_test = test_df[Config.FEATURES]

        for s in model_slices:
            cutoff = s["cutoff"]
            slice_name = s["name"]
            subset = train_df.iloc[cutoff:].reset_index(drop=True)
            # Training indices that survive the slice cutoff, re-based onto the subset.
            rel_idx = train_idx[train_idx >= cutoff] - cutoff

            if len(rel_idx) == 0:
                continue

            X_train = subset.iloc[rel_idx][Config.FEATURES]
            y_train = subset.iloc[rel_idx][Config.LABEL_COLUMN]
            # Time-decay weights: recomputed over the subset for sliced runs.
            sw = create_time_decay_weights(len(subset))[rel_idx] if cutoff > 0 else full_weights[train_idx]

            print(f" Training slice: {slice_name}, samples: {len(X_train)}")

            # XGBoost is the only learner.
            learner = LEARNERS[0]
            try:
                valid_pred, test_pred = train_single_model(
                    X_train, y_train, X_valid, y_valid, X_test, learner, sw
                )
                # Store OOF predictions for validation rows inside this slice.
                mask = valid_idx >= cutoff
                if mask.any():
                    idxs = valid_idx[mask]
                    oof_preds[learner["name"]][slice_name][idxs] = valid_pred[mask]
                # Validation rows before the cutoff fall back to full-data predictions.
                if cutoff > 0 and (~mask).any():
                    oof_preds[learner["name"]][slice_name][valid_idx[~mask]] = \
                        oof_preds[learner["name"]]["full_data"][valid_idx[~mask]]
                test_preds[learner["name"]][slice_name] += test_pred
            except Exception as e:
                # Best-effort: log the failure and continue with the other slices/folds.
                print(f" Error training {learner['name']}: {str(e)}")
                continue
    # Normalize test predictions
    for slice_name in test_preds["xgb_baseline"]:
        test_preds["xgb_baseline"][slice_name] /= Config.N_FOLDS
    return oof_preds, test_preds, model_slices
247
+
248
+ # ===== Ensemble and Submission =====
249
def create_submissions(train_df, oof_preds, test_preds, submission_df):
    """Build the XGBoost-only submission file and report its OOF score.

    The score is the Pearson correlation between the true labels and the
    slice-averaged out-of-fold predictions.
    """
    all_submissions = {}
    # XGBoost only.
    if "xgb_baseline" in oof_preds:
        # Average OOF / test predictions across the three training slices.
        xgb_oof = np.mean(list(oof_preds["xgb_baseline"].values()), axis=0)
        xgb_test = np.mean(list(test_preds["xgb_baseline"].values()), axis=0)
        xgb_score = pearsonr(train_df[Config.LABEL_COLUMN], xgb_oof)[0]
        print(f"\nXGBoost Baseline Score: {xgb_score:.4f}")
        submission_xgb = submission_df.copy()
        submission_xgb["prediction"] = xgb_test
        submission_xgb.to_csv("/AI4M/users/mjzhang/workspace/DRW/ZMJ/data_processed_7_16/submission_xgb_baseline_59_pca.csv", index=False)
        all_submissions["xgb_baseline"] = xgb_score
    print("\n" + "="*50)
    print("SUBMISSION SUMMARY:")
    print("="*50)
    for name, score in sorted(all_submissions.items(), key=lambda x: x[1], reverse=True):
        print(f"{name:25s}: {score:.4f}")
    return all_submissions
268
+
269
# ===== Main Execution =====
if __name__ == "__main__":
    print("Loading data...")
    train_df, test_df, submission_df = load_data()

    print("\nTraining models...")
    oof_preds, test_preds, model_slices = train_and_evaluate(train_df, test_df)

    print("\nCreating submissions...")
    submission_scores = create_submissions(train_df, oof_preds, test_preds, submission_df)

    print("\nAll submissions created successfully!")
    print("Files created:")
    # Fixed: the summary previously named 'submission_xgb_baseline.csv', but
    # create_submissions() writes 'submission_xgb_baseline_59_pca.csv'.
    print("- submission_xgb_baseline_59_pca.csv (original baseline)")
ZMJ/data_processed_7_16/train_df.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8817730be609ad591801562a2415f2eb2db4a507a3bc380da7dbe4a9f2736de1
3
+ size 3344689432
ZMJ/data_processed_7_16/xgb_prediction-2.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdeb23e24591e9bd0cf0961213a5d6dc431312f5fbd429cd7fe67dc99608f39b
3
+ size 14476941
ZMJ/data_processed_7_16/xgb_prediction.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d0131de297b7213dc717883daaa29d065ec6a9832dd210cbb5088449a13886f
3
+ size 14525435
ZMJ/data_processed_new/correlation_matrix.csv ADDED
The diff for this file is too large to render. See raw diff
 
ZMJ/data_processed_new/feature_analysis.png ADDED

Git LFS Details

  • SHA256: e5e8b46e373c0b107f7b7c8f4a10a4716a942670a8450de3a3f644d3ab2e5e02
  • Pointer size: 131 Bytes
  • Size of remote file: 923 kB