import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
import xgboost as xgb
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.linear_model import (
HuberRegressor, RANSACRegressor, TheilSenRegressor,
Lasso, ElasticNet, Ridge
)
from sklearn.cross_decomposition import PLSRegression
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import pearsonr
import warnings
import torch
import matplotlib.pyplot as plt
import seaborn as sns
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import combinations
import time
# Load pre-pickled train/test frames; columns are the features plus 'label'.
train_df = pd.read_pickle('train_df.pkl')
test_df = pd.read_pickle('test_df.pkl')
length = len(train_df)  # number of training rows (not used below; kept as-is)
df = pd.concat([train_df, test_df], axis=0)  # combined frame (not used below; kept as-is)
LABEL_COLUMN = 'label'
feature_cols = [col for col in train_df.columns if col != LABEL_COLUMN]
X = train_df[feature_cols].values
y = train_df[LABEL_COLUMN].values
# Convert X to a numpy array, drop all-zero columns, and keep feature_cols in sync.
X_np = np.asarray(X)
nonzero_col_idx = np.where((X_np != 0).any(axis=0))[0]
X = X_np[:, nonzero_col_idx]
feature_cols = [feature_cols[i] for i in nonzero_col_idx]
X_np = np.asarray(X)
y_np = np.asarray(y)
# Per-feature Pearson correlation (IC) with the label.
corrs = np.array([np.corrcoef(X_np[:, i], y_np)[0, 1] for i in range(X_np.shape[1])])
# For negatively correlated features, negate the column so every kept feature
# correlates positively with the label.
X_adj = X_np.copy()
neg_idx = np.where(corrs < 0)[0]
X_adj[:, neg_idx] = -X_adj[:, neg_idx]
# Indices of features whose |correlation| with the label exceeds 0.01 ...
selected_idx = np.where(np.abs(corrs) > 0.01)[0]
# ... and the corresponding (sign-adjusted) columns of X.
X_selected = X_adj[:, selected_idx]
selected_features = [feature_cols[i] for i in selected_idx]
# NOTE(review): the sign flip above is applied to the TRAINING matrix only;
# any X_test built later from test_df must apply the same per-column flip.
def max_ic_factor_selection(X, y, feature_cols, threshold=0.9):
    """Greedily de-collinearize features by keeping one representative per
    highly-correlated group.

    Features whose pairwise |correlation| exceeds ``threshold`` are grouped
    together (greedy, in column order); within each group the feature with the
    largest |IC| (absolute Pearson correlation with ``y``) is kept.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    y : array-like, shape (n_samples,)
    feature_cols : list of names aligned with X's columns
    threshold : float, pairwise-|corr| cutoff for treating features as redundant

    Returns
    -------
    (X_new, feature_cols_new) : kept columns and their names, in group-discovery
    order (stable w.r.t. the original column order).
    """
    X = np.asarray(X)
    y = np.asarray(y)
    n_features = X.shape[1]
    corr_matrix = np.corrcoef(X, rowvar=False)
    # Precompute every feature's |IC| once, vectorized, instead of calling
    # pearsonr per group member (the original recomputed it for every feature
    # in every multi-member group).
    xc = X - X.mean(axis=0)
    yc = y - y.mean()
    denom = np.sqrt((xc ** 2).sum(axis=0) * (yc ** 2).sum())
    with np.errstate(divide='ignore', invalid='ignore'):
        abs_ic = np.abs((xc * yc[:, None]).sum(axis=0) / denom)
    used = set()
    selected_idx = []
    for i in range(n_features):
        if i in used:
            continue
        # All not-yet-used features highly correlated with feature i.
        group = [i] + [
            j for j in range(i + 1, n_features)
            if j not in used and abs(corr_matrix[i, j]) > threshold
        ]
        # Keep the group member with the largest |IC| against y.
        selected_idx.append(group[int(np.argmax(abs_ic[group]))])
        used.update(group)
    X_new = X[:, selected_idx]
    feature_cols_new = [feature_cols[i] for i in selected_idx]
    return X_new, feature_cols_new
# Merge maximally-correlated factor groups before training to reduce collinearity.
n_train = train_df.shape[0]
X_selected, selected_features = max_ic_factor_selection(X_selected, y[:n_train], selected_features, threshold=0.9)
X_train = X_selected
# BUG FIX: the training matrix had its negatively-correlated columns negated
# (X_adj above), but X_test was built from the raw test_df, so train and test
# feature signs disagreed. Apply the identical sign flip to the test features.
# corrs / feature_cols are still aligned (both indexed over the non-zero columns).
sign_flip = np.array([-1.0 if corrs[feature_cols.index(f)] < 0 else 1.0
                      for f in selected_features])
X_test = test_df[selected_features].values * sign_flip
y_train = y
y_test = test_df[LABEL_COLUMN].values
# Removed a leftover breakpoint() debug call that halted non-interactive runs.
kf = KFold(n_splits=5, shuffle=True, random_state=42)
import math
# Cosine-annealing learning-rate schedule: decays smoothly from initial_lr
# down to eta_min over T_max boosting rounds along half a cosine wave.
def cosine_annealing(epoch, initial_lr=0.01, T_max=5000, eta_min=1e-4):
    cos_decay = (1 + math.cos(math.pi * epoch / T_max)) / 2
    return eta_min + (initial_lr - eta_min) * cos_decay
# XGBoost parameters: deeper trees + stronger regularization + early stopping.
xgb_params = {
    'n_estimators': 10000,       # large cap; early stopping picks the real count
    'learning_rate': 0.01,
    'max_depth': 10,             # deeper trees
    'subsample': 0.85,           # row-sampling fraction per tree
    'colsample_bytree': 0.85,    # feature-sampling fraction per tree
    'tree_method': 'hist',
    'device': 'gpu',
    'predictor': 'gpu_predictor',
    'random_state': 42,
    'reg_alpha': 5,              # stronger L1 regularization
    'reg_lambda': 10,            # stronger L2 regularization
    'min_child_weight': 5,       # larger minimum hessian sum per leaf
    'gamma': 0.2,                # larger minimum loss reduction to split
    # BUG FIX: was misspelled 'early_stopping_round', which XGBoost silently
    # ignores -- so early stopping never triggered and all 10000 trees built.
    'early_stopping_rounds': 100,
    # Removed 'verbose_eval': it is an xgb.train() kwarg, not a valid
    # XGBRegressor parameter, and was being warned about / ignored.
    'eval_metric': 'rmse',
    'callbacks': [
        xgb.callback.LearningRateScheduler(cosine_annealing)
    ]
}
print("start training")
val_scores = []
test_preds = np.zeros(X_test.shape[0])
# 5-fold CV: fit on each split, score RMSE on the held-out fold, and
# accumulate an equal-weight average of the per-fold test predictions.
for fold_train_idx, fold_val_idx in kf.split(X_train):
    fold_X_tr = X_train[fold_train_idx]
    fold_X_val = X_train[fold_val_idx]
    fold_y_tr = y_train[fold_train_idx]
    fold_y_val = y_train[fold_val_idx]
    fold_model = XGBRegressor(**xgb_params)
    # The validation fold doubles as the early-stopping eval set.
    fold_model.fit(fold_X_tr, fold_y_tr, eval_set=[(fold_X_val, fold_y_val)])
    fold_val_pred = fold_model.predict(fold_X_val)
    fold_rmse = np.sqrt(np.mean((fold_val_pred - fold_y_val) ** 2))
    val_scores.append(fold_rmse)
    test_preds += fold_model.predict(X_test) / kf.n_splits
print(f"平均验证RMSE: {np.mean(val_scores):.6f}")
# Persist predictions to CSV: a 1-based ID column plus the fold-averaged prediction.
result_df = pd.DataFrame({
    'ID': np.arange(1, len(test_preds) + 1),
    'prediction': test_preds
})
result_df.to_csv('xgb_prediction-3.csv', index=False)
# BUG FIX: the confirmation message previously said 'xgb_prediction.csv',
# which is not the file actually written above.
print('预测结果已保存到 xgb_prediction-3.csv')