| | import sys |
| | import pandas as pd |
| | import numpy as np |
| | from sklearn.model_selection import KFold |
| | import xgboost as xgb |
| | from xgboost import XGBRegressor |
| | from lightgbm import LGBMRegressor |
| | from sklearn.linear_model import ( |
| | HuberRegressor, RANSACRegressor, TheilSenRegressor, |
| | Lasso, ElasticNet, Ridge |
| | ) |
| | from sklearn.cross_decomposition import PLSRegression |
| | from sklearn.preprocessing import StandardScaler, RobustScaler |
| | from sklearn.ensemble import RandomForestRegressor |
| | from scipy.stats import pearsonr |
| | import warnings |
| | import torch |
| | import matplotlib.pyplot as plt |
| | import seaborn as sns |
| | from concurrent.futures import ThreadPoolExecutor, as_completed |
| | from itertools import combinations |
| | import time |
| |
|
# Load pre-pickled train/test DataFrames from the working directory.
train_df = pd.read_pickle('train_df.pkl')
test_df = pd.read_pickle('test_df.pkl')

# NOTE(review): `length` and `df` are assigned here but never referenced
# later in this script — presumably leftovers; confirm before removing.
length = len(train_df)
df = pd.concat([train_df, test_df], axis=0)

# Every column except the label is treated as a candidate feature.
LABEL_COLUMN = 'label'
feature_cols = [col for col in train_df.columns if col != LABEL_COLUMN]

# Raw training design matrix and target as numpy arrays.
X = train_df[feature_cols].values
y = train_df[LABEL_COLUMN].values
| |
|
# --- Feature pre-filtering --------------------------------------------------
# 1) drop all-zero columns, 2) sign-align features against the label,
# 3) keep only features with |corr(feature, label)| > 0.01.

X_np = np.asarray(X)
nonzero_col_idx = np.flatnonzero((X_np != 0).any(axis=0))
X = X_np[:, nonzero_col_idx]
feature_cols = [feature_cols[i] for i in nonzero_col_idx]

X_np = np.asarray(X)
y_np = np.asarray(y)
# Pearson correlation of every surviving column against the label.
corrs = np.array([np.corrcoef(col, y_np)[0, 1] for col in X_np.T])

# Negate columns that correlate negatively with the label so that every
# feature "points the same way".
X_adj = X_np.copy()
neg_idx = np.flatnonzero(corrs < 0)
X_adj[:, neg_idx] = -X_adj[:, neg_idx]

# Retain only features with non-trivial absolute correlation.
selected_idx = np.flatnonzero(np.abs(corrs) > 0.01)

X_selected = X_adj[:, selected_idx]
selected_features = [feature_cols[i] for i in selected_idx]
| |
|
def max_ic_factor_selection(X, y, feature_cols, threshold=0.9):
    """Greedily drop redundant (highly correlated) features.

    Scans features left to right; each unvisited feature i collects every
    later unvisited feature j with |corr(X_i, X_j)| > threshold into a
    group, and only the group member with the largest absolute IC
    (Pearson correlation with y) is kept.

    Returns (X_new, feature_cols_new): the reduced matrix and the
    matching feature-name list.
    """
    X = np.asarray(X)
    pairwise = np.corrcoef(X, rowvar=False)
    visited = set()
    keep = []
    for i in range(X.shape[1]):
        if i in visited:
            continue
        # All not-yet-grouped features strongly correlated with feature i.
        group = [i] + [
            j for j in range(i + 1, X.shape[1])
            if j not in visited and abs(pairwise[i, j]) > threshold
        ]
        if len(group) == 1:
            keep.append(i)
        else:
            # Within the group, keep the feature most correlated with y.
            ics = [abs(pearsonr(X[:, k], y)[0]) for k in group]
            keep.append(group[np.argmax(ics)])
        visited.update(group)
    return X[:, keep], [feature_cols[i] for i in keep]
| |
|
| | |
# Apply redundancy filtering on the training rows, then build the final
# train/test matrices restricted to the surviving features.
# (y[:n_train] equals y here, since y came from train_df only.)
n_train = train_df.shape[0]
X_selected, selected_features = max_ic_factor_selection(
    X_selected, y[:n_train], selected_features, threshold=0.9
)

X_train = X_selected
# NOTE(review): X_train went through the sign-flip step (negatively
# correlated columns negated) but test_df[selected_features] is taken raw,
# so flipped features differ in sign between train and test — confirm and
# apply the same sign adjustment to X_test.
X_test = test_df[selected_features].values

y_train = y
y_test = test_df[LABEL_COLUMN].values
# Fix: removed a leftover `breakpoint()` debugger call that would halt any
# non-interactive run of this script.
| |
|
# 5-fold cross-validation with shuffling; fixed seed for reproducibility.
kf = KFold(n_splits=5, shuffle=True, random_state=42)

import math  # NOTE(review): mid-file import; conventionally belongs at the top of the file.
| |
|
| | |
def cosine_annealing(epoch, initial_lr=0.01, T_max=5000, eta_min=1e-4):
    """Cosine-annealed learning rate.

    Starts at initial_lr (epoch 0) and decays smoothly to eta_min at
    epoch T_max.
    """
    cos_term = math.cos(math.pi * epoch / T_max)
    return eta_min + (initial_lr - eta_min) * (1 + cos_term) / 2
| |
|
| | |
# XGBoost hyper-parameters (sklearn wrapper). Heavy regularization plus
# early stopping; learning rate follows the cosine schedule via callback.
xgb_params = {
    'n_estimators': 10000,
    'learning_rate': 0.01,
    'max_depth': 10,
    'subsample': 0.85,
    'colsample_bytree': 0.85,
    'tree_method': 'hist',
    'device': 'gpu',  # GPU training via the XGBoost >= 2.0 `device` API
    'random_state': 42,
    'reg_alpha': 5,
    'reg_lambda': 10,
    'min_child_weight': 5,
    'gamma': 0.2,
    # Fix: the key was misspelled `early_stopping_round`, which XGBoost
    # silently ignores — early stopping never kicked in, so every fold
    # trained the full 10000 rounds.
    'early_stopping_rounds': 100,
    'eval_metric': 'rmse',
    'callbacks': [
        xgb.callback.LearningRateScheduler(cosine_annealing)
    ],
}
# Removed: 'predictor': 'gpu_predictor' (deprecated/removed in XGBoost 2.x,
# superseded by `device`) and 'verbose_eval': 100 (an `xgb.train` argument,
# not an XGBRegressor parameter — use `verbose=` in `.fit()` instead).
print("start training")
val_scores = []
test_preds = np.zeros(X_test.shape[0])
| |
|
# 5-fold CV: train one model per fold, record validation RMSE, and
# average the fold models' predictions on the test set.
for fold_train, fold_val in kf.split(X_train):
    X_tr, y_tr = X_train[fold_train], y_train[fold_train]
    X_val, y_val = X_train[fold_val], y_train[fold_val]

    model = XGBRegressor(**xgb_params)
    model.fit(
        X_tr, y_tr,
        eval_set=[(X_val, y_val)],
    )

    fold_rmse = np.sqrt(np.mean((model.predict(X_val) - y_val) ** 2))
    val_scores.append(fold_rmse)
    # Equal-weight ensemble of the per-fold models on the test set.
    test_preds += model.predict(X_test) / kf.n_splits
| |
|
print(f"平均验证RMSE: {np.mean(val_scores):.6f}")

# Write predictions with 1-based IDs. A single path constant keeps the
# written file and the confirmation message in sync.
OUTPUT_PATH = 'xgb_prediction-3.csv'
result_df = pd.DataFrame({
    'ID': np.arange(1, len(test_preds) + 1),
    'prediction': test_preds
})
result_df.to_csv(OUTPUT_PATH, index=False)
# Fix: the message previously claimed 'xgb_prediction.csv' while the file
# was actually written to 'xgb_prediction-3.csv'.
print(f'预测结果已保存到 {OUTPUT_PATH}')
| |
|
| |
|
| |
|
| |
|
| |
|